Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
author	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 May 2021 16:15:05 +0000 (09:15 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 1 May 2021 16:15:05 +0000 (09:15 -0700)
Pull rdma updates from Jason Gunthorpe:
 "This is significantly bug fixes and general cleanups. The noteworthy
  new features are fairly small:

   - XRC support for HNS and improves RQ operations

   - Bug fixes and updates for hns, mlx5, bnxt_re, hfi1, i40iw, rxe, siw
     and qib

   - Quite a few general cleanups on spelling, error handling, static
     checker detections, etc

   - Increase the number of device ports supported beyond 255. High port
     count software switches now exist

   - Several bug fixes for rtrs

   - mlx5 Device Memory support for host controlled atomics

   - Report SRQ tables through to rdma-tool"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (145 commits)
  IB/qib: Remove redundant assignment to ret
  RDMA/nldev: Add copy-on-fork attribute to get sys command
  RDMA/bnxt_re: Fix a double free in bnxt_qplib_alloc_res
  RDMA/siw: Fix a use after free in siw_alloc_mr
  IB/hfi1: Remove redundant variable rcd
  RDMA/nldev: Add QP numbers to SRQ information
  RDMA/nldev: Return SRQ information
  RDMA/restrack: Add support to get resource tracking for SRQ
  RDMA/nldev: Return context information
  RDMA/core: Add CM to restrack after successful attachment to a device
  RDMA/cma: Skip device which doesn't support CM
  RDMA/rxe: Fix a bug in rxe_fill_ip_info()
  RDMA/mlx5: Expose private query port
  RDMA/mlx4: Remove an unused variable
  RDMA/mlx5: Fix type assignment for ICM DM
  IB/mlx5: Set right RoCE l3 type and roce version while deleting GID
  RDMA/i40iw: Fix error unwinding when i40iw_hmc_sd_one fails
  RDMA/cxgb4: add missing qpid increment
  IB/ipoib: Remove unnecessary struct declaration
  RDMA/bnxt_re: Get rid of custom module reference counting
  ...

23 files changed:
1  2 
MAINTAINERS
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/infiniband/ulp/rtrs/rtrs-clt.h
drivers/infiniband/ulp/rtrs/rtrs-pri.h
drivers/infiniband/ulp/rtrs/rtrs-srv.c
drivers/infiniband/ulp/rtrs/rtrs.h
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h

diff --combined MAINTAINERS
@@@ -261,8 -261,8 +261,8 @@@ ABI/AP
  L:    linux-api@vger.kernel.org
  F:    include/linux/syscalls.h
  F:    kernel/sys_ni.c
 -F:    include/uapi/
 -F:    arch/*/include/uapi/
 +X:    include/uapi/
 +X:    arch/*/include/uapi/
  
  ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
@@@ -300,6 -300,7 +300,6 @@@ M: Syed Nayyar Waris <syednwaris@gmail.
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
 -F:    Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
  F:    drivers/counter/104-quad-8.c
  
  ACCES PCI-IDIO-16 GPIO DRIVER
@@@ -572,12 -573,6 +572,12 @@@ S:       Maintaine
  F:    Documentation/scsi/advansys.rst
  F:    drivers/scsi/advansys.c
  
 +ADVANTECH SWBTN DRIVER
 +M:    Andrea Ho <Andrea.Ho@advantech.com.tw>
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    drivers/platform/x86/adv_swbutton.c
 +
  ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
  M:    Michael Hennerich <michael.hennerich@analog.com>
  S:    Supported
@@@ -702,11 -697,6 +702,11 @@@ S:       Maintaine
  F:    Documentation/i2c/busses/i2c-ali1563.rst
  F:    drivers/i2c/busses/i2c-ali1563.c
  
 +ALIENWARE WMI DRIVER
 +L:    Dell.Client.Kernel@dell.com
 +S:    Maintained
 +F:    drivers/platform/x86/dell/alienware-wmi.c
 +
  ALL SENSORS DLH SERIES PRESSURE SENSORS DRIVER
  M:    Tomislav Denis <tomislav.denis@avl.com>
  L:    linux-iio@vger.kernel.org
@@@ -880,6 -870,13 +880,6 @@@ S:        Supporte
  T:    git git://people.freedesktop.org/~agd5f/linux
  F:    drivers/gpu/drm/amd/display/
  
 -AMD ENERGY DRIVER
 -M:    Naveen Krishna Chatradhi <nchatrad@amd.com>
 -L:    linux-hwmon@vger.kernel.org
 -S:    Maintained
 -F:    Documentation/hwmon/amd_energy.rst
 -F:    drivers/hwmon/amd_energy.c
 -
  AMD FAM15H PROCESSOR POWER MONITORING DRIVER
  M:    Huang Rui <ray.huang@amd.com>
  L:    linux-hwmon@vger.kernel.org
@@@ -1145,7 -1142,7 +1145,7 @@@ W:      http://ez.analog.com/community/linux
  F:    Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
  F:    Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
  F:    Documentation/devicetree/bindings/iio/*/adi,*
 -F:    Documentation/devicetree/bindings/iio/dac/ad5758.txt
 +F:    Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
  F:    drivers/iio/*/ad*
  F:    drivers/iio/adc/ltc249*
  F:    drivers/iio/amplifiers/hmc425a.c
@@@ -1184,7 -1181,7 +1184,7 @@@ M:      Joel Fernandes <joel@joelfernandes.o
  M:    Christian Brauner <christian@brauner.io>
  M:    Hridya Valsaraju <hridya@google.com>
  M:    Suren Baghdasaryan <surenb@google.com>
 -L:    devel@driverdev.osuosl.org
 +L:    linux-kernel@vger.kernel.org
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
  F:    drivers/android/
@@@ -1326,7 -1323,7 +1326,7 @@@ ARC PGU DRM DRIVE
  M:    Alexey Brodkin <abrodkin@synopsys.com>
  S:    Supported
  F:    Documentation/devicetree/bindings/display/snps,arcpgu.txt
 -F:    drivers/gpu/drm/arc/
 +F:    drivers/gpu/drm/tiny/arcpgu.c
  
  ARCNET NETWORK LAYER
  M:    Michael Grzeschik <m.grzeschik@pengutronix.de>
@@@ -1533,7 -1530,6 +1533,7 @@@ F:      Documentation/devicetree/bindings/dm
  F:    Documentation/devicetree/bindings/i2c/i2c-owl.yaml
  F:    Documentation/devicetree/bindings/interrupt-controller/actions,owl-sirq.yaml
  F:    Documentation/devicetree/bindings/mmc/owl-mmc.yaml
 +F:    Documentation/devicetree/bindings/net/actions,owl-emac.yaml
  F:    Documentation/devicetree/bindings/pinctrl/actions,*
  F:    Documentation/devicetree/bindings/power/actions,owl-sps.txt
  F:    Documentation/devicetree/bindings/timer/actions,owl-timer.txt
@@@ -1546,7 -1542,6 +1546,7 @@@ F:      drivers/dma/owl-dma.
  F:    drivers/i2c/busses/i2c-owl.c
  F:    drivers/irqchip/irq-owl-sirq.c
  F:    drivers/mmc/host/owl-mmc.c
 +F:    drivers/net/ethernet/actions/
  F:    drivers/pinctrl/actions/*
  F:    drivers/soc/actions/
  F:    include/dt-bindings/power/owl-*
@@@ -1581,13 -1576,11 +1581,13 @@@ R:   Jernej Skrabec <jernej.skrabec@siol.
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
 +L:    linux-sunxi@lists.linux.dev
  F:    arch/arm/mach-sunxi/
  F:    arch/arm64/boot/dts/allwinner/
  F:    drivers/clk/sunxi-ng/
  F:    drivers/pinctrl/sunxi/
  F:    drivers/soc/sunxi/
 +N:    allwinner
  N:    sun[x456789]i
  N:    sun50i
  
@@@ -1644,20 -1637,6 +1644,20 @@@ F:    arch/arm/mach-alpine
  F:    arch/arm64/boot/dts/amazon/
  F:    drivers/*/*alpine*
  
 +ARM/APPLE MACHINE SUPPORT
 +M:    Hector Martin <marcan@marcan.st>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +W:    https://asahilinux.org
 +B:    https://github.com/AsahiLinux/linux/issues
 +C:    irc://chat.freenode.net/asahi-dev
 +T:    git https://github.com/AsahiLinux/linux.git
 +F:    Documentation/devicetree/bindings/arm/apple.yaml
 +F:    Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
 +F:    arch/arm64/boot/dts/apple/
 +F:    drivers/irqchip/irq-apple-aic.c
 +F:    include/dt-bindings/interrupt-controller/apple-aic.h
 +
  ARM/ARTPEC MACHINE SUPPORT
  M:    Jesper Nilsson <jesper.nilsson@axis.com>
  M:    Lars Persson <lars.persson@axis.com>
@@@ -1785,7 -1764,6 +1785,7 @@@ F:      Documentation/devicetree/bindings/ar
  F:    Documentation/trace/coresight/*
  F:    drivers/hwtracing/coresight/*
  F:    include/dt-bindings/arm/coresight-cti-dt.h
 +F:    include/linux/coresight*
  F:    tools/perf/arch/arm/util/auxtrace.c
  F:    tools/perf/arch/arm/util/cs-etm.c
  F:    tools/perf/arch/arm/util/cs-etm.h
@@@ -1812,26 -1790,19 +1812,26 @@@ F:   drivers/net/ethernet/cortina
  F:    drivers/pinctrl/pinctrl-gemini.c
  F:    drivers/rtc/rtc-ftrtc010.c
  
 -ARM/CZ.NIC TURRIS MOX SUPPORT
 -M:    Marek Behun <marek.behun@nic.cz>
 +ARM/CZ.NIC TURRIS SUPPORT
 +M:    Marek Behun <kabel@kernel.org>
  S:    Maintained
 -W:    http://mox.turris.cz
 +W:    https://www.turris.cz/
  F:    Documentation/ABI/testing/debugfs-moxtet
  F:    Documentation/ABI/testing/sysfs-bus-moxtet-devices
  F:    Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
  F:    Documentation/devicetree/bindings/bus/moxtet.txt
  F:    Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
  F:    Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
 +F:    Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
 +F:    Documentation/devicetree/bindings/watchdog/armada-37xx-wdt.txt
  F:    drivers/bus/moxtet.c
  F:    drivers/firmware/turris-mox-rwtm.c
 +F:    drivers/leds/leds-turris-omnia.c
 +F:    drivers/mailbox/armada-37xx-rwtm-mailbox.c
  F:    drivers/gpio/gpio-moxtet.c
 +F:    drivers/watchdog/armada_37xx_wdt.c
 +F:    include/dt-bindings/bus/moxtet.h
 +F:    include/linux/armada-37xx-rwtm-mailbox.h
  F:    include/linux/moxtet.h
  
  ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
@@@ -2223,15 -2194,6 +2223,15 @@@ F:    drivers/*/*npcm
  F:    drivers/*/*/*npcm*
  F:    include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
  
 +ARM/NUVOTON WPCM450 ARCHITECTURE
 +M:    Jonathan Neuschäfer <j.neuschaefer@gmx.net>
 +L:    openbmc@lists.ozlabs.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/*/*wpcm*
 +F:    arch/arm/boot/dts/nuvoton-wpcm450*
 +F:    arch/arm/mach-npcm/wpcm450.c
 +F:    drivers/*/*wpcm*
 +
  ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
  L:    openmoko-kernel@lists.openmoko.org (subscribers-only)
  S:    Orphan
@@@ -2334,7 -2296,6 +2334,7 @@@ F:      drivers/tty/serial/msm_serial.
  F:    drivers/usb/dwc3/dwc3-qcom.c
  F:    include/dt-bindings/*/qcom*
  F:    include/linux/*/qcom*
 +F:    include/linux/soc/qcom/
  
  ARM/RADISYS ENP2611 MACHINE SUPPORT
  M:    Lennert Buytenhek <kernel@wantstofly.org>
@@@ -2414,7 -2375,7 +2414,7 @@@ F:      sound/soc/rockchip
  N:    rockchip
  
  ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
@@@ -2528,7 -2489,7 +2528,7 @@@ N:      sc27x
  N:    sc2731
  
  ARM/STI ARCHITECTURE
 -M:    Patrice Chotard <patrice.chotard@st.com>
 +M:    Patrice Chotard <patrice.chotard@foss.st.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  W:    http://www.stlinux.com
@@@ -2561,7 -2522,7 +2561,7 @@@ F:      include/linux/remoteproc/st_slim_rpr
  
  ARM/STM32 ARCHITECTURE
  M:    Maxime Coquelin <mcoquelin.stm32@gmail.com>
 -M:    Alexandre Torgue <alexandre.torgue@st.com>
 +M:    Alexandre Torgue <alexandre.torgue@foss.st.com>
  L:    linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers)
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -2670,10 -2631,8 +2670,10 @@@ F:    drivers/watchdog/visconti_wdt.
  N:    visconti
  
  ARM/UNIPHIER ARCHITECTURE
 +M:    Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 +M:    Masami Hiramatsu <mhiramat@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 -S:    Orphan
 +S:    Maintained
  F:    Documentation/devicetree/bindings/arm/socionext/uniphier.yaml
  F:    Documentation/devicetree/bindings/gpio/socionext,uniphier-gpio.yaml
  F:    Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
@@@ -2758,6 -2717,7 +2758,6 @@@ F:      Documentation/devicetree/bindings/i2
  F:    Documentation/devicetree/bindings/i2c/xlnx,xps-iic-2.00.a.yaml
  F:    Documentation/devicetree/bindings/spi/xlnx,zynq-qspi.yaml
  F:    arch/arm/mach-zynq/
 -F:    drivers/block/xsysace.c
  F:    drivers/clocksource/timer-cadence-ttc.c
  F:    drivers/cpuidle/cpuidle-zynq.c
  F:    drivers/edac/synopsys_edac.c
@@@ -2896,18 -2856,6 +2896,18 @@@ W:    http://www.openaoe.org
  F:    Documentation/admin-guide/aoe/
  F:    drivers/block/aoe/
  
 +ATC260X PMIC MFD DRIVER
 +M:    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 +M:    Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
 +L:    linux-actions@lists.infradead.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/mfd/actions,atc260x.yaml
 +F:    drivers/input/misc/atc260x-onkey.c
 +F:    drivers/mfd/atc260*
 +F:    drivers/power/reset/atc260x-poweroff.c
 +F:    drivers/regulator/atc260x-regulator.c
 +F:    include/linux/mfd/atc260x/*
 +
  ATHEROS 71XX/9XXX GPIO DRIVER
  M:    Alban Bedel <albeu@free.fr>
  S:    Maintained
@@@ -3031,11 -2979,9 +3031,11 @@@ L:    linux-audit@redhat.com (moderated fo
  S:    Supported
  W:    https://github.com/linux-audit
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/audit.git
 +F:    include/asm-generic/audit_*.h
  F:    include/linux/audit.h
  F:    include/uapi/linux/audit.h
  F:    kernel/audit*
 +F:    lib/*audit.c
  
  AUXILIARY DISPLAY DRIVERS
  M:    Miguel Ojeda <ojeda@kernel.org>
@@@ -3169,7 -3115,7 +3169,7 @@@ C:      irc://irc.oftc.net/bcach
  F:    drivers/md/bcache/
  
  BDISP ST MEDIA DRIVER
 -M:    Fabien Dessenne <fabien.dessenne@st.com>
 +M:    Fabien Dessenne <fabien.dessenne@foss.st.com>
  L:    linux-media@vger.kernel.org
  S:    Supported
  W:    https://linuxtv.org
@@@ -3287,7 -3233,6 +3287,7 @@@ T:      git git://git.kernel.org/pub/scm/lin
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
  F:    Documentation/bpf/
  F:    Documentation/networking/filter.rst
 +F:    Documentation/userspace-api/ebpf/
  F:    arch/*/net/*
  F:    include/linux/bpf*
  F:    include/linux/filter.h
@@@ -3302,7 -3247,6 +3302,7 @@@ F:      net/core/filter.
  F:    net/sched/act_bpf.c
  F:    net/sched/cls_bpf.c
  F:    samples/bpf/
 +F:    scripts/bpf_doc.py
  F:    tools/bpf/
  F:    tools/lib/bpf/
  F:    tools/testing/selftests/bpf/
@@@ -3425,7 -3369,7 +3425,7 @@@ F:      include/linux/dsa/brcm.
  F:    include/linux/platform_data/b53.h
  
  BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 -M:    Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
 +M:    Nicolas Saenz Julienne <nsaenz@kernel.org>
  L:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -3614,14 -3558,6 +3614,14 @@@ S:    Supporte
  F:    Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
  F:    drivers/i2c/busses/i2c-brcmstb.c
  
 +BROADCOM BRCMSTB UART DRIVER
 +M:    Al Cooper <alcooperx@gmail.com>
 +L:    linux-serial@vger.kernel.org
 +L:    bcm-kernel-feedback-list@broadcom.com
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
 +F:    drivers/tty/serial/8250/8250_bcm7271.c
 +
  BROADCOM BRCMSTB USB EHCI DRIVER
  M:    Al Cooper <alcooperx@gmail.com>
  L:    linux-usb@vger.kernel.org
@@@ -3739,7 -3675,7 +3739,7 @@@ M:      bcm-kernel-feedback-list@broadcom.co
  L:    linux-pm@vger.kernel.org
  S:    Maintained
  T:    git git://github.com/broadcom/stblinux.git
 -F:    drivers/soc/bcm/bcm-pmb.c
 +F:    drivers/soc/bcm/bcm63xx/bcm-pmb.c
  F:    include/dt-bindings/soc/bcm-pmb.h
  
  BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@@ -3753,7 -3689,7 +3753,7 @@@ BROADCOM SPI DRIVE
  M:    Kamal Dasu <kdasu.kdev@gmail.com>
  M:    bcm-kernel-feedback-list@broadcom.com
  S:    Maintained
 -F:    Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
 +F:    Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
  F:    drivers/spi/spi-bcm-qspi.*
  F:    drivers/spi/spi-brcmstb-qspi.c
  F:    drivers/spi/spi-iproc-qspi.c
@@@ -4245,20 -4181,13 +4245,20 @@@ X:   drivers/char/tpm
  CHECKPATCH
  M:    Andy Whitcroft <apw@canonical.com>
  M:    Joe Perches <joe@perches.com>
 +R:    Dwaipayan Ray <dwaipayanray1@gmail.com>
 +R:    Lukas Bulwahn <lukas.bulwahn@gmail.com>
  S:    Maintained
  F:    scripts/checkpatch.pl
  
 +CHECKPATCH DOCUMENTATION
 +M:    Dwaipayan Ray <dwaipayanray1@gmail.com>
 +M:    Lukas Bulwahn <lukas.bulwahn@gmail.com>
 +R:    Joe Perches <joe@perches.com>
 +S:    Maintained
 +F:    Documentation/dev-tools/checkpatch.rst
 +
  CHINESE DOCUMENTATION
 -M:    Harry Wei <harryxiyou@gmail.com>
 -M:    Alex Shi <alex.shi@linux.alibaba.com>
 -L:    xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
 +M:    Alex Shi <alexs@kernel.org>
  S:    Maintained
  F:    Documentation/translations/zh_CN/
  
@@@ -4491,12 -4420,6 +4491,12 @@@ S:    Supporte
  F:    Documentation/process/code-of-conduct-interpretation.rst
  F:    Documentation/process/code-of-conduct.rst
  
 +COMEDI DRIVERS
 +M:    Ian Abbott <abbotti@mev.co.uk>
 +M:    H Hartley Sweeten <hsweeten@visionengravers.com>
 +S:    Odd Fixes
 +F:    drivers/comedi/
 +
  COMMON CLK FRAMEWORK
  M:    Michael Turquette <mturquette@baylibre.com>
  M:    Stephen Boyd <sboyd@kernel.org>
@@@ -4662,11 -4585,6 +4662,11 @@@ F:    drivers/counter
  F:    include/linux/counter.h
  F:    include/linux/counter_enum.h
  
 +CP2615 I2C DRIVER
 +M:    Bence Csókás <bence98@sch.bme.hu>
 +S:    Maintained
 +F:    drivers/i2c/busses/i2c-cp2615.c
 +
  CPMAC ETHERNET DRIVER
  M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    netdev@vger.kernel.org
@@@ -4958,8 -4876,16 +4958,8 @@@ S:     Maintaine
  W:    http://www.armlinux.org.uk/
  F:    drivers/video/fbdev/cyber2000fb.*
  
 -CYCLADES ASYNC MUX DRIVER
 -S:    Orphan
 -W:    http://www.cyclades.com/
 -F:    drivers/tty/cyclades.c
 -F:    include/linux/cyclades.h
 -F:    include/uapi/linux/cyclades.h
 -
  CYCLADES PC300 DRIVER
  S:    Orphan
 -W:    http://www.cyclades.com/
  F:    drivers/net/wan/pc300*
  
  CYPRESS_FIRMWARE MEDIA DRIVER
@@@ -5108,19 -5034,19 +5108,19 @@@ F:   drivers/platform/x86/dell/dell_rbu.
  
  DELL SMBIOS DRIVER
  M:    Pali Rohár <pali@kernel.org>
 -M:    Mario Limonciello <mario.limonciello@dell.com>
 +L:    Dell.Client.Kernel@dell.com
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    drivers/platform/x86/dell/dell-smbios.*
  
  DELL SMBIOS SMM DRIVER
 -M:    Mario Limonciello <mario.limonciello@dell.com>
 +L:    Dell.Client.Kernel@dell.com
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    drivers/platform/x86/dell/dell-smbios-smm.c
  
  DELL SMBIOS WMI DRIVER
 -M:    Mario Limonciello <mario.limonciello@dell.com>
 +L:    Dell.Client.Kernel@dell.com
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    drivers/platform/x86/dell/dell-smbios-wmi.c
@@@ -5134,14 -5060,14 +5134,14 @@@ F:   Documentation/driver-api/dcdbas.rs
  F:    drivers/platform/x86/dell/dcdbas.*
  
  DELL WMI DESCRIPTOR DRIVER
 -M:    Mario Limonciello <mario.limonciello@dell.com>
 +L:    Dell.Client.Kernel@dell.com
  S:    Maintained
  F:    drivers/platform/x86/dell/dell-wmi-descriptor.c
  
  DELL WMI SYSMAN DRIVER
  M:    Divya Bharathi <divya.bharathi@dell.com>
 -M:    Mario Limonciello <mario.limonciello@dell.com>
  M:    Prasanth Ksr <prasanth.ksr@dell.com>
 +L:    Dell.Client.Kernel@dell.com
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-class-firmware-attributes
@@@ -5154,7 -5080,7 +5154,7 @@@ S:      Maintaine
  F:    drivers/platform/x86/dell/dell-wmi.c
  
  DELTA ST MEDIA DRIVER
 -M:    Hugues Fruchet <hugues.fruchet@st.com>
 +M:    Hugues Fruchet <hugues.fruchet@foss.st.com>
  L:    linux-media@vger.kernel.org
  S:    Supported
  W:    https://linuxtv.org
@@@ -5173,13 -5099,6 +5173,13 @@@ S:    Maintaine
  F:    drivers/dma/dw-edma/
  F:    include/linux/dma/edma.h
  
 +DESIGNWARE XDATA IP DRIVER
 +M:    Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/misc-devices/dw-xdata-pcie.rst
 +F:    drivers/misc/dw-xdata-pcie.c
 +
  DESIGNWARE USB2 DRD IP DRIVER
  M:    Minas Harutyunyan <hminas@synopsys.com>
  L:    linux-usb@vger.kernel.org
@@@ -5250,12 -5169,6 +5250,12 @@@ M:    Torben Mathiasen <device@lanana.org
  S:    Maintained
  W:    http://lanana.org/docs/device-list/index.html
  
 +DEVICE RESOURCE MANAGEMENT HELPERS
 +M:    Hans de Goede <hdegoede@redhat.com>
 +R:    Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
 +S:    Maintained
 +F:    include/linux/devm-helpers.h
 +
  DEVICE-MAPPER  (LVM)
  M:    Alasdair Kergon <agk@redhat.com>
  M:    Mike Snitzer <snitzer@redhat.com>
@@@ -5465,7 -5378,7 +5465,7 @@@ F:      drivers/hwmon/dme1737.
  DMI/SMBIOS SUPPORT
  M:    Jean Delvare <jdelvare@suse.com>
  S:    Maintained
 -T:    quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jdelvare/staging.git dmi-for-next
  F:    Documentation/ABI/testing/sysfs-firmware-dmi-tables
  F:    drivers/firmware/dmi-id.c
  F:    drivers/firmware/dmi_scan.c
@@@ -5491,12 -5404,6 +5491,12 @@@ X:    Documentation/power
  X:    Documentation/spi/
  X:    Documentation/userspace-api/media/
  
 +DOCUMENTATION REPORTING ISSUES
 +M:    Thorsten Leemhuis <linux@leemhuis.info>
 +L:    linux-doc@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/admin-guide/reporting-issues.rst
 +
  DOCUMENTATION SCRIPTS
  M:    Mauro Carvalho Chehab <mchehab@kernel.org>
  L:    linux-doc@vger.kernel.org
@@@ -5564,11 -5471,11 +5564,11 @@@ F:   drivers/net/ethernet/freescale/dpaa2
  F:    drivers/net/ethernet/freescale/dpaa2/dpni*
  
  DPAA2 ETHERNET SWITCH DRIVER
 -M:    Ioana Radulescu <ruxandra.radulescu@nxp.com>
  M:    Ioana Ciornei <ioana.ciornei@nxp.com>
 -L:    linux-kernel@vger.kernel.org
 +L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/staging/fsl-dpaa2/ethsw
 +F:    drivers/net/ethernet/freescale/dpaa2/dpaa2-switch*
 +F:    drivers/net/ethernet/freescale/dpaa2/dpsw*
  
  DPT_I2O SCSI RAID DRIVER
  M:    Adaptec OEM Raid Solutions <aacraid@microsemi.com>
@@@ -5661,12 -5568,6 +5661,12 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
  F:    drivers/gpu/drm/panel/panel-boe-himax8279d.c
  
 +DRM DRIVER FOR CHIPONE ICN6211 MIPI-DSI to RGB CONVERTER BRIDGE
 +M:    Jagan Teki <jagan@amarulasolutions.com>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml
 +F:    drivers/gpu/drm/bridge/chipone-icn6211.c
 +
  DRM DRIVER FOR FARADAY TVE200 TV ENCODER
  M:    Linus Walleij <linus.walleij@linaro.org>
  S:    Maintained
@@@ -5685,14 -5586,6 +5685,14 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
  F:    drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
  
 +DRM DRIVER FOR GENERIC USB DISPLAY
 +M:    Noralf Trønnes <noralf@tronnes.org>
 +S:    Maintained
 +W:    https://github.com/notro/gud/wiki
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    drivers/gpu/drm/gud/
 +F:    include/drm/gud.h
 +
  DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
  M:    Hans de Goede <hdegoede@redhat.com>
  S:    Maintained
@@@ -5886,7 -5779,7 +5886,7 @@@ DRM DRIVER FOR ST-ERICSSON MCD
  M:    Linus Walleij <linus.walleij@linaro.org>
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
 -F:    Documentation/devicetree/bindings/display/ste,mcde.txt
 +F:    Documentation/devicetree/bindings/display/ste,mcde.yaml
  F:    drivers/gpu/drm/mcde/
  
  DRM DRIVER FOR TDFX VIDEO CARDS
@@@ -5942,7 -5835,7 +5942,7 @@@ M:      David Airlie <airlied@linux.ie
  M:    Daniel Vetter <daniel@ffwll.ch>
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
 -B:    https://bugs.freedesktop.org/
 +B:    https://gitlab.freedesktop.org/drm
  C:    irc://chat.freenode.net/dri-devel
  T:    git git://anongit.freedesktop.org/drm/drm
  F:    Documentation/devicetree/bindings/display/
@@@ -6001,7 -5894,6 +6001,7 @@@ F:      drivers/gpu/drm/atmel-hlcdc
  DRM DRIVERS FOR BRIDGE CHIPS
  M:    Andrzej Hajda <a.hajda@samsung.com>
  M:    Neil Armstrong <narmstrong@baylibre.com>
 +M:    Robert Foss <robert.foss@linaro.org>
  R:    Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
  R:    Jonas Karlman <jonas@kwiboo.se>
  R:    Jernej Skrabec <jernej.skrabec@siol.net>
@@@ -6071,7 -5963,6 +6071,7 @@@ DRM DRIVERS FOR MEDIATE
  M:    Chun-Kuang Hu <chunkuang.hu@kernel.org>
  M:    Philipp Zabel <p.zabel@pengutronix.de>
  L:    dri-devel@lists.freedesktop.org
 +L:    linux-mediatek@lists.infradead.org (moderated for non-subscribers)
  S:    Supported
  F:    Documentation/devicetree/bindings/display/mediatek/
  F:    drivers/gpu/drm/mediatek/
@@@ -6097,9 -5988,9 +6097,9 @@@ L:      dri-devel@lists.freedesktop.or
  L:    linux-renesas-soc@vger.kernel.org
  S:    Supported
  T:    git git://linuxtv.org/pinchartl/media drm/du/next
 -F:    Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.txt
 +F:    Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml
  F:    Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
 -F:    Documentation/devicetree/bindings/display/renesas,du.txt
 +F:    Documentation/devicetree/bindings/display/renesas,du.yaml
  F:    drivers/gpu/drm/rcar-du/
  F:    drivers/gpu/drm/shmobile/
  F:    include/linux/platform_data/shmob_drm.h
@@@ -6115,6 -6006,7 +6115,6 @@@ F:      drivers/gpu/drm/rockchip
  
  DRM DRIVERS FOR STI
  M:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
 -M:    Vincent Abriou <vincent.abriou@st.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -6122,9 -6014,10 +6122,9 @@@ F:     Documentation/devicetree/bindings/di
  F:    drivers/gpu/drm/sti
  
  DRM DRIVERS FOR STM
 -M:    Yannick Fertre <yannick.fertre@st.com>
 -M:    Philippe Cornu <philippe.cornu@st.com>
 +M:    Yannick Fertre <yannick.fertre@foss.st.com>
 +M:    Philippe Cornu <philippe.cornu@foss.st.com>
  M:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
 -M:    Vincent Abriou <vincent.abriou@st.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -6161,7 -6054,7 +6161,7 @@@ DRM DRIVERS FOR V3
  M:    Eric Anholt <eric@anholt.net>
  S:    Supported
  T:    git git://anongit.freedesktop.org/drm/drm-misc
 -F:    Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.txt
 +F:    Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml
  F:    drivers/gpu/drm/v3d/
  F:    include/uapi/drm/v3d_drm.h
  
@@@ -7099,7 -6992,6 +7099,7 @@@ S:      Maintaine
  F:    Documentation/ABI/testing/sysfs-bus-dfl*
  F:    Documentation/fpga/dfl.rst
  F:    drivers/fpga/dfl*
 +F:    drivers/uio/uio_dfl.c
  F:    include/linux/dfl.h
  F:    include/uapi/linux/fpga-dfl.h
  
@@@ -7199,7 -7091,7 +7199,7 @@@ S:      Maintaine
  F:    drivers/i2c/busses/i2c-cpm.c
  
  FREESCALE IMX / MXC FEC DRIVER
 -M:    Fugang Duan <fugang.duan@nxp.com>
 +M:    Joakim Zhang <qiangqing.zhang@nxp.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/net/fsl-fec.txt
@@@ -7240,13 -7132,6 +7240,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
  F:    drivers/i2c/busses/i2c-imx-lpi2c.c
  
 +FREESCALE MPC I2C DRIVER
 +M:    Chris Packham <chris.packham@alliedtelesis.co.nz>
 +L:    linux-i2c@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-mpc.yaml
 +F:    drivers/i2c/busses/i2c-mpc.c
 +
  FREESCALE QORIQ DPAA ETHERNET DRIVER
  M:    Madalin Bucur <madalin.bucur@nxp.com>
  L:    netdev@vger.kernel.org
@@@ -7276,7 -7161,6 +7276,7 @@@ FREESCALE QUAD SPI DRIVE
  M:    Han Xu <han.xu@nxp.com>
  L:    linux-spi@vger.kernel.org
  S:    Maintained
 +F:    Documentation/devicetree/bindings/spi/fsl,spi-fsl-qspi.yaml
  F:    drivers/spi/spi-fsl-qspi.c
  
  FREESCALE QUICC ENGINE LIBRARY
@@@ -7312,7 -7196,7 +7312,7 @@@ M:      Li Yang <leoyang.li@nxp.com
  L:    linuxppc-dev@lists.ozlabs.org
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
 +F:    Documentation/devicetree/bindings/misc/fsl,dpaa2-console.yaml
  F:    Documentation/devicetree/bindings/soc/fsl/
  F:    drivers/soc/fsl/
  F:    include/linux/fsl/
@@@ -7444,13 -7328,6 +7444,13 @@@ F:    fs/verity
  F:    include/linux/fsverity.h
  F:    include/uapi/linux/fsverity.h
  
 +FT260 FTDI USB-HID TO I2C BRIDGE DRIVER
 +M:    Michael Zaidman <michael.zaidman@gmail.com>
 +L:    linux-i2c@vger.kernel.org
 +L:    linux-input@vger.kernel.org
 +S:    Maintained
 +F:    drivers/hid/hid-ft260.c
 +
  FUJITSU LAPTOP EXTRAS
  M:    Jonathan Woithe <jwoithe@just42.net>
  L:    platform-driver-x86@vger.kernel.org
@@@ -7486,7 -7363,6 +7486,7 @@@ M:      Thomas Gleixner <tglx@linutronix.de
  M:    Ingo Molnar <mingo@redhat.com>
  R:    Peter Zijlstra <peterz@infradead.org>
  R:    Darren Hart <dvhart@infradead.org>
 +R:    Davidlohr Bueso <dave@stgolabs.net>
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@@ -7509,6 -7385,14 +7509,6 @@@ F:     Documentation/hwmon/gsc-hwmon.rs
  F:    drivers/hwmon/gsc-hwmon.c
  F:    include/linux/platform_data/gsc_hwmon.h
  
 -GASKET DRIVER FRAMEWORK
 -M:    Rob Springer <rspringer@google.com>
 -M:    Todd Poynor <toddpoynor@google.com>
 -M:    Ben Chan <benchan@chromium.org>
 -M:    Richard Yeh <rcy@google.com>
 -S:    Maintained
 -F:    drivers/staging/gasket/
 -
  GCC PLUGINS
  M:    Kees Cook <keescook@chromium.org>
  L:    linux-hardening@vger.kernel.org
@@@ -7592,9 -7476,8 +7592,9 @@@ F:      include/uapi/asm-generic
  GENERIC PHY FRAMEWORK
  M:    Kishon Vijay Abraham I <kishon@ti.com>
  M:    Vinod Koul <vkoul@kernel.org>
 -L:    linux-kernel@vger.kernel.org
 +L:    linux-phy@lists.infradead.org
  S:    Supported
 +Q:    https://patchwork.kernel.org/project/linux-phy/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
  F:    Documentation/devicetree/bindings/phy/
  F:    drivers/phy/
@@@ -7660,12 -7543,6 +7660,12 @@@ F:    Documentation/filesystems/gfs2
  F:    fs/gfs2/
  F:    include/uapi/linux/gfs2_ondisk.h
  
 +GIGABYTE WMI DRIVER
 +M:    Thomas Weißschuh <thomas@weissschuh.net>
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    drivers/platform/x86/gigabyte-wmi.c
 +
  GNSS SUBSYSTEM
  M:    Johan Hovold <johan@kernel.org>
  S:    Maintained
@@@ -7974,7 -7851,6 +7974,7 @@@ F:      Documentation/hwmon
  F:    drivers/hwmon/
  F:    include/linux/hwmon*.h
  F:    include/trace/events/hwmon*.h
 +K:    (devm_)?hwmon_device_(un)?register(|_with_groups|_with_info)
  
  HARDWARE RANDOM NUMBER GENERATOR CORE
  M:    Matt Mackall <mpm@selenic.com>
@@@ -8016,11 -7892,6 +8016,11 @@@ W:    https://linuxtv.or
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/usb/hdpvr/
  
 +HEWLETT PACKARD ENTERPRISE ILO CHIF DRIVER
 +M:    Matt Hsiao <matt.hsiao@hpe.com>
 +S:    Supported
 +F:    drivers/misc/hpilo.[ch]
 +
  HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
  M:    Jerry Hoemann <jerry.hoemann@hpe.com>
  S:    Supported
@@@ -8169,13 -8040,6 +8169,13 @@@ F:    drivers/crypto/hisilicon/hpre/hpre.
  F:    drivers/crypto/hisilicon/hpre/hpre_crypto.c
  F:    drivers/crypto/hisilicon/hpre/hpre_main.c
  
 +HISILICON I2C CONTROLLER DRIVER
 +M:    Yicong Yang <yangyicong@hisilicon.com>
 +L:    linux-i2c@vger.kernel.org
 +S:    Maintained
 +W:    https://www.hisilicon.com
 +F:    drivers/i2c/busses/i2c-hisi.c
 +
  HISILICON LPC BUS DRIVER
  M:    john.garry@huawei.com
  S:    Maintained
@@@ -8226,7 -8090,6 +8226,6 @@@ F:      drivers/crypto/hisilicon/zip
  
  HISILICON ROCE DRIVER
  M:    Lijun Ou <oulijun@huawei.com>
- M:    Wei Hu(Xavier) <huwei87@hisilicon.com>
  M:    Weihang Li <liweihang@huawei.com>
  L:    linux-rdma@vger.kernel.org
  S:    Maintained
@@@ -8250,15 -8113,9 +8249,15 @@@ F:    drivers/crypto/hisilicon/sec2/sec_cr
  F:    drivers/crypto/hisilicon/sec2/sec_crypto.h
  F:    drivers/crypto/hisilicon/sec2/sec_main.c
  
 +HISILICON SPI Controller DRIVER FOR KUNPENG SOCS
 +M:    Jay Fang <f.fangjian@huawei.com>
 +L:    linux-spi@vger.kernel.org
 +S:    Maintained
 +W:    http://www.hisilicon.com
 +F:    drivers/spi/spi-hisi-kunpeng.c
 +
  HISILICON STAGING DRIVERS FOR HIKEY 960/970
  M:    Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
 -L:    devel@driverdev.osuosl.org
  S:    Maintained
  F:    drivers/staging/hikey9xx/
  
@@@ -8351,7 -8208,7 +8350,7 @@@ M:      Lorenzo Bianconi <lorenzo.bianconi83
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  W:    http://www.st.com/
 -F:    Documentation/devicetree/bindings/iio/humidity/hts221.txt
 +F:    Documentation/devicetree/bindings/iio/humidity/st,hts221.yaml
  F:    drivers/iio/humidity/hts221*
  
  HUAWEI ETHERNET DRIVER
@@@ -8373,7 -8230,7 +8372,7 @@@ F:      include/linux/hugetlb.
  F:    mm/hugetlb.c
  
  HVA ST MEDIA DRIVER
 -M:    Jean-Christophe Trotin <jean-christophe.trotin@st.com>
 +M:    Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com>
  L:    linux-media@vger.kernel.org
  S:    Supported
  W:    https://linuxtv.org
@@@ -8400,12 -8257,11 +8399,12 @@@ S:   Maintaine
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/i2c/hi556.c
  
 -Hyper-V CORE AND DRIVERS
 +Hyper-V/Azure CORE AND DRIVERS
  M:    "K. Y. Srinivasan" <kys@microsoft.com>
  M:    Haiyang Zhang <haiyangz@microsoft.com>
  M:    Stephen Hemminger <sthemmin@microsoft.com>
  M:    Wei Liu <wei.liu@kernel.org>
 +M:    Dexuan Cui <decui@microsoft.com>
  L:    linux-hyperv@vger.kernel.org
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
@@@ -8422,7 -8278,6 +8421,7 @@@ F:      drivers/hid/hid-hyperv.
  F:    drivers/hv/
  F:    drivers/input/serio/hyperv-keyboard.c
  F:    drivers/iommu/hyperv-iommu.c
 +F:    drivers/net/ethernet/microsoft/
  F:    drivers/net/hyperv/
  F:    drivers/pci/controller/pci-hyperv-intf.c
  F:    drivers/pci/controller/pci-hyperv.c
@@@ -8663,8 -8518,8 +8662,8 @@@ F:      drivers/pci/hotplug/rpaphp
  
  IBM Power SRIOV Virtual NIC Device Driver
  M:    Dany Madden <drt@linux.ibm.com>
 -M:    Lijun Pan <ljp@linux.ibm.com>
  M:    Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 +R:    Thomas Falcon <tlfalcon@linux.ibm.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/ibm/ibmvnic.*
@@@ -8690,8 -8545,7 +8689,8 @@@ S:      Supporte
  F:    drivers/scsi/ibmvscsi/ibmvfc*
  
  IBM Power Virtual Management Channel Driver
 -M:    Steven Royer <seroyer@linux.ibm.com>
 +M:    Brad Warrum <bwarrum@linux.ibm.com>
 +M:    Ritu Agarwal <rituagar@linux.ibm.com>
  S:    Supported
  F:    drivers/misc/ibmvmc.*
  
@@@ -8749,8 -8603,9 +8748,8 @@@ F:      drivers/ide
  F:    include/linux/ide.h
  
  IDE/ATAPI DRIVERS
 -M:    Borislav Petkov <bp@alien8.de>
  L:    linux-ide@vger.kernel.org
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/cdrom/ide-cd.rst
  F:    drivers/ide/ide-cd*
  
@@@ -8818,7 -8673,7 +8817,7 @@@ M:      Peter Rosin <peda@axentia.se
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
 -F:    Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
 +F:    Documentation/devicetree/bindings/iio/dac/dpot-dac.yaml
  F:    drivers/iio/dac/dpot-dac.c
  
  IIO ENVELOPE DETECTOR
@@@ -8826,7 -8681,7 +8825,7 @@@ M:      Peter Rosin <peda@axentia.se
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  F:    Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
 -F:    Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
 +F:    Documentation/devicetree/bindings/iio/adc/envelope-detector.yaml
  F:    drivers/iio/adc/envelope-detector.c
  
  IIO MULTIPLEXER
@@@ -8836,15 -8691,10 +8835,15 @@@ S:   Maintaine
  F:    Documentation/devicetree/bindings/iio/multiplexer/io-channel-mux.txt
  F:    drivers/iio/multiplexer/iio-mux.c
  
 +IIO SCMI BASED DRIVER
 +M:    Jyoti Bhayana <jbhayana@google.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/common/scmi_sensors/scmi_iio.c
 +
  IIO SUBSYSTEM AND DRIVERS
  M:    Jonathan Cameron <jic23@kernel.org>
  R:    Lars-Peter Clausen <lars@metafoo.de>
 -R:    Peter Meerwald-Stadler <pmeerw@pmeerw.net>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
@@@ -8860,9 -8710,9 +8859,9 @@@ IIO UNIT CONVERTE
  M:    Peter Rosin <peda@axentia.se>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.txt
 -F:    Documentation/devicetree/bindings/iio/afe/current-sense-shunt.txt
 -F:    Documentation/devicetree/bindings/iio/afe/voltage-divider.txt
 +F:    Documentation/devicetree/bindings/iio/afe/current-sense-amplifier.yaml
 +F:    Documentation/devicetree/bindings/iio/afe/current-sense-shunt.yaml
 +F:    Documentation/devicetree/bindings/iio/afe/voltage-divider.yaml
  F:    drivers/iio/afe/iio-rescale.c
  
  IKANOS/ADI EAGLE ADSL USB DRIVER
@@@ -9264,26 -9114,6 +9263,26 @@@ F:    include/linux/mei_cl_bus.
  F:    include/uapi/linux/mei.h
  F:    samples/mei/*
  
 +INTEL MAX 10 BMC MFD DRIVER
 +M:    Xu Yilun <yilun.xu@intel.com>
 +R:    Tom Rix <trix@redhat.com>
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
 +F:    Documentation/hwmon/intel-m10-bmc-hwmon.rst
 +F:    drivers/hwmon/intel-m10-bmc-hwmon.c
 +F:    drivers/mfd/intel-m10-bmc.c
 +F:    include/linux/mfd/intel-m10-bmc.h
 +
 +INTEL MAX 10 BMC MFD DRIVER
 +M:    Xu Yilun <yilun.xu@intel.com>
 +R:    Tom Rix <trix@redhat.com>
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
 +F:    Documentation/hwmon/intel-m10-bmc-hwmon.rst
 +F:    drivers/hwmon/intel-m10-bmc-hwmon.c
 +F:    drivers/mfd/intel-m10-bmc.c
 +F:    include/linux/mfd/intel-m10-bmc.h
 +
  INTEL MENLOW THERMAL DRIVER
  M:    Sujith Thomas <sujith.thomas@intel.com>
  L:    platform-driver-x86@vger.kernel.org
@@@ -9303,7 -9133,6 +9302,7 @@@ M:      Rajneesh Bhardwaj <irenic.rajneesh@g
  M:    David E Box <david.e.box@intel.com>
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-platform-intel-pmc
  F:    drivers/platform/x86/intel_pmc_core*
  
  INTEL PMIC GPIO DRIVERS
@@@ -9414,7 -9243,7 +9413,7 @@@ W:      https://slimbootloader.github.io/sec
  F:    drivers/platform/x86/intel-wmi-sbl-fw-update.c
  
  INTEL WMI THUNDERBOLT FORCE POWER DRIVER
 -M:    Mario Limonciello <mario.limonciello@dell.com>
 +L:    Dell.Client.Kernel@dell.com
  S:    Maintained
  F:    drivers/platform/x86/intel-wmi-thunderbolt.c
  
@@@ -9444,7 -9273,6 +9443,7 @@@ Q:      https://patchwork.kernel.org/project
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/sgx
  F:    Documentation/x86/sgx.rst
  F:    arch/x86/entry/vdso/vsgx.S
 +F:    arch/x86/include/asm/sgx.h
  F:    arch/x86/include/uapi/asm/sgx.h
  F:    arch/x86/kernel/cpu/sgx/*
  F:    tools/testing/selftests/sgx/*
@@@ -9454,7 -9282,6 +9453,7 @@@ INTERCONNECT AP
  M:    Georgi Djakov <djakov@kernel.org>
  L:    linux-pm@vger.kernel.org
  S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/djakov/icc.git
  F:    Documentation/devicetree/bindings/interconnect/
  F:    Documentation/driver-api/interconnect.rst
  F:    drivers/interconnect/
@@@ -9462,13 -9289,6 +9461,13 @@@ F:    include/dt-bindings/interconnect
  F:    include/linux/interconnect-provider.h
  F:    include/linux/interconnect.h
  
 +INTERRUPT COUNTER DRIVER
 +M:    Oleksij Rempel <o.rempel@pengutronix.de>
 +R:    Pengutronix Kernel Team <kernel@pengutronix.de>
 +L:    linux-iio@vger.kernel.org
 +F:    Documentation/devicetree/bindings/counter/interrupt-counter.yaml
 +F:    drivers/counter/interrupt-cnt.c
 +
  INVENSENSE ICM-426xx IMU DRIVER
  M:    Jean-Baptiste Maneyrol <jmaneyrol@invensense.com>
  L:    linux-iio@vger.kernel.org
@@@ -9481,7 -9301,7 +9480,7 @@@ INVENSENSE MPU-3050 GYROSCOPE DRIVE
  M:    Linus Walleij <linus.walleij@linaro.org>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
 +F:    Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
  F:    drivers/iio/gyro/mpu3050*
  
  IOC3 ETHERNET DRIVER
@@@ -9870,7 -9690,6 +9869,7 @@@ F:      scripts/*vmlinux
  F:    scripts/Kbuild*
  F:    scripts/Makefile*
  F:    scripts/basic/
 +F:    scripts/dummy-tools/
  F:    scripts/mk*
  F:    scripts/mod/
  F:    scripts/package/
@@@ -9897,11 -9716,6 +9896,11 @@@ F:    include/uapi/linux/sunrpc
  F:    net/sunrpc/
  F:    Documentation/filesystems/nfs/
  
 +KERNEL REGRESSIONS
 +M:    Thorsten Leemhuis <linux@leemhuis.info>
 +L:    regressions@lists.linux.dev
 +S:    Supported
 +
  KERNEL SELFTEST FRAMEWORK
  M:    Shuah Khan <shuah@kernel.org>
  M:    Shuah Khan <skhan@linuxfoundation.org>
@@@ -10064,14 -9878,6 +10063,14 @@@ F:  include/keys/trusted-type.
  F:    include/keys/trusted_tpm.h
  F:    security/keys/trusted-keys/
  
 +KEYS-TRUSTED-TEE
 +M:    Sumit Garg <sumit.garg@linaro.org>
 +L:    linux-integrity@vger.kernel.org
 +L:    keyrings@vger.kernel.org
 +S:    Supported
 +F:    include/keys/trusted_tee.h
 +F:    security/keys/trusted-keys/trusted_tee.c
 +
  KEYS/KEYRINGS
  M:    David Howells <dhowells@redhat.com>
  M:    Jarkko Sakkinen <jarkko@kernel.org>
@@@ -10223,6 -10029,7 +10222,6 @@@ F:   scripts/leaking_addresses.p
  
  LED SUBSYSTEM
  M:    Pavel Machek <pavel@ucw.cz>
 -R:    Dan Murphy <dmurphy@ti.com>
  L:    linux-leds@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
@@@ -10239,7 -10046,7 +10238,7 @@@ F:   drivers/misc/eeprom/eeprom.
  LEGO MINDSTORMS EV3
  R:    David Lechner <david@lechnology.com>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/power/supply/lego_ev3_battery.txt
 +F:    Documentation/devicetree/bindings/power/supply/lego,ev3-battery.yaml
  F:    arch/arm/boot/dts/da850-lego-ev3.dts
  F:    drivers/power/supply/lego_ev3_battery.c
  
@@@ -10652,12 -10459,6 +10651,12 @@@ S: Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
  F:    drivers/hid/hid-lg-g15.c
  
 +LONTIUM LT8912B MIPI TO HDMI BRIDGE
 +M:    Adrien Grassein <adrien.grassein@gmail.com>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/display/bridge/lontium,lt8912b.yaml
 +F:    drivers/gpu/drm/bridge/lontium-lt8912b.c
 +
  LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
  M:    Sathya Prakash <sathya.prakash@broadcom.com>
  M:    Sreekanth Reddy <sreekanth.reddy@broadcom.com>
@@@ -10804,7 -10605,6 +10803,7 @@@ S:   Maintaine
  F:    drivers/mailbox/
  F:    include/linux/mailbox_client.h
  F:    include/linux/mailbox_controller.h
 +F:    Documentation/devicetree/bindings/mailbox/
  
  MAILBOX ARM MHUv2
  M:    Viresh Kumar <viresh.kumar@linaro.org>
@@@ -10890,7 -10690,6 +10889,7 @@@ F:   include/linux/mv643xx.
  
  MARVELL MV88X3310 PHY DRIVER
  M:    Russell King <linux@armlinux.org.uk>
 +M:    Marek Behun <marek.behun@nic.cz>
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/phy/marvell10g.c
@@@ -10916,8 -10715,7 +10915,8 @@@ F:   drivers/net/ethernet/marvell/mvpp2
  
  MARVELL MWIFIEX WIRELESS DRIVER
  M:    Amitkumar Karwar <amitkarwar@gmail.com>
 -M:    Ganapathi Bhat <ganapathi.bhat@nxp.com>
 +M:    Ganapathi Bhat <ganapathi017@gmail.com>
 +M:    Sharvari Harisangam <sharvari.harisangam@nxp.com>
  M:    Xinming Hu <huxinming820@gmail.com>
  L:    linux-wireless@vger.kernel.org
  S:    Maintained
@@@ -10990,13 -10788,6 +10989,13 @@@ S: Orpha
  F:    drivers/video/fbdev/matrox/matroxfb_*
  F:    include/uapi/linux/matroxfb.h
  
 +MAX15301 DRIVER
 +M:    Daniel Nilsson <daniel.nilsson@flex.com>
 +L:    linux-hwmon@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/hwmon/max15301.rst
 +F:    drivers/hwmon/pmbus/max15301.c
 +
  MAX16065 HARDWARE MONITOR DRIVER
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
@@@ -11076,7 -10867,7 +11075,7 @@@ F:   drivers/regulator/max77802-regulator
  F:    include/dt-bindings/*/*max77802.h
  
  MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-pm@vger.kernel.org
  S:    Supported
@@@ -11085,7 -10876,7 +11084,7 @@@ F:   drivers/power/supply/max77693_charge
  
  MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
  M:    Chanwoo Choi <cw00.choi@samsung.com>
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-kernel@vger.kernel.org
  S:    Supported
@@@ -11113,7 -10904,8 +11112,7 @@@ T:   git git://linuxtv.org/media_tree.gi
  F:    drivers/media/radio/radio-maxiradio*
  
  MCAN MMIO DEVICE DRIVER
 -M:    Dan Murphy <dmurphy@ti.com>
 -M:    Pankaj Sharma <pankj.sharma@samsung.com>
 +M:    Chandrasekar Ramakrishnan <rcsekar@samsung.com>
  L:    linux-can@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@@ -11311,12 -11103,12 +11310,12 @@@ F:        drivers/media/platform/renesas-ceu.
  F:    include/media/drv-intf/renesas-ceu.h
  
  MEDIA DRIVERS FOR RENESAS - DRIF
 -M:    Ramesh Shanmugasundaram <rashanmu@gmail.com>
 +M:    Fabrizio Castro <fabrizio.castro.jz@renesas.com>
  L:    linux-media@vger.kernel.org
  L:    linux-renesas-soc@vger.kernel.org
  S:    Supported
  T:    git git://linuxtv.org/media_tree.git
 -F:    Documentation/devicetree/bindings/media/renesas,drif.txt
 +F:    Documentation/devicetree/bindings/media/renesas,drif.yaml
  F:    drivers/media/platform/rcar_drif.c
  
  MEDIA DRIVERS FOR RENESAS - FCP
@@@ -11373,7 -11165,7 +11372,7 @@@ T:   git git://linuxtv.org/media_tree.gi
  F:    drivers/media/dvb-frontends/stv6111*
  
  MEDIA DRIVERS FOR STM32 - DCMI
 -M:    Hugues Fruchet <hugues.fruchet@st.com>
 +M:    Hugues Fruchet <hugues.fruchet@foss.st.com>
  L:    linux-media@vger.kernel.org
  S:    Supported
  T:    git git://linuxtv.org/media_tree.git
@@@ -11485,7 -11277,7 +11484,7 @@@ F:   drivers/media/platform/mtk-vpu
  MEDIATEK MMC/SD/SDIO DRIVER
  M:    Chaotian Jing <chaotian.jing@mediatek.com>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/mmc/mtk-sd.txt
 +F:    Documentation/devicetree/bindings/mmc/mtk-sd.yaml
  F:    drivers/mmc/host/mtk-sd.c
  
  MEDIATEK MT76 WIRELESS LAN DRIVER
@@@ -11502,12 -11294,6 +11501,12 @@@ L: linux-wireless@vger.kernel.or
  S:    Maintained
  F:    drivers/net/wireless/mediatek/mt7601u/
  
 +MEDIATEK MT7621 CLOCK DRIVER
 +M:    Sergio Paracuellos <sergio.paracuellos@gmail.com>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/clock/mediatek,mt7621-sysc.yaml
 +F:    drivers/clk/ralink/clk-mt7621.c
 +
  MEDIATEK MT7621/28/88 I2C DRIVER
  M:    Stefan Roese <sr@denx.de>
  L:    linux-i2c@vger.kernel.org
@@@ -11650,8 -11436,8 +11649,8 @@@ Q:   https://patchwork.kernel.org/project
  F:    drivers/net/ethernet/mellanox/mlxfw/
  
  MELLANOX HARDWARE PLATFORM SUPPORT
 -M:    Andy Shevchenko <andy@infradead.org>
 -M:    Darren Hart <dvhart@infradead.org>
 +M:    Hans de Goede <hdegoede@redhat.com>
 +M:    Mark Gross <mgross@linux.intel.com>
  M:    Vadim Pasternak <vadimp@nvidia.com>
  L:    platform-driver-x86@vger.kernel.org
  S:    Supported
@@@ -11742,7 -11528,7 +11741,7 @@@ F:   include/linux/memblock.
  F:    mm/memblock.c
  
  MEMORY CONTROLLER DRIVERS
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
@@@ -11770,7 -11556,6 +11769,7 @@@ F:   include/linux/gfp.
  F:    include/linux/memory_hotplug.h
  F:    include/linux/mm.h
  F:    include/linux/mmzone.h
 +F:    include/linux/pagewalk.h
  F:    include/linux/vmalloc.h
  F:    mm/
  
@@@ -12011,7 -11796,7 +12010,7 @@@ MICROCHIP SAMA5D2-COMPATIBLE ADC DRIVE
  M:    Eugen Hristev <eugen.hristev@microchip.com>
  L:    linux-iio@vger.kernel.org
  S:    Supported
 -F:    Documentation/devicetree/bindings/iio/adc/at91-sama5d2_adc.txt
 +F:    Documentation/devicetree/bindings/iio/adc/atmel,sama5d2-adc.yaml
  F:    drivers/iio/adc/at91-sama5d2_adc.c
  F:    include/dt-bindings/iio/adc/at91-sama5d2_adc.h
  
@@@ -12075,22 -11860,6 +12074,22 @@@ F: drivers/scsi/smartpqi/smartpqi*.[ch
  F:    include/linux/cciss*.h
  F:    include/uapi/linux/cciss*.h
  
 +MICROSOFT SURFACE BATTERY AND AC DRIVERS
 +M:    Maximilian Luz <luzmaximilian@gmail.com>
 +L:    linux-pm@vger.kernel.org
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    drivers/power/supply/surface_battery.c
 +F:    drivers/power/supply/surface_charger.c
 +
 +MICROSOFT SURFACE DTX DRIVER
 +M:    Maximilian Luz <luzmaximilian@gmail.com>
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/driver-api/surface_aggregator/clients/dtx.rst
 +F:    drivers/platform/surface/surface_dtx.c
 +F:    include/uapi/linux/surface_aggregator/dtx.h
 +
  MICROSOFT SURFACE GPE LID SUPPORT DRIVER
  M:    Maximilian Luz <luzmaximilian@gmail.com>
  L:    platform-driver-x86@vger.kernel.org
@@@ -12106,25 -11875,12 +12105,25 @@@ S:        Maintaine
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
  F:    drivers/platform/surface/
  
 +MICROSOFT SURFACE HID TRANSPORT DRIVER
 +M:    Maximilian Luz <luzmaximilian@gmail.com>
 +L:    linux-input@vger.kernel.org
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    drivers/hid/surface-hid/
 +
  MICROSOFT SURFACE HOT-PLUG DRIVER
  M:    Maximilian Luz <luzmaximilian@gmail.com>
  L:    platform-driver-x86@vger.kernel.org
  S:    Maintained
  F:    drivers/platform/surface/surface_hotplug.c
  
 +MICROSOFT SURFACE PLATFORM PROFILE DRIVER
 +M:    Maximilian Luz <luzmaximilian@gmail.com>
 +L:    platform-driver-x86@vger.kernel.org
 +S:    Maintained
 +F:    drivers/platform/surface/surface_platform_profile.c
 +
  MICROSOFT SURFACE PRO 3 BUTTON DRIVER
  M:    Chen Yu <yu.c.chen@intel.com>
  L:    platform-driver-x86@vger.kernel.org
@@@ -12140,7 -11896,6 +12139,7 @@@ F:   Documentation/driver-api/surface_agg
  F:    drivers/platform/surface/aggregator/
  F:    drivers/platform/surface/surface_acpi_notify.c
  F:    drivers/platform/surface/surface_aggregator_cdev.c
 +F:    drivers/platform/surface/surface_aggregator_registry.c
  F:    include/linux/surface_acpi_notify.h
  F:    include/linux/surface_aggregator/
  F:    include/uapi/linux/surface_aggregator/
@@@ -12332,7 -12087,8 +12331,7 @@@ F:   drivers/media/pci/meye
  F:    include/uapi/linux/meye.h
  
  MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 -M:    Jiri Slaby <jirislaby@kernel.org>
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/driver-api/serial/moxa-smartio.rst
  F:    drivers/tty/mxser.*
  
@@@ -12476,6 -12232,11 +12475,6 @@@ F:  drivers/mux
  F:    include/dt-bindings/mux/
  F:    include/linux/mux/
  
 -MULTITECH MULTIPORT CARD (ISICOM)
 -S:    Orphan
 -F:    drivers/tty/isicom.c
 -F:    include/linux/isicom.h
 -
  MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
  M:    Bin Liu <b-liu@ti.com>
  L:    linux-usb@vger.kernel.org
@@@ -12504,7 -12265,7 +12503,7 @@@ M:   Stefan Agner <stefan@agner.ch
  L:    dri-devel@lists.freedesktop.org
  S:    Supported
  T:    git git://anongit.freedesktop.org/drm/drm-misc
 -F:    Documentation/devicetree/bindings/display/mxsfb.txt
 +F:    Documentation/devicetree/bindings/display/fsl,lcdif.yaml
  F:    drivers/gpu/drm/mxsfb/
  
  MYLEX DAC960 PCI RAID Controller
@@@ -12623,15 -12384,6 +12622,15 @@@ F: include/net/netrom.
  F:    include/uapi/linux/netrom.h
  F:    net/netrom/
  
 +NETRONIX EMBEDDED CONTROLLER
 +M:    Jonathan Neuschäfer <j.neuschaefer@gmx.net>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/mfd/netronix,ntxec.yaml
 +F:    drivers/mfd/ntxec.c
 +F:    drivers/pwm/pwm-ntxec.c
 +F:    drivers/rtc/rtc-ntxec.c
 +F:    include/linux/mfd/ntxec.h
 +
  NETRONOME ETHERNET DRIVERS
  M:    Simon Horman <simon.horman@netronome.com>
  R:    Jakub Kicinski <kuba@kernel.org>
@@@ -12784,13 -12536,12 +12783,13 @@@ NETWORKING [MPTCP
  M:    Mat Martineau <mathew.j.martineau@linux.intel.com>
  M:    Matthieu Baerts <matthieu.baerts@tessares.net>
  L:    netdev@vger.kernel.org
 -L:    mptcp@lists.01.org
 +L:    mptcp@lists.linux.dev
  S:    Maintained
  W:    https://github.com/multipath-tcp/mptcp_net-next/wiki
  B:    https://github.com/multipath-tcp/mptcp_net-next/issues
  F:    Documentation/networking/mptcp-sysctl.rst
  F:    include/net/mptcp.h
 +F:    include/trace/events/mptcp.h
  F:    include/uapi/linux/mptcp.h
  F:    net/mptcp/
  F:    tools/testing/selftests/net/mptcp/
@@@ -13075,12 -12826,6 +13074,12 @@@ F: drivers/nvmem
  F:    include/linux/nvmem-consumer.h
  F:    include/linux/nvmem-provider.h
  
 +NXP C45 TJA11XX PHY DRIVER
 +M:    Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/phy/nxp-c45-tja11xx.c
 +
  NXP FSPI DRIVER
  M:    Ashish Kumar <ashish.kumar@nxp.com>
  R:    Yogesh Gaur <yogeshgaur.83@gmail.com>
@@@ -13121,7 -12866,7 +13120,7 @@@ F:   Documentation/devicetree/bindings/re
  F:    drivers/regulator/pf8x00-regulator.c
  
  NXP PTN5150A CC LOGIC AND EXTCON DRIVER
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@@ -13164,21 -12909,6 +13163,21 @@@ L: linux-nfc@lists.01.org (moderated fo
  S:    Supported
  F:    drivers/nfc/nxp-nci
  
 +NXP i.MX 8QXP/8QM JPEG V4L2 DRIVER
 +M:    Mirela Rabulea <mirela.rabulea@nxp.com>
 +R:    NXP Linux Team <linux-imx@nxp.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/media/imx8-jpeg.yaml
 +F:    drivers/media/platform/imx-jpeg
 +
 +NZXT-KRAKEN2 HARDWARE MONITORING DRIVER
 +M:    Jonas Malaco <jonas@protocubo.io>
 +L:    linux-hwmon@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/hwmon/nzxt-kraken2.rst
 +F:    drivers/hwmon/nzxt-kraken2.c
 +
  OBJAGG
  M:    Jiri Pirko <jiri@nvidia.com>
  L:    netdev@vger.kernel.org
@@@ -13453,7 -13183,7 +13452,7 @@@ M:   Rui Miguel Silva <rmfrfs@gmail.com
  L:    linux-media@vger.kernel.org
  S:    Maintained
  T:    git git://linuxtv.org/media_tree.git
 -F:    Documentation/devicetree/bindings/media/i2c/ov2680.yaml
 +F:    Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
  F:    drivers/media/i2c/ov2680.c
  
  OMNIVISION OV2685 SENSOR DRIVER
@@@ -14372,7 -14102,7 +14371,7 @@@ L:   linux-gpio@vger.kernel.or
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
  F:    Documentation/devicetree/bindings/pinctrl/
 -F:    Documentation/driver-api/pinctl.rst
 +F:    Documentation/driver-api/pin-control.rst
  F:    drivers/pinctrl/
  F:    include/linux/pinctrl/
  
@@@ -14427,7 -14157,7 +14426,7 @@@ F:   drivers/pinctrl/renesas
  
  PIN CONTROLLER - SAMSUNG
  M:    Tomasz Figa <tomasz.figa@gmail.com>
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    linux-samsung-soc@vger.kernel.org
@@@ -14548,7 -14278,7 +14547,7 @@@ PNI RM3100 IIO DRIVE
  M:    Song Qiang <songqiang1304521@gmail.com>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/iio/magnetometer/pni,rm3100.txt
 +F:    Documentation/devicetree/bindings/iio/magnetometer/pni,rm3100.yaml
  F:    drivers/iio/magnetometer/rm3100*
  
  PNP SUPPORT
@@@ -14583,15 -14313,6 +14582,15 @@@ F: include/linux/pm_
  F:    include/linux/powercap.h
  F:    kernel/configs/nopm.config
  
 +DYNAMIC THERMAL POWER MANAGEMENT (DTPM)
 +M:    Daniel Lezcano <daniel.lezcano@kernel.org>
 +L:    linux-pm@vger.kernel.org
 +S:    Supported
 +B:    https://bugzilla.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +F:    drivers/powercap/dtpm*
 +F:    include/linux/dtpm.h
 +
  POWER STATE COORDINATION INTERFACE (PSCI)
  M:    Mark Rutland <mark.rutland@arm.com>
  M:    Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
@@@ -14669,7 -14390,7 +14668,7 @@@ F:   kernel/sched/psi.
  
  PRINTK
  M:    Petr Mladek <pmladek@suse.com>
 -M:    Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
 +M:    Sergey Senozhatsky <senozhatsky@chromium.org>
  R:    Steven Rostedt <rostedt@goodmis.org>
  R:    John Ogness <john.ogness@linutronix.de>
  S:    Maintained
@@@ -14986,11 -14707,15 +14985,11 @@@ F:        drivers/net/ethernet/qlogic/qlcnic
  QLOGIC QLGE 10Gb ETHERNET DRIVER
  M:    Manish Chopra <manishc@marvell.com>
  M:    GR-Linux-NIC-Dev@marvell.com
 -L:    netdev@vger.kernel.org
 -S:    Supported
 -F:    drivers/staging/qlge/
 -
 -QLOGIC QLGE 10Gb ETHERNET DRIVER
  M:    Coiby Xu <coiby.xu@gmail.com>
  L:    netdev@vger.kernel.org
 -S:    Maintained
 +S:    Supported
  F:    Documentation/networking/device_drivers/qlogic/qlge.rst
 +F:    drivers/staging/qlge/
  
  QM1D1B0004 MEDIA DRIVER
  M:    Akihiro Tsukada <tskd08@gmail.com>
@@@ -15061,7 -14786,7 +15060,7 @@@ M:   Todor Tomov <todor.too@gmail.com
  L:    linux-media@vger.kernel.org
  S:    Maintained
  F:    Documentation/admin-guide/media/qcom_camss.rst
 -F:    Documentation/devicetree/bindings/media/qcom,camss.txt
 +F:    Documentation/devicetree/bindings/media/*camss*
  F:    drivers/media/platform/qcom/camss/
  
  QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
@@@ -15130,14 -14855,6 +15129,14 @@@ L: linux-arm-msm@vger.kernel.or
  S:    Maintained
  F:    drivers/iommu/arm/arm-smmu/qcom_iommu.c
  
 +QUALCOMM IPC ROUTER (QRTR) DRIVER
 +M:    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 +L:    linux-arm-msm@vger.kernel.org
 +S:    Maintained
 +F:    include/trace/events/qrtr.h
 +F:    include/uapi/linux/qrtr.h
 +F:    net/qrtr/
 +
  QUALCOMM IPCC MAILBOX DRIVER
  M:    Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  L:    linux-arm-msm@vger.kernel.org
@@@ -15360,7 -15077,7 +15359,7 @@@ M:   Laurent Pinchart <laurent.pinchart+r
  M:    Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
  L:    linux-media@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/media/i2c/rdacm2x-gmsl.yaml
 +F:    Documentation/devicetree/bindings/media/i2c/imi,rdacm2x-gmsl.yaml
  F:    drivers/media/i2c/max9271.c
  F:    drivers/media/i2c/max9271.h
  F:    drivers/media/i2c/rdacm21.c
@@@ -15487,7 -15204,6 +15486,7 @@@ F:   fs/reiserfs
  REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
  M:    Bjorn Andersson <bjorn.andersson@linaro.org>
 +M:    Mathieu Poirier <mathieu.poirier@linaro.org>
  L:    linux-remoteproc@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next
@@@ -15501,7 -15217,6 +15500,7 @@@ F:   include/linux/remoteproc
  REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
  M:    Bjorn Andersson <bjorn.andersson@linaro.org>
 +M:    Mathieu Poirier <mathieu.poirier@linaro.org>
  L:    linux-remoteproc@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next
@@@ -15539,7 -15254,7 +15538,7 @@@ RENESAS R-CAR GYROADC DRIVE
  M:    Marek Vasut <marek.vasut@gmail.com>
  L:    linux-iio@vger.kernel.org
  S:    Supported
 -F:    Documentation/devicetree/bindings/iio/adc/renesas,gyroadc.txt
 +F:    Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml
  F:    drivers/iio/adc/rcar-gyroadc.c
  
  RENESAS R-CAR I2C DRIVERS
@@@ -15643,8 -15358,8 +15642,8 @@@ N:   risc
  K:    riscv
  
  RNBD BLOCK DRIVERS
 -M:    Danil Kipnis <danil.kipnis@cloud.ionos.com>
 -M:    Jack Wang <jinpu.wang@cloud.ionos.com>
 +M:    Md. Haris Iqbal <haris.iqbal@ionos.com>
 +M:    Jack Wang <jinpu.wang@ionos.com>
  L:    linux-block@vger.kernel.org
  S:    Maintained
  F:    drivers/block/rnbd/
@@@ -15692,6 -15407,12 +15691,6 @@@ L:  netdev@vger.kernel.or
  S:    Supported
  F:    drivers/net/ethernet/rocker/
  
 -ROCKETPORT DRIVER
 -S:    Maintained
 -W:    http://www.comtrol.com
 -F:    Documentation/driver-api/serial/rocket.rst
 -F:    drivers/tty/rocket*
 -
  ROCKETPORT EXPRESS/INFINITY DRIVER
  M:    Kevin Cernekee <cernekee@gmail.com>
  L:    linux-serial@vger.kernel.org
@@@ -15730,27 -15451,20 +15729,27 @@@ F:        Documentation/devicetree/bindings/mf
  F:    Documentation/devicetree/bindings/regulator/rohm,bd70528-regulator.txt
  F:    drivers/clk/clk-bd718x7.c
  F:    drivers/gpio/gpio-bd70528.c
 +F:    drivers/gpio/gpio-bd71815.c
  F:    drivers/gpio/gpio-bd71828.c
  F:    drivers/mfd/rohm-bd70528.c
  F:    drivers/mfd/rohm-bd71828.c
  F:    drivers/mfd/rohm-bd718x7.c
 +F:    drivers/mfd/rohm-bd9576.c
  F:    drivers/power/supply/bd70528-charger.c
  F:    drivers/regulator/bd70528-regulator.c
 +F:    drivers/regulator/bd71815-regulator.c
  F:    drivers/regulator/bd71828-regulator.c
  F:    drivers/regulator/bd718x7-regulator.c
 +F:    drivers/regulator/bd9576-regulator.c
  F:    drivers/regulator/rohm-regulator.c
  F:    drivers/rtc/rtc-bd70528.c
  F:    drivers/watchdog/bd70528_wdt.c
 +F:    drivers/watchdog/bd9576_wdt.c
  F:    include/linux/mfd/rohm-bd70528.h
 +F:    include/linux/mfd/rohm-bd71815.h
  F:    include/linux/mfd/rohm-bd71828.h
  F:    include/linux/mfd/rohm-bd718x7.h
 +F:    include/linux/mfd/rohm-bd957x.h
  F:    include/linux/mfd/rohm-generic.h
  F:    include/linux/mfd/rohm-shared.h
  
@@@ -15826,8 -15540,8 +15825,8 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    drivers/net/wireless/realtek/rtl8xxxu/
  
  RTRS TRANSPORT DRIVERS
- M:    Danil Kipnis <danil.kipnis@cloud.ionos.com>
- M:    Jack Wang <jinpu.wang@cloud.ionos.com>
+ M:    Md. Haris Iqbal <haris.iqbal@ionos.com>
+ M:    Jack Wang <jinpu.wang@ionos.com>
  L:    linux-rdma@vger.kernel.org
  S:    Maintained
  F:    drivers/infiniband/ulp/rtrs/
@@@ -15919,8 -15633,8 +15918,8 @@@ F:   Documentation/s390/pci.rs
  
  S390 VFIO AP DRIVER
  M:    Tony Krowiak <akrowiak@linux.ibm.com>
 -M:    Pierre Morel <pmorel@linux.ibm.com>
  M:    Halil Pasic <pasic@linux.ibm.com>
 +M:    Jason Herne <jjherne@linux.ibm.com>
  L:    linux-s390@vger.kernel.org
  S:    Supported
  W:    http://www.ibm.com/developerworks/linux/linux390/
@@@ -15932,7 -15646,6 +15931,7 @@@ F:   drivers/s390/crypto/vfio_ap_private.
  S390 VFIO-CCW DRIVER
  M:    Cornelia Huck <cohuck@redhat.com>
  M:    Eric Farman <farman@linux.ibm.com>
 +M:    Matthew Rosato <mjrosato@linux.ibm.com>
  R:    Halil Pasic <pasic@linux.ibm.com>
  L:    linux-s390@vger.kernel.org
  L:    kvm@vger.kernel.org
@@@ -15943,7 -15656,6 +15942,7 @@@ F:   include/uapi/linux/vfio_ccw.
  
  S390 VFIO-PCI DRIVER
  M:    Matthew Rosato <mjrosato@linux.ibm.com>
 +M:    Eric Farman <farman@linux.ibm.com>
  L:    linux-s390@vger.kernel.org
  L:    kvm@vger.kernel.org
  S:    Supported
@@@ -15965,13 -15677,6 +15964,13 @@@ S: Supporte
  W:    http://www.ibm.com/developerworks/linux/linux390/
  F:    drivers/s390/scsi/zfcp_*
  
 +S3C ADC BATTERY DRIVER
 +M:    Krzysztof Kozlowski <krzk@kernel.org>
 +L:    linux-samsung-soc@vger.kernel.org
 +S:    Odd Fixes
 +F:    drivers/power/supply/s3c_adc_battery.c
 +F:    include/linux/s3c_adc_battery.h
 +
  S3C24XX SD/MMC Driver
  M:    Ben Dooks <ben-linux@fluff.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@@ -16011,7 -15716,7 +16010,7 @@@ F:   Documentation/admin-guide/LSM/SafeSe
  F:    security/safesetid/
  
  SAMSUNG AUDIO (ASoC) DRIVERS
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Sylwester Nawrocki <s.nawrocki@samsung.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Supported
@@@ -16019,7 -15724,7 +16018,7 @@@ F:   Documentation/devicetree/bindings/so
  F:    sound/soc/samsung/
  
  SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  L:    linux-crypto@vger.kernel.org
  L:    linux-samsung-soc@vger.kernel.org
  S:    Maintained
@@@ -16054,7 -15759,7 +16053,7 @@@ S:   Maintaine
  F:    drivers/platform/x86/samsung-laptop.c
  
  SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-kernel@vger.kernel.org
  L:    linux-samsung-soc@vger.kernel.org
@@@ -16079,7 -15784,7 +16078,7 @@@ F:   drivers/media/platform/s3c-camif
  F:    include/media/drv-intf/s3c_camif.h
  
  SAMSUNG S3FWRN5 NFC DRIVER
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Krzysztof Opasiak <k.opasiak@samsung.com>
  L:    linux-nfc@lists.01.org (moderated for non-subscribers)
  S:    Maintained
@@@ -16099,7 -15804,7 +16098,7 @@@ S:   Supporte
  F:    drivers/media/i2c/s5k5baf.c
  
  SAMSUNG S5P Security SubSystem (SSS) DRIVER
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Vladimir Zapolskiy <vz@mleia.com>
  L:    linux-crypto@vger.kernel.org
  L:    linux-samsung-soc@vger.kernel.org
@@@ -16131,7 -15836,7 +16130,7 @@@ F:   include/linux/clk/samsung.
  F:    include/linux/platform_data/clk-s3c2410.h
  
  SAMSUNG SPI DRIVERS
 -M:    Krzysztof Kozlowski <krzk@kernel.org>
 +M:    Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
  M:    Andi Shyti <andi@etezian.org>
  L:    linux-spi@vger.kernel.org
  L:    linux-samsung-soc@vger.kernel.org
@@@ -16894,13 -16599,6 +16893,13 @@@ F: drivers/firmware/arm_sdei.
  F:    include/linux/arm_sdei.h
  F:    include/uapi/linux/arm_sdei.h
  
 +SOFTWARE NODES
 +R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 +R:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
 +L:    linux-acpi@vger.kernel.org
 +S:    Maintained
 +F:    drivers/base/swnode.c
 +
  SOFTWARE RAID (Multiple Disks) SUPPORT
  M:    Song Liu <song@kernel.org>
  L:    linux-raid@vger.kernel.org
@@@ -17163,8 -16861,6 +17162,8 @@@ F:   arch/arm/mach-spear
  
  SPI NOR SUBSYSTEM
  M:    Tudor Ambarus <tudor.ambarus@microchip.com>
 +R:    Michael Walle <michael@walle.cc>
 +R:    Pratyush Yadav <p.yadav@ti.com>
  L:    linux-mtd@lists.infradead.org
  S:    Maintained
  W:    http://www.linux-mtd.infradead.org/
@@@ -17189,10 -16885,8 +17188,10 @@@ F: tools/spi
  
  SPIDERNET NETWORK DRIVER for CELL
  M:    Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
 +M:    Geoff Levand <geoff@infradead.org>
  L:    netdev@vger.kernel.org
 -S:    Supported
 +L:    linuxppc-dev@lists.ozlabs.org
 +S:    Maintained
  F:    Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
  F:    drivers/net/ethernet/toshiba/spider_net*
  
@@@ -17234,7 -16928,7 +17233,7 @@@ M:   Lorenzo Bianconi <lorenzo.bianconi83
  L:    linux-iio@vger.kernel.org
  S:    Maintained
  W:    http://www.st.com/
 -F:    Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
 +F:    Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
  F:    drivers/iio/imu/st_lsm6dsx/
  
  ST MIPID02 CSI-2 TO PARALLEL BRIDGE DRIVER
@@@ -17246,24 -16940,16 +17245,24 @@@ F:        Documentation/devicetree/bindings/me
  F:    drivers/media/i2c/st-mipid02.c
  
  ST STM32 I2C/SMBUS DRIVER
 -M:    Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
 +M:    Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
 +M:    Alain Volmat <alain.volmat@foss.st.com>
  L:    linux-i2c@vger.kernel.org
  S:    Maintained
  F:    drivers/i2c/busses/i2c-stm32*
  
 +ST STPDDC60 DRIVER
 +M:    Daniel Nilsson <daniel.nilsson@flex.com>
 +L:    linux-hwmon@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/hwmon/stpddc60.rst
 +F:    drivers/hwmon/pmbus/stpddc60.c
 +
  ST VL53L0X ToF RANGER(I2C) IIO DRIVER
  M:    Song Qiang <songqiang1304521@gmail.com>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 -F:    Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
 +F:    Documentation/devicetree/bindings/iio/proximity/st,vl53l0x.yaml
  F:    drivers/iio/proximity/vl53l0x-i2c.c
  
  STABLE BRANCH
@@@ -17280,6 -16966,12 +17279,6 @@@ L:  linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/staging/media/atomisp/
  
 -STAGING - COMEDI
 -M:    Ian Abbott <abbotti@mev.co.uk>
 -M:    H Hartley Sweeten <hsweeten@visionengravers.com>
 -S:    Odd Fixes
 -F:    drivers/staging/comedi/
 -
  STAGING - FIELDBUS SUBSYSTEM
  M:    Sven Van Asbroeck <TheSven73@gmail.com>
  S:    Maintained
@@@ -17346,7 -17038,7 +17345,7 @@@ F:   drivers/staging/vt665?
  
  STAGING SUBSYSTEM
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 -L:    devel@driverdev.osuosl.org
 +L:    linux-staging@lists.linux.dev
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
  F:    drivers/staging/
@@@ -17373,7 -17065,7 +17372,7 @@@ F:   kernel/jump_label.
  F:    kernel/static_call.c
  
  STI AUDIO (ASoC) DRIVERS
 -M:    Arnaud Pouliquen <arnaud.pouliquen@st.com>
 +M:    Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@@ -17393,15 -17085,15 +17392,15 @@@ T:        git git://linuxtv.org/media_tree.gi
  F:    drivers/media/usb/stk1160/
  
  STM32 AUDIO (ASoC) DRIVERS
 -M:    Olivier Moysan <olivier.moysan@st.com>
 -M:    Arnaud Pouliquen <arnaud.pouliquen@st.com>
 +M:    Olivier Moysan <olivier.moysan@foss.st.com>
 +M:    Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
  F:    sound/soc/stm/
  
  STM32 TIMER/LPTIMER DRIVERS
 -M:    Fabrice Gasnier <fabrice.gasnier@st.com>
 +M:    Fabrice Gasnier <fabrice.gasnier@foss.st.com>
  S:    Maintained
  F:    Documentation/ABI/testing/*timer-stm32
  F:    Documentation/devicetree/bindings/*/*stm32-*timer*
@@@ -17411,7 -17103,7 +17410,7 @@@ F:   include/linux/*/stm32-*tim
  
  STMMAC ETHERNET DRIVER
  M:    Giuseppe Cavallaro <peppe.cavallaro@st.com>
 -M:    Alexandre Torgue <alexandre.torgue@st.com>
 +M:    Alexandre Torgue <alexandre.torgue@foss.st.com>
  M:    Jose Abreu <joabreu@synopsys.com>
  L:    netdev@vger.kernel.org
  S:    Supported
@@@ -17577,7 -17269,7 +17576,7 @@@ F:   drivers/spi/spi-dw
  SYNOPSYS DESIGNWARE AXI DMAC DRIVER
  M:    Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
 +F:    Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
  F:    drivers/dma/dw-axi-dmac/
  
  SYNOPSYS DESIGNWARE DMAC DRIVER
@@@ -17993,7 -17685,7 +17992,7 @@@ TEXAS INSTRUMENTS' DAC7612 DAC DRIVE
  M:    Ricardo Ribalda <ribalda@kernel.org>
  L:    linux-iio@vger.kernel.org
  S:    Supported
 -F:    Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
 +F:    Documentation/devicetree/bindings/iio/dac/ti,dac7612.yaml
  F:    drivers/iio/dac/ti-dac7612.c
  
  TEXAS INSTRUMENTS DMA DRIVERS
@@@ -18135,13 -17827,6 +18134,13 @@@ M: Robert Richter <rric@kernel.org
  S:    Odd Fixes
  F:    drivers/gpio/gpio-thunderx.c
  
 +TI ADS131E0X ADC SERIES DRIVER
 +M:    Tomislav Denis <tomislav.denis@avl.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/adc/ti,ads131e08.yaml
 +F:    drivers/iio/adc/ti-ads131e08.c
 +
  TI AM437X VPFE DRIVER
  M:    "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -18160,6 -17845,7 +18159,6 @@@ S:   Maintaine
  F:    drivers/thermal/ti-soc-thermal/
  
  TI BQ27XXX POWER SUPPLY DRIVER
 -R:    Dan Murphy <dmurphy@ti.com>
  F:    drivers/power/supply/bq27xxx_battery.c
  F:    drivers/power/supply/bq27xxx_battery_i2c.c
  F:    include/linux/power/bq27xxx_battery.h
@@@ -18250,6 -17936,29 +18249,6 @@@ S:  Maintaine
  F:    sound/soc/codecs/isabelle*
  F:    sound/soc/codecs/lm49453*
  
 -TI LP855x BACKLIGHT DRIVER
 -M:    Milo Kim <milo.kim@ti.com>
 -S:    Maintained
 -F:    Documentation/driver-api/backlight/lp855x-driver.rst
 -F:    drivers/video/backlight/lp855x_bl.c
 -F:    include/linux/platform_data/lp855x.h
 -
 -TI LP8727 CHARGER DRIVER
 -M:    Milo Kim <milo.kim@ti.com>
 -S:    Maintained
 -F:    drivers/power/supply/lp8727_charger.c
 -F:    include/linux/platform_data/lp8727.h
 -
 -TI LP8788 MFD DRIVER
 -M:    Milo Kim <milo.kim@ti.com>
 -S:    Maintained
 -F:    drivers/iio/adc/lp8788_adc.c
 -F:    drivers/leds/leds-lp8788.c
 -F:    drivers/mfd/lp8788*.c
 -F:    drivers/power/supply/lp8788-charger.c
 -F:    drivers/regulator/lp8788-*.c
 -F:    include/linux/mfd/lp8788*.h
 -
  TI NETCP ETHERNET DRIVER
  M:    Wingman Kwok <w-kwok2@ti.com>
  M:    Murali Karicheri <m-karicheri2@ti.com>
@@@ -18270,6 -17979,13 +18269,6 @@@ L:  alsa-devel@alsa-project.org (moderat
  S:    Odd Fixes
  F:    sound/soc/codecs/tas571x*
  
 -TI TCAN4X5X DEVICE DRIVER
 -M:    Dan Murphy <dmurphy@ti.com>
 -L:    linux-can@vger.kernel.org
 -S:    Maintained
 -F:    Documentation/devicetree/bindings/net/can/tcan4x5x.txt
 -F:    drivers/net/can/m_can/tcan4x5x*
 -
  TI TRF7970A NFC DRIVER
  M:    Mark Greer <mgreer@animalcreek.com>
  L:    linux-wireless@vger.kernel.org
@@@ -19369,15 -19085,6 +19368,15 @@@ W: https://virtio-mem.gitlab.io
  F:    drivers/virtio/virtio_mem.c
  F:    include/uapi/linux/virtio_mem.h
  
 +VIRTIO SOUND DRIVER
 +M:    Anton Yakovlev <anton.yakovlev@opensynergy.com>
 +M:    "Michael S. Tsirkin" <mst@redhat.com>
 +L:    virtualization@lists.linux-foundation.org
 +L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    include/uapi/linux/virtio_snd.h
 +F:    sound/virtio/*
 +
  VIRTUAL BOX GUEST DEVICE DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
  M:    Arnd Bergmann <arnd@arndb.de>
@@@ -19426,7 -19133,7 +19425,7 @@@ VME SUBSYSTE
  M:    Martyn Welch <martyn@welchs.me.uk>
  M:    Manohar Vanga <manohar.vanga@gmail.com>
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 -L:    devel@driverdev.osuosl.org
 +L:    linux-kernel@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
  F:    Documentation/driver-api/vme.rst
@@@ -19457,7 -19164,7 +19456,7 @@@ S:   Maintaine
  F:    drivers/infiniband/hw/vmw_pvrdma/
  
  VMware PVSCSI driver
 -M:    Jim Gill <jgill@vmware.com>
 +M:    Vishal Bhakta <vbhakta@vmware.com>
  M:    VMware PV-Drivers <pv-drivers@vmware.com>
  L:    linux-scsi@vger.kernel.org
  S:    Maintained
@@@ -19516,7 -19223,7 +19515,7 @@@ F:   drivers/net/vrf.
  VSPRINTF
  M:    Petr Mladek <pmladek@suse.com>
  M:    Steven Rostedt <rostedt@goodmis.org>
 -M:    Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
 +M:    Sergey Senozhatsky <senozhatsky@chromium.org>
  R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  R:    Rasmus Villemoes <linux@rasmusvillemoes.dk>
  S:    Maintained
@@@ -19670,6 -19377,7 +19669,6 @@@ F:   Documentation/devicetree/bindings/so
  F:    Documentation/hwmon/wm83??.rst
  F:    arch/arm/mach-s3c/mach-crag6410*
  F:    drivers/clk/clk-wm83*.c
 -F:    drivers/extcon/extcon-arizona.c
  F:    drivers/gpio/gpio-*wm*.c
  F:    drivers/gpio/gpio-arizona.c
  F:    drivers/hwmon/wm83??-hwmon.c
@@@ -19693,7 -19401,7 +19692,7 @@@ F:   include/linux/mfd/wm8400
  F:    include/linux/regulator/arizona*
  F:    include/linux/wm97xx.h
  F:    include/sound/wm????.h
 -F:    sound/soc/codecs/arizona.?
 +F:    sound/soc/codecs/arizona*
  F:    sound/soc/codecs/cs47l24*
  F:    sound/soc/codecs/wm*
  
@@@ -20166,7 -19874,7 +20165,7 @@@ F:   drivers/staging/media/zoran
  ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
  M:    Minchan Kim <minchan@kernel.org>
  M:    Nitin Gupta <ngupta@vflare.org>
 -R:    Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
 +R:    Sergey Senozhatsky <senozhatsky@chromium.org>
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  F:    Documentation/admin-guide/blockdev/zram.rst
@@@ -20180,7 -19888,7 +20179,7 @@@ F:   drivers/tty/serial/zs.
  ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR
  M:    Minchan Kim <minchan@kernel.org>
  M:    Nitin Gupta <ngupta@vflare.org>
 -R:    Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
 +R:    Sergey Senozhatsky <senozhatsky@chromium.org>
  L:    linux-mm@kvack.org
  S:    Maintained
  F:    Documentation/vm/zsmalloc.rst
  
  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
  {
 -      struct sg_page_iter sg_iter;
 -      struct page *page;
 +      bool make_dirty = umem->writable && dirty;
 +      struct scatterlist *sg;
 +      unsigned int i;
  
        if (umem->nmap > 0)
                ib_dma_unmap_sg(dev, umem->sg_head.sgl, umem->sg_nents,
                                DMA_BIDIRECTIONAL);
  
 -      for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
 -              page = sg_page_iter_page(&sg_iter);
 -              unpin_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
 -      }
 +      for_each_sg(umem->sg_head.sgl, sg, umem->sg_nents, i)
 +              unpin_user_page_range_dirty_lock(sg_page(sg),
 +                      DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
  
        sg_free_table(&umem->sg_head);
  }
@@@ -100,10 -100,6 +100,6 @@@ unsigned long ib_umem_find_best_pgsz(st
         */
        pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT);
  
-       /* At minimum, drivers must support PAGE_SIZE or smaller */
-       if (WARN_ON(!(pgsz_bitmap & GENMASK(PAGE_SHIFT, 0))))
-               return 0;
        umem->iova = va = virt;
        /* The best result is the smallest page size that results in the minimum
         * number of required pages. Compute the largest page size that could
@@@ -309,8 -305,8 +305,8 @@@ int ib_umem_copy_from(void *dst, struc
        int ret;
  
        if (offset > umem->length || length > umem->length - offset) {
-               pr_err("ib_umem_copy_from not in range. offset: %zd umem length: %zd end: %zd\n",
-                      offset, umem->length, end);
+               pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
+                      __func__, offset, umem->length, end);
                return -EINVAL;
        }
  
@@@ -145,7 -145,7 +145,7 @@@ static void connect_reply_upcall(struc
  static int sched(struct c4iw_dev *dev, struct sk_buff *skb);
  
  static LIST_HEAD(timeout_list);
- static spinlock_t timeout_lock;
+ static DEFINE_SPINLOCK(timeout_lock);
  
  static void deref_cm_id(struct c4iw_ep_common *epc)
  {
@@@ -3610,14 -3610,13 +3610,14 @@@ int c4iw_destroy_listen(struct iw_cm_i
            ep->com.local_addr.ss_family == AF_INET) {
                err = cxgb4_remove_server_filter(
                        ep->com.dev->rdev.lldi.ports[0], ep->stid,
 -                      ep->com.dev->rdev.lldi.rxq_ids[0], 0);
 +                      ep->com.dev->rdev.lldi.rxq_ids[0], false);
        } else {
                struct sockaddr_in6 *sin6;
                c4iw_init_wr_wait(ep->com.wr_waitp);
                err = cxgb4_remove_server(
                                ep->com.dev->rdev.lldi.ports[0], ep->stid,
 -                              ep->com.dev->rdev.lldi.rxq_ids[0], 0);
 +                              ep->com.dev->rdev.lldi.rxq_ids[0],
 +                              ep->com.local_addr.ss_family == AF_INET6);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
@@@ -4452,7 -4451,6 +4452,6 @@@ c4iw_handler_func c4iw_handlers[NUM_CPL
  
  int __init c4iw_cm_init(void)
  {
-       spin_lock_init(&timeout_lock);
        skb_queue_head_init(&rxq);
  
        workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
@@@ -632,11 -632,22 +632,11 @@@ static void _dev_comp_vect_cpu_mask_cle
   */
  int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
  {
 -      int node = pcibus_to_node(dd->pcidev->bus);
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
        int curr_cpu, possible, i, ret;
        bool new_entry = false;
  
 -      /*
 -       * If the BIOS does not have the NUMA node information set, select
 -       * NUMA 0 so we get consistent performance.
 -       */
 -      if (node < 0) {
 -              dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
 -              node = 0;
 -      }
 -      dd->node = node;
 -
        local_mask = cpumask_of_node(dd->node);
        if (cpumask_first(local_mask) >= nr_cpu_ids)
                local_mask = topology_core_cpumask(0);
         * create an entry in the global affinity structure and initialize it.
         */
        if (!entry) {
 -              entry = node_affinity_allocate(node);
 +              entry = node_affinity_allocate(dd->node);
                if (!entry) {
                        dd_dev_err(dd,
                                   "Unable to allocate global affinity node\n");
        if (new_entry)
                node_affinity_add_tail(entry);
  
 +      dd->affinity_entry = entry;
        mutex_unlock(&node_affinity.lock);
  
        return 0;
@@@ -756,9 -766,10 +756,9 @@@ void hfi1_dev_affinity_clean_up(struct 
  {
        struct hfi1_affinity_node *entry;
  
 -      if (dd->node < 0)
 -              return;
 -
        mutex_lock(&node_affinity.lock);
 +      if (!dd->affinity_entry)
 +              goto unlock;
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;
         */
        _dev_comp_vect_cpu_mask_clean_up(dd, entry);
  unlock:
 +      dd->affinity_entry = NULL;
        mutex_unlock(&node_affinity.lock);
 -      dd->node = NUMA_NO_NODE;
  }
  
  /*
@@@ -962,7 -973,6 +962,6 @@@ void hfi1_put_irq_affinity(struct hfi1_
                           struct hfi1_msix_entry *msix)
  {
        struct cpu_mask_set *set = NULL;
-       struct hfi1_ctxtdata *rcd;
        struct hfi1_affinity_node *entry;
  
        mutex_lock(&node_affinity.lock);
        case IRQ_GENERAL:
                /* Don't do accounting for general contexts */
                break;
-       case IRQ_RCVCTXT:
-               rcd = (struct hfi1_ctxtdata *)msix->arg;
+       case IRQ_RCVCTXT: {
+               struct hfi1_ctxtdata *rcd = msix->arg;
                /* Don't do accounting for control contexts */
                if (rcd->ctxt != HFI1_CTRL_CTXT)
                        set = &entry->rcv_intr;
                break;
+       }
        case IRQ_NETDEVCTXT:
-               rcd = (struct hfi1_ctxtdata *)msix->arg;
                set = &entry->def_intr;
                break;
        default:
@@@ -69,7 -69,6 +69,6 @@@
  #include <rdma/ib_hdrs.h>
  #include <rdma/opa_addr.h>
  #include <linux/rhashtable.h>
- #include <linux/netdevice.h>
  #include <rdma/rdma_vt.h>
  
  #include "chip_registers.h"
@@@ -717,12 -716,6 +716,6 @@@ static inline void incr_cntr64(u64 *cnt
                (*cntr)++;
  }
  
- static inline void incr_cntr32(u32 *cntr)
- {
-       if (*cntr < (u32)-1LL)
-               (*cntr)++;
- }
  #define MAX_NAME_SIZE 64
  struct hfi1_msix_entry {
        enum irq_type type;
@@@ -864,7 -857,7 +857,7 @@@ struct hfi1_pportdata 
        u8 rx_pol_inv;
  
        u8 hw_pidx;     /* physical port index */
-       u8 port;        /* IB port number and index into dd->pports - 1 */
+       u32 port;        /* IB port number and index into dd->pports - 1 */
        /* type of neighbor node */
        u8 neighbor_type;
        u8 neighbor_normal;
@@@ -1066,6 -1059,7 +1059,7 @@@ struct sdma_vl_map
  #define SERIAL_MAX 16 /* length of the serial number */
  
  typedef int (*send_routine)(struct rvt_qp *, struct hfi1_pkt_state *, u64);
+ struct hfi1_netdev_rx;
  struct hfi1_devdata {
        struct hfi1_ibdev verbs_dev;     /* must be first */
        /* pointers to related structs for this device */
        /* Lock to protect IRQ SRC register access */
        spinlock_t irq_src_lock;
        int vnic_num_vports;
-       struct net_device *dummy_netdev;
+       struct hfi1_netdev_rx *netdev_rx;
 +      struct hfi1_affinity_node *affinity_entry;
  
        /* Keeps track of IPoIB RSM rule users */
        atomic_t ipoib_rsm_usr_num;
@@@ -1480,7 -1473,7 +1474,7 @@@ int hfi1_create_ctxtdata(struct hfi1_pp
                         struct hfi1_ctxtdata **rcd);
  void hfi1_free_ctxt(struct hfi1_ctxtdata *rcd);
  void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
-                        struct hfi1_devdata *dd, u8 hw_pidx, u8 port);
+                        struct hfi1_devdata *dd, u8 hw_pidx, u32 port);
  void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
  int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
  int hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
@@@ -1976,10 -1969,10 +1970,10 @@@ static inline struct hfi1_ibdev *dev_fr
        return container_of(rdi, struct hfi1_ibdev, rdi);
  }
  
- static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u8 port)
+ static inline struct hfi1_ibport *to_iport(struct ib_device *ibdev, u32 port)
  {
        struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
-       unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */
+       u32 pidx = port - 1; /* IB number port from 1, hdw from 0 */
  
        WARN_ON(pidx >= dd->num_pports);
        return &dd->pport[pidx].ibport_data;
@@@ -2198,7 -2191,7 +2192,7 @@@ extern const struct attribute_group ib_
  int hfi1_device_create(struct hfi1_devdata *dd);
  void hfi1_device_remove(struct hfi1_devdata *dd);
  
- int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ int hfi1_create_port_files(struct ib_device *ibdev, u32 port_num,
                           struct kobject *kobj);
  int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd);
  void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
@@@ -627,7 -627,7 +627,7 @@@ static enum hrtimer_restart cca_timer_f
   * Common code for initializing the physical port structure.
   */
  void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
-                        struct hfi1_devdata *dd, u8 hw_pidx, u8 port)
+                        struct hfi1_devdata *dd, u8 hw_pidx, u32 port)
  {
        int i;
        uint default_pkey_idx;
@@@ -1277,6 -1277,7 +1277,6 @@@ static struct hfi1_devdata *hfi1_alloc_
        dd->pport = (struct hfi1_pportdata *)(dd + 1);
        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);
 -      dd->node = NUMA_NO_NODE;
  
        ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
                        GFP_KERNEL);
                goto bail;
        }
        rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
 +      /*
 +       * If the BIOS does not have the NUMA node information set, select
 +       * NUMA 0 so we get consistent performance.
 +       */
 +      dd->node = pcibus_to_node(pdev->bus);
 +      if (dd->node == NUMA_NO_NODE) {
 +              dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
 +              dd->node = 0;
 +      }
  
        /*
         * Initialize all locks for the device. This needs to be as early as
@@@ -1775,7 -1767,7 +1775,7 @@@ static void remove_one(struct pci_dev *
        hfi1_unregister_ib_device(dd);
  
        /* free netdev data */
-       hfi1_netdev_free(dd);
+       hfi1_free_rx(dd);
  
        /*
         * Disable the IB link, disable interrupts on the device,
@@@ -1860,7 -1852,8 +1860,8 @@@ bail
  }
  
  /**
-  * allocate eager buffers, both kernel and user contexts.
+  * hfi1_setup_eagerbufs - llocate eager buffers, both kernel and user
+  * contexts.
   * @rcd: the context we are setting up.
   *
   * Allocate the eager TID buffers and program them into hip.
  #include <linux/etherdevice.h>
  #include <rdma/ib_verbs.h>
  
- static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
+ static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_rx *rx,
                                  struct hfi1_ctxtdata *uctxt)
  {
        unsigned int rcvctrl_ops;
-       struct hfi1_devdata *dd = priv->dd;
+       struct hfi1_devdata *dd = rx->dd;
        int ret;
  
        uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
@@@ -118,11 -118,11 +118,11 @@@ static void hfi1_netdev_deallocate_ctxt
        hfi1_free_ctxt(uctxt);
  }
  
- static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
+ static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_rx *rx,
                                  struct hfi1_ctxtdata **ctxt)
  {
        int rc;
-       struct hfi1_devdata *dd = priv->dd;
+       struct hfi1_devdata *dd = rx->dd;
  
        rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
        if (rc) {
                return rc;
        }
  
-       rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
+       rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
        if (rc) {
                dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
                hfi1_netdev_deallocate_ctxt(dd, *ctxt);
@@@ -173,7 -173,8 +173,7 @@@ u32 hfi1_num_netdev_contexts(struct hfi
                return 0;
        }
  
 -      cpumask_and(node_cpu_mask, cpu_mask,
 -                  cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
 +      cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
  
        available_cpus = cpumask_weight(node_cpu_mask);
  
                    (u32)HFI1_MAX_NETDEV_CTXTS);
  }
  
- static int hfi1_netdev_rxq_init(struct net_device *dev)
+ static int hfi1_netdev_rxq_init(struct hfi1_netdev_rx *rx)
  {
        int i;
        int rc;
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
-       struct hfi1_devdata *dd = priv->dd;
+       struct hfi1_devdata *dd = rx->dd;
+       struct net_device *dev = &rx->rx_napi;
  
-       priv->num_rx_q = dd->num_netdev_contexts;
-       priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
-                                GFP_KERNEL, dd->node);
+       rx->num_rx_q = dd->num_netdev_contexts;
+       rx->rxq = kcalloc_node(rx->num_rx_q, sizeof(*rx->rxq),
+                              GFP_KERNEL, dd->node);
  
-       if (!priv->rxq) {
+       if (!rx->rxq) {
                dd_dev_err(dd, "Unable to allocate netdev queue data\n");
                return (-ENOMEM);
        }
  
-       for (i = 0; i < priv->num_rx_q; i++) {
-               struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+       for (i = 0; i < rx->num_rx_q; i++) {
+               struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
  
-               rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
+               rc = hfi1_netdev_allot_ctxt(rx, &rxq->rcd);
                if (rc)
                        goto bail_context_irq_failure;
  
                hfi1_rcd_get(rxq->rcd);
-               rxq->priv = priv;
+               rxq->rx = rx;
                rxq->rcd->napi = &rxq->napi;
                dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
                            i, rxq->rcd->ctxt);
  bail_context_irq_failure:
        dd_dev_err(dd, "Unable to allot receive context\n");
        for (; i >= 0; i--) {
-               struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+               struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
  
                if (rxq->rcd) {
                        hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
                        rxq->rcd = NULL;
                }
        }
-       kfree(priv->rxq);
-       priv->rxq = NULL;
+       kfree(rx->rxq);
+       rx->rxq = NULL;
  
        return rc;
  }
  
- static void hfi1_netdev_rxq_deinit(struct net_device *dev)
+ static void hfi1_netdev_rxq_deinit(struct hfi1_netdev_rx *rx)
  {
        int i;
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
-       struct hfi1_devdata *dd = priv->dd;
+       struct hfi1_devdata *dd = rx->dd;
  
-       for (i = 0; i < priv->num_rx_q; i++) {
-               struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+       for (i = 0; i < rx->num_rx_q; i++) {
+               struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
  
                netif_napi_del(&rxq->napi);
                hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
                rxq->rcd = NULL;
        }
  
-       kfree(priv->rxq);
-       priv->rxq = NULL;
-       priv->num_rx_q = 0;
+       kfree(rx->rxq);
+       rx->rxq = NULL;
+       rx->num_rx_q = 0;
  }
  
- static void enable_queues(struct hfi1_netdev_priv *priv)
+ static void enable_queues(struct hfi1_netdev_rx *rx)
  {
        int i;
  
-       for (i = 0; i < priv->num_rx_q; i++) {
-               struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+       for (i = 0; i < rx->num_rx_q; i++) {
+               struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
  
-               dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
+               dd_dev_info(rx->dd, "enabling queue %d on context %d\n", i,
                            rxq->rcd->ctxt);
                napi_enable(&rxq->napi);
-               hfi1_rcvctrl(priv->dd,
+               hfi1_rcvctrl(rx->dd,
                             HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
                             rxq->rcd);
        }
  }
  
- static void disable_queues(struct hfi1_netdev_priv *priv)
+ static void disable_queues(struct hfi1_netdev_rx *rx)
  {
        int i;
  
-       msix_netdev_synchronize_irq(priv->dd);
+       msix_netdev_synchronize_irq(rx->dd);
  
-       for (i = 0; i < priv->num_rx_q; i++) {
-               struct hfi1_netdev_rxq *rxq = &priv->rxq[i];
+       for (i = 0; i < rx->num_rx_q; i++) {
+               struct hfi1_netdev_rxq *rxq = &rx->rxq[i];
  
-               dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
+               dd_dev_info(rx->dd, "disabling queue %d on context %d\n", i,
                            rxq->rcd->ctxt);
  
                /* wait for napi if it was scheduled */
-               hfi1_rcvctrl(priv->dd,
+               hfi1_rcvctrl(rx->dd,
                             HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
                             rxq->rcd);
                napi_synchronize(&rxq->napi);
   */
  int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
  {
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+       struct hfi1_netdev_rx *rx = dd->netdev_rx;
        int res;
  
-       if (atomic_fetch_inc(&priv->netdevs))
+       if (atomic_fetch_inc(&rx->netdevs))
                return 0;
  
        mutex_lock(&hfi1_mutex);
-       init_dummy_netdev(dd->dummy_netdev);
-       res = hfi1_netdev_rxq_init(dd->dummy_netdev);
+       res = hfi1_netdev_rxq_init(rx);
        mutex_unlock(&hfi1_mutex);
        return res;
  }
   */
  int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
  {
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+       struct hfi1_netdev_rx *rx = dd->netdev_rx;
  
        /* destroy the RX queues only if it is the last netdev going away */
-       if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
+       if (atomic_fetch_add_unless(&rx->netdevs, -1, 0) == 1) {
                mutex_lock(&hfi1_mutex);
-               hfi1_netdev_rxq_deinit(dd->dummy_netdev);
+               hfi1_netdev_rxq_deinit(rx);
                mutex_unlock(&hfi1_mutex);
        }
  
  }
  
  /**
-  * hfi1_netdev_alloc - Allocates netdev and private data. It is required
-  * because RMT index and MSI-X interrupt can be set only
-  * during driver initialization.
-  *
+  * hfi1_alloc_rx - Allocates the rx support structure
   * @dd: hfi1 dev data
+  *
+  * Allocate the rx structure to support gathering the receive
+  * resources and the dummy netdev.
+  *
+  * Updates dd struct pointer upon success.
+  *
+  * Return: 0 (success) -error on failure
+  *
   */
- int hfi1_netdev_alloc(struct hfi1_devdata *dd)
+ int hfi1_alloc_rx(struct hfi1_devdata *dd)
  {
-       struct hfi1_netdev_priv *priv;
-       const int netdev_size = sizeof(*dd->dummy_netdev) +
-               sizeof(struct hfi1_netdev_priv);
+       struct hfi1_netdev_rx *rx;
  
-       dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
-       dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);
+       dd_dev_info(dd, "allocating rx size %ld\n", sizeof(*rx));
+       rx = kzalloc_node(sizeof(*rx), GFP_KERNEL, dd->node);
  
-       if (!dd->dummy_netdev)
+       if (!rx)
                return -ENOMEM;
+       rx->dd = dd;
+       init_dummy_netdev(&rx->rx_napi);
  
-       priv = hfi1_netdev_priv(dd->dummy_netdev);
-       priv->dd = dd;
-       xa_init(&priv->dev_tbl);
-       atomic_set(&priv->enabled, 0);
-       atomic_set(&priv->netdevs, 0);
+       xa_init(&rx->dev_tbl);
+       atomic_set(&rx->enabled, 0);
+       atomic_set(&rx->netdevs, 0);
+       dd->netdev_rx = rx;
  
        return 0;
  }
  
- void hfi1_netdev_free(struct hfi1_devdata *dd)
+ void hfi1_free_rx(struct hfi1_devdata *dd)
  {
-       if (dd->dummy_netdev) {
-               dd_dev_info(dd, "hfi1 netdev freed\n");
-               kfree(dd->dummy_netdev);
-               dd->dummy_netdev = NULL;
+       if (dd->netdev_rx) {
+               dd_dev_info(dd, "hfi1 rx freed\n");
+               kfree(dd->netdev_rx);
+               dd->netdev_rx = NULL;
        }
  }
  
   */
  void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
  {
-       struct hfi1_netdev_priv *priv;
+       struct hfi1_netdev_rx *rx;
  
-       if (!dd->dummy_netdev)
+       if (!dd->netdev_rx)
                return;
  
-       priv = hfi1_netdev_priv(dd->dummy_netdev);
-       if (atomic_fetch_inc(&priv->enabled))
+       rx = dd->netdev_rx;
+       if (atomic_fetch_inc(&rx->enabled))
                return;
  
        mutex_lock(&hfi1_mutex);
-       enable_queues(priv);
+       enable_queues(rx);
        mutex_unlock(&hfi1_mutex);
  }
  
  void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
  {
-       struct hfi1_netdev_priv *priv;
+       struct hfi1_netdev_rx *rx;
  
-       if (!dd->dummy_netdev)
+       if (!dd->netdev_rx)
                return;
  
-       priv = hfi1_netdev_priv(dd->dummy_netdev);
-       if (atomic_dec_if_positive(&priv->enabled))
+       rx = dd->netdev_rx;
+       if (atomic_dec_if_positive(&rx->enabled))
                return;
  
        mutex_lock(&hfi1_mutex);
-       disable_queues(priv);
+       disable_queues(rx);
        mutex_unlock(&hfi1_mutex);
  }
  
   */
  int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
  {
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+       struct hfi1_netdev_rx *rx = dd->netdev_rx;
  
-       return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
+       return xa_insert(&rx->dev_tbl, id, data, GFP_NOWAIT);
  }
  
  /**
   */
  void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
  {
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+       struct hfi1_netdev_rx *rx = dd->netdev_rx;
  
-       return xa_erase(&priv->dev_tbl, id);
+       return xa_erase(&rx->dev_tbl, id);
  }
  
  /**
   */
  void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
  {
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+       struct hfi1_netdev_rx *rx = dd->netdev_rx;
  
-       return xa_load(&priv->dev_tbl, id);
+       return xa_load(&rx->dev_tbl, id);
  }
  
  /**
-  * hfi1_netdev_get_first_dat - Gets first entry with greater or equal id.
+  * hfi1_netdev_get_first_data - Gets first entry with greater or equal id.
   *
   * @dd: hfi1 dev data
   * @start_id: requested integer id up to INT_MAX
   */
  void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
  {
-       struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
+       struct hfi1_netdev_rx *rx = dd->netdev_rx;
        unsigned long index = *start_id;
        void *ret;
  
-       ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
+       ret = xa_find(&rx->dev_tbl, &index, UINT_MAX, XA_PRESENT);
        *start_id = (int)index;
        return ret;
  }
  #include "hns_roce_hem.h"
  #include "hns_roce_hw_v2.h"
  
+ enum {
+       CMD_RST_PRC_OTHERS,
+       CMD_RST_PRC_SUCCESS,
+       CMD_RST_PRC_EBUSY,
+ };
  static inline void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
                                   struct ib_sge *sg)
  {
@@@ -632,24 -638,60 +638,60 @@@ static inline void update_sq_db(struct 
         * around the mailbox calls. Hence, use the deferred flush for
         * now.
         */
-       if (qp->state == IB_QPS_ERR) {
+       if (unlikely(qp->state == IB_QPS_ERR)) {
                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
                        init_flush_work(hr_dev, qp);
        } else {
                struct hns_roce_v2_db sq_db = {};
  
-               roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
-                              V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
-               roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
-                              V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
+               roce_set_field(sq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+                              qp->doorbell_qpn);
+               roce_set_field(sq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+                              HNS_ROCE_V2_SQ_DB);
                /* indicates data on new BAR, 0 : SQ doorbell, 1 : DWQE */
                roce_set_bit(sq_db.byte_4, V2_DB_FLAG_S, 0);
-               roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
-                              V2_DB_PARAMETER_IDX_S, qp->sq.head);
-               roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
-                              V2_DB_PARAMETER_SL_S, qp->sl);
+               roce_set_field(sq_db.parameter, V2_DB_PRODUCER_IDX_M,
+                              V2_DB_PRODUCER_IDX_S, qp->sq.head);
+               roce_set_field(sq_db.parameter, V2_DB_SL_M, V2_DB_SL_S,
+                              qp->sl);
+               hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg);
+       }
+ }
+ static inline void update_rq_db(struct hns_roce_dev *hr_dev,
+                               struct hns_roce_qp *qp)
+ {
+       /*
+        * Hip08 hardware cannot flush the WQEs in RQ if the QP state
+        * gets into errored mode. Hence, as a workaround to this
+        * hardware limitation, driver needs to assist in flushing. But
+        * the flushing operation uses mailbox to convey the QP state to
+        * the hardware and which can sleep due to the mutex protection
+        * around the mailbox calls. Hence, use the deferred flush for
+        * now.
+        */
+       if (unlikely(qp->state == IB_QPS_ERR)) {
+               if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+                       init_flush_work(hr_dev, qp);
+       } else {
+               if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
+                       *qp->rdb.db_record =
+                                       qp->rq.head & V2_DB_PRODUCER_IDX_M;
+               } else {
+                       struct hns_roce_v2_db rq_db = {};
+                       roce_set_field(rq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+                                      qp->qpn);
+                       roce_set_field(rq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+                                      HNS_ROCE_V2_RQ_DB);
+                       roce_set_field(rq_db.parameter, V2_DB_PRODUCER_IDX_M,
+                                      V2_DB_PRODUCER_IDX_S, qp->rq.head);
  
-               hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+                       hns_roce_write64(hr_dev, (__le32 *)&rq_db,
+                                        qp->rq.db_reg);
+               }
        }
  }
  
@@@ -681,8 -723,7 +723,7 @@@ static void write_dwqe(struct hns_roce_
        roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
                       V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
  
-       hns_roce_write512(hr_dev, wqe, hr_dev->mem_base +
-                         HNS_ROCE_DWQE_SIZE * qp->ibqp.qp_num);
+       hns_roce_write512(hr_dev, wqe, qp->sq.db_reg);
  }
  
  static int hns_roce_v2_post_send(struct ib_qp *ibqp,
@@@ -879,22 -920,7 +920,7 @@@ out
        if (likely(nreq)) {
                hr_qp->rq.head += nreq;
  
-               /*
-                * Hip08 hardware cannot flush the WQEs in RQ if the QP state
-                * gets into errored mode. Hence, as a workaround to this
-                * hardware limitation, driver needs to assist in flushing. But
-                * the flushing operation uses mailbox to convey the QP state to
-                * the hardware and which can sleep due to the mutex protection
-                * around the mailbox calls. Hence, use the deferred flush for
-                * now.
-                */
-               if (hr_qp->state == IB_QPS_ERR) {
-                       if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
-                                             &hr_qp->flush_flag))
-                               init_flush_work(hr_dev, hr_qp);
-               } else {
-                       *hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
-               }
+               update_rq_db(hr_dev, hr_qp);
        }
        spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
  
@@@ -1016,13 -1042,14 +1042,14 @@@ static int hns_roce_v2_post_srq_recv(st
        }
  
        if (likely(nreq)) {
-               srq_db.byte_4 =
-                       cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
-                                   (srq->srqn & V2_DB_BYTE_4_TAG_M));
-               srq_db.parameter =
-                       cpu_to_le32(srq->idx_que.head & V2_DB_PARAMETER_IDX_M);
+               roce_set_field(srq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+                              srq->srqn);
+               roce_set_field(srq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+                              HNS_ROCE_V2_SRQ_DB);
+               roce_set_field(srq_db.parameter, V2_DB_PRODUCER_IDX_M,
+                              V2_DB_PRODUCER_IDX_S, srq->idx_que.head);
  
-               hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+               hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg);
        }
  
        spin_unlock_irqrestore(&srq->lock, flags);
        return ret;
  }
  
- static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
+ static u32 hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
                                      unsigned long instance_stage,
                                      unsigned long reset_stage)
  {
        return CMD_RST_PRC_SUCCESS;
  }
  
- static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
+ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
                                        unsigned long instance_stage,
                                        unsigned long reset_stage)
  {
        return CMD_RST_PRC_SUCCESS;
  }
  
- static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
+ static u32 hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
  {
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hnae3_handle *handle = priv->handle;
        return CMD_RST_PRC_EBUSY;
  }
  
- static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
+ static u32 check_aedev_reset_status(struct hns_roce_dev *hr_dev,
+                                   struct hnae3_handle *handle)
  {
-       struct hns_roce_v2_priv *priv = hr_dev->priv;
-       struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
        unsigned long instance_stage; /* the current instance stage */
        unsigned long reset_stage; /* the current reset stage */
        bool sw_resetting;
        bool hw_resetting;
  
-       if (hr_dev->is_reset)
-               return CMD_RST_PRC_SUCCESS;
        /* Get information about reset from NIC driver or RoCE driver itself,
         * the meaning of the following variables from NIC driver are described
         * as below:
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
-       hw_resetting = ops->get_cmdq_stat(handle);
-       sw_resetting = ops->ae_dev_resetting(handle);
        if (reset_cnt != hr_dev->reset_cnt)
                return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
                                                  reset_stage);
-       else if (hw_resetting)
+       hw_resetting = ops->get_cmdq_stat(handle);
+       if (hw_resetting)
                return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
                                                    reset_stage);
-       else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
+       sw_resetting = ops->ae_dev_resetting(handle);
+       if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
                return hns_roce_v2_cmd_sw_resetting(hr_dev);
  
-       return 0;
+       return CMD_RST_PRC_OTHERS;
+ }
+ static bool check_device_is_in_reset(struct hns_roce_dev *hr_dev)
+ {
+       struct hns_roce_v2_priv *priv = hr_dev->priv;
+       struct hnae3_handle *handle = priv->handle;
+       const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+       if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle))
+               return true;
+       if (ops->get_hw_reset_stat(handle))
+               return true;
+       if (ops->ae_dev_resetting(handle))
+               return true;
+       return false;
+ }
+ static bool v2_chk_mbox_is_avail(struct hns_roce_dev *hr_dev, bool *busy)
+ {
+       struct hns_roce_v2_priv *priv = hr_dev->priv;
+       u32 status;
+       if (hr_dev->is_reset)
+               status = CMD_RST_PRC_SUCCESS;
+       else
+               status = check_aedev_reset_status(hr_dev, priv->handle);
+       *busy = (status == CMD_RST_PRC_EBUSY);
+       return status == CMD_RST_PRC_OTHERS;
  }
  
  static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
+               dev_err_ratelimited(hr_dev->dev,
+                                   "failed to map cmq desc addr.\n");
                return -ENOMEM;
        }
  
@@@ -1194,10 -1254,8 +1254,10 @@@ static void hns_roce_cmq_init_regs(stru
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                           (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
 -              roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
 +
 +              /* Make sure to write tail first and then head */
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
 +              roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
@@@ -1228,14 -1286,16 +1288,16 @@@ static int hns_roce_v2_cmq_init(struct 
        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
-               dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
+               dev_err_ratelimited(hr_dev->dev,
+                                   "failed to init CSQ, ret = %d.\n", ret);
                return ret;
        }
  
        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
-               dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
+               dev_err_ratelimited(hr_dev->dev,
+                                   "failed to init CRQ, ret = %d.\n", ret);
                goto err_crq;
        }
  
@@@ -1352,27 -1412,36 +1414,36 @@@ static int __hns_roce_cmq_send(struct h
  static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                             struct hns_roce_cmq_desc *desc, int num)
  {
-       int retval;
+       bool busy;
        int ret;
  
-       ret = hns_roce_v2_rst_process_cmd(hr_dev);
-       if (ret == CMD_RST_PRC_SUCCESS)
-               return 0;
-       if (ret == CMD_RST_PRC_EBUSY)
-               return -EBUSY;
+       if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+               return busy ? -EBUSY : 0;
  
        ret = __hns_roce_cmq_send(hr_dev, desc, num);
        if (ret) {
-               retval = hns_roce_v2_rst_process_cmd(hr_dev);
-               if (retval == CMD_RST_PRC_SUCCESS)
-                       return 0;
-               else if (retval == CMD_RST_PRC_EBUSY)
-                       return -EBUSY;
+               if (!v2_chk_mbox_is_avail(hr_dev, &busy))
+                       return busy ? -EBUSY : 0;
        }
  
        return ret;
  }
  
+ static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+                              dma_addr_t base_addr, u16 op)
+ {
+       struct hns_roce_cmd_mailbox *mbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+       int ret;
+       if (IS_ERR(mbox))
+               return PTR_ERR(mbox);
+       ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, obj, 0, op,
+                               HNS_ROCE_CMD_TIMEOUT_MSECS);
+       hns_roce_free_cmd_mailbox(hr_dev, mbox);
+       return ret;
+ }
  static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
  {
        struct hns_roce_query_version *resp;
        return 0;
  }
  
- static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
+ static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
+                                       struct hnae3_handle *handle)
  {
-       struct hns_roce_v2_priv *priv = hr_dev->priv;
-       struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
-       unsigned long reset_cnt;
-       bool sw_resetting;
-       bool hw_resetting;
+       unsigned long end;
  
-       reset_cnt = ops->ae_dev_reset_cnt(handle);
-       hw_resetting = ops->get_hw_reset_stat(handle);
-       sw_resetting = ops->ae_dev_resetting(handle);
+       hr_dev->dis_db = true;
  
-       if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
-               return true;
+       dev_warn(hr_dev->dev,
+                "Func clear is pending, device in resetting state.\n");
+       end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+       while (end) {
+               if (!ops->get_hw_reset_stat(handle)) {
+                       hr_dev->is_reset = true;
+                       dev_info(hr_dev->dev,
+                                "Func clear success after reset.\n");
+                       return;
+               }
+               msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+               end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+       }
  
-       return false;
+       dev_warn(hr_dev->dev, "Func clear failed.\n");
  }
  
- static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
-                                     int flag)
+ static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
+                                       struct hnae3_handle *handle)
  {
-       struct hns_roce_v2_priv *priv = hr_dev->priv;
-       struct hnae3_handle *handle = priv->handle;
        const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
-       unsigned long instance_stage;
-       unsigned long reset_cnt;
        unsigned long end;
-       bool sw_resetting;
-       bool hw_resetting;
  
-       instance_stage = handle->rinfo.instance_state;
-       reset_cnt = ops->ae_dev_reset_cnt(handle);
-       hw_resetting = ops->get_hw_reset_stat(handle);
-       sw_resetting = ops->ae_dev_resetting(handle);
+       hr_dev->dis_db = true;
+       dev_warn(hr_dev->dev,
+                "Func clear is pending, device in resetting state.\n");
+       end = HNS_ROCE_V2_HW_RST_TIMEOUT;
+       while (end) {
+               if (ops->ae_dev_reset_cnt(handle) !=
+                   hr_dev->reset_cnt) {
+                       hr_dev->is_reset = true;
+                       dev_info(hr_dev->dev,
+                                "Func clear success after sw reset\n");
+                       return;
+               }
+               msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
+               end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
+       }
+       dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+ }
  
-       if (reset_cnt != hr_dev->reset_cnt) {
+ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
+                                      int flag)
+ {
+       struct hns_roce_v2_priv *priv = hr_dev->priv;
+       struct hnae3_handle *handle = priv->handle;
+       const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+       if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
                hr_dev->dis_db = true;
                hr_dev->is_reset = true;
                dev_info(hr_dev->dev, "Func clear success after reset.\n");
-       } else if (hw_resetting) {
-               hr_dev->dis_db = true;
+               return;
+       }
  
-               dev_warn(hr_dev->dev,
-                        "Func clear is pending, device in resetting state.\n");
-               end = HNS_ROCE_V2_HW_RST_TIMEOUT;
-               while (end) {
-                       if (!ops->get_hw_reset_stat(handle)) {
-                               hr_dev->is_reset = true;
-                               dev_info(hr_dev->dev,
-                                        "Func clear success after reset.\n");
-                               return;
-                       }
-                       msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
-                       end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
-               }
+       if (ops->get_hw_reset_stat(handle)) {
+               func_clr_hw_resetting_state(hr_dev, handle);
+               return;
+       }
  
-               dev_warn(hr_dev->dev, "Func clear failed.\n");
-       } else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
-               hr_dev->dis_db = true;
+       if (ops->ae_dev_resetting(handle) &&
+           handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) {
+               func_clr_sw_resetting_state(hr_dev, handle);
+               return;
+       }
  
+       if (retval && !flag)
                dev_warn(hr_dev->dev,
-                        "Func clear is pending, device in resetting state.\n");
-               end = HNS_ROCE_V2_HW_RST_TIMEOUT;
-               while (end) {
-                       if (ops->ae_dev_reset_cnt(handle) !=
-                           hr_dev->reset_cnt) {
-                               hr_dev->is_reset = true;
-                               dev_info(hr_dev->dev,
-                                        "Func clear success after sw reset\n");
-                               return;
-                       }
-                       msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
-                       end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
-               }
-               dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
-       } else {
-               if (retval && !flag)
-                       dev_warn(hr_dev->dev,
-                                "Func clear read failed, ret = %d.\n", retval);
+                        "Func clear read failed, ret = %d.\n", retval);
  
-               dev_warn(hr_dev->dev, "Func clear failed.\n");
-       }
+       dev_warn(hr_dev->dev, "Func clear failed.\n");
  }
- static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
  {
        bool fclr_write_fail_flag = false;
        struct hns_roce_func_clear *resp;
        unsigned long end;
        int ret = 0;
  
-       if (hns_roce_func_clr_chk_rst(hr_dev))
+       if (check_device_is_in_reset(hr_dev))
                goto out;
  
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
        resp = (struct hns_roce_func_clear *)desc.data;
+       resp->rst_funcid_en = cpu_to_le32(vf_id);
  
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret) {
        msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
        end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
        while (end) {
-               if (hns_roce_func_clr_chk_rst(hr_dev))
+               if (check_device_is_in_reset(hr_dev))
                        goto out;
                msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
                end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
                hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
                                              true);
  
+               resp->rst_funcid_en = cpu_to_le32(vf_id);
                ret = hns_roce_cmq_send(hr_dev, &desc, 1);
                if (ret)
                        continue;
  
                if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
-                       hr_dev->is_reset = true;
+                       if (vf_id == 0)
+                               hr_dev->is_reset = true;
                        return;
                }
        }
  
  out:
-       hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
+       hns_roce_func_clr_rst_proc(hr_dev, ret, fclr_write_fail_flag);
+ }
+ /* Release the hardware resources previously assigned to VF @vf_id.
+  *
+  * Sends a two-descriptor ALLOC_VF_RES command in which only the VF id
+  * is filled in and every resource count is left at zero, which
+  * presumably tells firmware to drop the VF's prior allocation (TODO
+  * confirm against the command spec).  The send status is deliberately
+  * ignored: this is a void teardown helper with nothing to recover on
+  * failure.
+  */
+ static void hns_roce_free_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+ {
+       enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+       struct hns_roce_cmq_desc desc[2];
+       struct hns_roce_cmq_req *req_a;
+       req_a = (struct hns_roce_cmq_req *)desc[0].data;
+       /* NEXT flag on desc[0] chains desc[1] into one command. */
+       hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
+       desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+       hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
+       hr_reg_write(req_a, FUNC_RES_A_VF_ID, vf_id);
+       hns_roce_cmq_send(hr_dev, desc, 2);
+ }
+ /* Clear all functions (PF and VFs) during device teardown.
+  *
+  * Walks function ids from the highest down to 0 so that the PF
+  * (id 0) is cleared last; every non-PF function additionally has its
+  * hardware resources returned via hns_roce_free_vf_resource().
+  */
+ static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
+ {
+       int i;
+       for (i = hr_dev->func_num - 1; i >= 0; i--) {
+               __hns_roce_function_clear(hr_dev, i);
+               /* The PF's resources are not freed here, only the VFs'. */
+               if (i != 0)
+                       hns_roce_free_vf_resource(hr_dev, i);
+       }
+ }
  
  static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
        return 0;
  }
  
+ /* Query how many functions share this device and cache the count in
+  * hr_dev->func_num (also caches the congestion-algorithm template id).
+  *
+  * Devices older than HIP09 are treated as single-function without
+  * issuing the command.  On command failure func_num falls back to 1
+  * before the error is propagated, so later users always see a sane
+  * value.
+  */
+ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
+ {
+       struct hns_roce_cmq_desc desc;
+       int ret;
+       if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+               hr_dev->func_num = 1;
+               return 0;
+       }
+       hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_FUNC_INFO,
+                                     true);
+       ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+       if (ret) {
+               hr_dev->func_num = 1;
+               return ret;
+       }
+       hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num);
+       hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id);
+       return 0;
+ }
+ /* Program device-global parameters: the 1us reference (0x3e8 = 1000;
+  * presumably clock cycles per microsecond given the 1US_CYCLES field
+  * name -- TODO confirm) and the RoCEv2 UDP destination port.
+  */
  static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
  {
-       struct hns_roce_cfg_global_param *req;
        struct hns_roce_cmq_desc desc;
+       struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
  
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);
  
-       req = (struct hns_roce_cfg_global_param *)desc.data;
-       memset(req, 0, sizeof(*req));
-       roce_set_field(req->time_cfg_udp_port,
-                      CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
-                      CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
-       roce_set_field(req->time_cfg_udp_port,
-                      CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
-                      CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S,
-                      ROCE_V2_UDP_DPORT);
+       hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+       hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
  
        return hns_roce_cmq_send(hr_dev, &desc, 1);
  }
  
+ /* Query per-function resource capabilities and store them in hr_dev->caps.
+  *
+  * For the PF (@is_vf == false) the firmware reports device-wide totals,
+  * so each count is divided by func_num to get this function's even
+  * share.  For a VF the query already returns per-function values and
+  * func_num is forced to 1, making the division a no-op.
+  */
- static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+ static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
  {
        struct hns_roce_cmq_desc desc[2];
-       struct hns_roce_pf_res_a *req_a;
-       struct hns_roce_pf_res_b *req_b;
+       struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+       struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+       struct hns_roce_caps *caps = &hr_dev->caps;
+       enum hns_roce_opcode_type opcode;
+       u32 func_num;
        int ret;
  
-       hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_QUERY_PF_RES,
-                                     true);
-       desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+       if (is_vf) {
+               opcode = HNS_ROCE_OPC_QUERY_VF_RES;
+               func_num = 1;
+       } else {
+               opcode = HNS_ROCE_OPC_QUERY_PF_RES;
+               func_num = hr_dev->func_num;
+       }
  
-       hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_QUERY_PF_RES,
-                                     true);
+       hns_roce_cmq_setup_basic_desc(&desc[0], opcode, true);
+       desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+       hns_roce_cmq_setup_basic_desc(&desc[1], opcode, true);
  
        ret = hns_roce_cmq_send(hr_dev, desc, 2);
        if (ret)
                return ret;
  
-       req_a = (struct hns_roce_pf_res_a *)desc[0].data;
-       req_b = (struct hns_roce_pf_res_b *)desc[1].data;
-       hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
-                                                PF_RES_DATA_1_PF_QPC_BT_NUM_M,
-                                                PF_RES_DATA_1_PF_QPC_BT_NUM_S);
-       hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
-                                               PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
-                                               PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
-       hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
-                                                PF_RES_DATA_3_PF_CQC_BT_NUM_M,
-                                                PF_RES_DATA_3_PF_CQC_BT_NUM_S);
-       hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
-                                                PF_RES_DATA_4_PF_MPT_BT_NUM_M,
-                                                PF_RES_DATA_4_PF_MPT_BT_NUM_S);
-       hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
-                                            PF_RES_DATA_3_PF_SL_NUM_M,
-                                            PF_RES_DATA_3_PF_SL_NUM_S);
-       hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
-                                            PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
-                                            PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
-       hr_dev->caps.gmv_bt_num = roce_get_field(req_b->gmv_idx_num,
-                                                PF_RES_DATA_5_PF_GMV_BT_NUM_M,
-                                                PF_RES_DATA_5_PF_GMV_BT_NUM_S);
+       caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num;
+       caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num;
+       caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num;
+       caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num;
+       caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num;
+       caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num;
+       caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num;
+       caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num;
+       /* QID and GMV counts live in different fields of the VF reply. */
+       if (is_vf) {
+               caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num;
+               caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) /
+                                              func_num;
+       } else {
+               caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num;
+               caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) /
+                                              func_num;
+       }
  
        return 0;
  }
  
+ /* PF variant: device-wide totals are split across func_num functions. */
+ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+ {
+       return load_func_res_caps(hr_dev, false);
+ }
+ /* VF variant: firmware already reports per-function values. */
+ static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
+ {
+       return load_func_res_caps(hr_dev, true);
+ }
  static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
  {
-       struct hns_roce_pf_timer_res_a *req_a;
        struct hns_roce_cmq_desc desc;
+       struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+       struct hns_roce_caps *caps = &hr_dev->caps;
        int ret;
  
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
        if (ret)
                return ret;
  
-       req_a = (struct hns_roce_pf_timer_res_a *)desc.data;
-       hr_dev->caps.qpc_timer_bt_num =
-               roce_get_field(req_a->qpc_timer_bt_idx_num,
-                              PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
-                              PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
-       hr_dev->caps.cqc_timer_bt_num =
-               roce_get_field(req_a->cqc_timer_bt_idx_num,
-                              PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
-                              PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
+       caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM);
+       caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM);
  
        return 0;
  }
  
- static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
+ static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+                                         u32 vf_id)
  {
-       struct hns_roce_cmq_desc desc;
        struct hns_roce_vf_switch *swt;
+       struct hns_roce_cmq_desc desc;
        int ret;
  
        swt = (struct hns_roce_vf_switch *)desc.data;
        return hns_roce_cmq_send(hr_dev, &desc, 1);
  }
  
- static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+ /* Apply switch parameters to every function (PF id 0 through the last
+  * VF), stopping at the first failure and returning its error code.
+  */
+ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev)
  {
-       struct hns_roce_cmq_desc desc[2];
-       struct hns_roce_vf_res_a *req_a;
-       struct hns_roce_vf_res_b *req_b;
+       u32 vf_id;
+       int ret;
+       for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+               ret = __hns_roce_set_vf_switch_param(hr_dev, vf_id);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
  
-       req_a = (struct hns_roce_vf_res_a *)desc[0].data;
-       req_b = (struct hns_roce_vf_res_b *)desc[1].data;
+ /* Assign function @vf_id its slice of the hardware resource tables.
+  *
+  * Each count in hr_dev->caps is already a per-function share (see
+  * load_func_res_caps), so function i gets [i * count, (i + 1) * count)
+  * as its base-index/count window in every table.  HIP09+ devices use
+  * the combined GMV table; older revisions program separate SGID and
+  * SMAC windows instead.
+  *
+  * NOTE(review): the QID count is written via FUNC_RES_V_QID_NUM for
+  * every function including the PF, while load_func_res_caps reads
+  * FUNC_RES_B_QID_NUM for the PF -- confirm those two field layouts
+  * alias in the command payload.
+  */
+ static int __hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev, int vf_id)
+ {
+       struct hns_roce_cmq_desc desc[2];
+       struct hns_roce_cmq_req *r_a = (struct hns_roce_cmq_req *)desc[0].data;
+       struct hns_roce_cmq_req *r_b = (struct hns_roce_cmq_req *)desc[1].data;
+       enum hns_roce_opcode_type opcode = HNS_ROCE_OPC_ALLOC_VF_RES;
+       struct hns_roce_caps *caps = &hr_dev->caps;
  
-       hns_roce_cmq_setup_basic_desc(&desc[0], HNS_ROCE_OPC_ALLOC_VF_RES,
-                                     false);
+       hns_roce_cmq_setup_basic_desc(&desc[0], opcode, false);
        desc[0].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+       hns_roce_cmq_setup_basic_desc(&desc[1], opcode, false);
  
-       hns_roce_cmq_setup_basic_desc(&desc[1], HNS_ROCE_OPC_ALLOC_VF_RES,
-                                     false);
+       hr_reg_write(r_a, FUNC_RES_A_VF_ID, vf_id);
+       hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num);
+       hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num);
+       hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num);
+       hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num);
+       hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num);
+       hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num);
  
-       roce_set_field(req_a->vf_qpc_bt_idx_num,
-                      VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
-                      VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
-       roce_set_field(req_a->vf_qpc_bt_idx_num,
-                      VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
-                      VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);
-       roce_set_field(req_a->vf_srqc_bt_idx_num,
-                      VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
-                      VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
-       roce_set_field(req_a->vf_srqc_bt_idx_num,
-                      VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
-                      VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
-                      HNS_ROCE_VF_SRQC_BT_NUM);
-       roce_set_field(req_a->vf_cqc_bt_idx_num,
-                      VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
-                      VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
-       roce_set_field(req_a->vf_cqc_bt_idx_num,
-                      VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
-                      VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);
-       roce_set_field(req_a->vf_mpt_bt_idx_num,
-                      VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
-                      VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
-       roce_set_field(req_a->vf_mpt_bt_idx_num,
-                      VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
-                      VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);
-       roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
-                      VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
-       roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
-                      VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);
-       roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
-                      VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
-       roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
-                      VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);
-       roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
-                      VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
-       roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
-                      VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);
-       roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
-                      VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
-       roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
-                      VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);
-       roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
-                      VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
-       roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
-                      VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
-                      HNS_ROCE_VF_SCCC_BT_NUM);
+       if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+               hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num);
+               hr_reg_write(r_b, FUNC_RES_B_GMV_BT_IDX,
+                            vf_id * caps->gmv_bt_num);
+       } else {
+               hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num);
+               hr_reg_write(r_b, FUNC_RES_B_SGID_IDX,
+                            vf_id * caps->sgid_bt_num);
+               hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num);
+               hr_reg_write(r_b, FUNC_RES_B_SMAC_IDX,
+                            vf_id * caps->smac_bt_num);
+       }
  
        return hns_roce_cmq_send(hr_dev, desc, 2);
  }
  
+ /* Allocate resource windows for every function (PF included, id 0
+  * upward), bailing out with the first error encountered.
+  */
+ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
+ {
+       int vf_id;
+       int ret;
+       for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) {
+               ret = __hns_roce_alloc_vf_resource(hr_dev, vf_id);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+ }
+ /* Configure base-address-table (BT) attributes -- BA/buffer page sizes
+  * and hop numbers -- for the QPC, SRQC, CQC, MPT and SCCC tables.
+  * to_hr_hem_hopnum() replaces the open-coded
+  * "hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : hop_num" pattern that the
+  * removed lines below used.
+  */
  static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
  {
-       u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
-       u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
-       u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
-       u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
-       u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
-       struct hns_roce_cfg_bt_attr *req;
        struct hns_roce_cmq_desc desc;
+       struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+       struct hns_roce_caps *caps = &hr_dev->caps;
  
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
-       req = (struct hns_roce_cfg_bt_attr *)desc.data;
-       memset(req, 0, sizeof(*req));
-       roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
-                      CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
-                      hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
-                      CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
-                      hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
-                      CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
-                      qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
-       roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
-                      CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
-                      hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
-                      CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
-                      hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
-                      CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
-                      srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
-       roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
-                      CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
-                      hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
-                      CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
-                      hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
-                      CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
-                      cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
-       roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
-                      CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
-                      hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
-                      CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
-                      hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
-                      CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
-                      mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
-       roce_set_field(req->vf_sccc_cfg,
-                      CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
-                      CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
-                      hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_sccc_cfg,
-                      CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
-                      CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
-                      hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
-       roce_set_field(req->vf_sccc_cfg,
-                      CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
-                      CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
-                      sccc_hop_num ==
-                             HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
+       hr_reg_write(req, CFG_BT_ATTR_QPC_BA_PGSZ,
+                    caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_QPC_BUF_PGSZ,
+                    caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_QPC_HOPNUM,
+                    to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps));
+       hr_reg_write(req, CFG_BT_ATTR_SRQC_BA_PGSZ,
+                    caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_SRQC_BUF_PGSZ,
+                    caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_SRQC_HOPNUM,
+                    to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs));
+       hr_reg_write(req, CFG_BT_ATTR_CQC_BA_PGSZ,
+                    caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_CQC_BUF_PGSZ,
+                    caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_CQC_HOPNUM,
+                    to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs));
+       hr_reg_write(req, CFG_BT_ATTR_MPT_BA_PGSZ,
+                    caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_MPT_BUF_PGSZ,
+                    caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_MPT_HOPNUM,
+                    to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts));
+       hr_reg_write(req, CFG_BT_ATTR_SCCC_BA_PGSZ,
+                    caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET);
+       hr_reg_write(req, CFG_BT_ATTR_SCCC_BUF_PGSZ,
+                    caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET);
+       /* SCCC entries are per-QP, hence num_qps as the object count. */
+       hr_reg_write(req, CFG_BT_ATTR_SCCC_HOPNUM,
+                    to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps));
  
        return hns_roce_cmq_send(hr_dev, &desc, 1);
  }
  
  static void set_default_caps(struct hns_roce_dev *hr_dev)
  {
+       struct hns_roce_v2_priv *priv = hr_dev->priv;
        struct hns_roce_caps *caps = &hr_dev->caps;
  
        caps->num_qps           = HNS_ROCE_V2_MAX_QP_NUM;
        caps->max_sq_sg         = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
        caps->max_extend_sg     = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
        caps->max_rq_sg         = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
-       caps->max_sq_inline     = HNS_ROCE_V2_MAX_SQ_INLINE;
        caps->num_uars          = HNS_ROCE_V2_UAR_NUM;
        caps->phy_num_uars      = HNS_ROCE_V2_PHY_UAR_NUM;
        caps->num_aeq_vectors   = HNS_ROCE_V2_AEQE_VEC_NUM;
-       caps->num_comp_vectors  = HNS_ROCE_V2_COMP_VEC_NUM;
+       caps->num_comp_vectors  =
+                       min_t(u32, caps->eqc_bt_num - 1,
+                             (u32)priv->handle->rinfo.num_vectors - 2);
        caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
        caps->num_mtpts         = HNS_ROCE_V2_MAX_MTPT_NUM;
        caps->num_mtt_segs      = HNS_ROCE_V2_MAX_MTT_SEGS;
-       caps->num_cqe_segs      = HNS_ROCE_V2_MAX_CQE_SEGS;
        caps->num_srqwqe_segs   = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
        caps->num_idx_segs      = HNS_ROCE_V2_MAX_IDX_SEGS;
        caps->num_pds           = HNS_ROCE_V2_MAX_PD_NUM;
+       caps->num_xrcds         = HNS_ROCE_V2_MAX_XRCD_NUM;
        caps->max_qp_init_rdma  = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
        caps->max_qp_dest_rdma  = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
        caps->max_sq_desc_sz    = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
        caps->max_rq_desc_sz    = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
        caps->max_srq_desc_sz   = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
-       caps->qpc_sz            = HNS_ROCE_V2_QPC_SZ;
        caps->irrl_entry_sz     = HNS_ROCE_V2_IRRL_ENTRY_SZ;
        caps->trrl_entry_sz     = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
        caps->cqc_entry_sz      = HNS_ROCE_V2_CQC_ENTRY_SZ;
        caps->mtpt_entry_sz     = HNS_ROCE_V2_MTPT_ENTRY_SZ;
        caps->mtt_entry_sz      = HNS_ROCE_V2_MTT_ENTRY_SZ;
        caps->idx_entry_sz      = HNS_ROCE_V2_IDX_ENTRY_SZ;
-       caps->cqe_sz            = HNS_ROCE_V2_CQE_SIZE;
        caps->page_size_cap     = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
        caps->reserved_lkey     = 0;
        caps->reserved_pds      = 0;
+       caps->reserved_xrcds    = HNS_ROCE_V2_RSV_XRCD_NUM;
        caps->reserved_mrws     = 1;
        caps->reserved_uars     = 0;
        caps->reserved_cqs      = 0;
        caps->reserved_srqs     = 0;
        caps->reserved_qps      = HNS_ROCE_V2_RSV_QPS;
  
-       caps->qpc_ba_pg_sz      = 0;
-       caps->qpc_buf_pg_sz     = 0;
        caps->qpc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
-       caps->srqc_ba_pg_sz     = 0;
-       caps->srqc_buf_pg_sz    = 0;
        caps->srqc_hop_num      = HNS_ROCE_CONTEXT_HOP_NUM;
-       caps->cqc_ba_pg_sz      = 0;
-       caps->cqc_buf_pg_sz     = 0;
        caps->cqc_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
-       caps->mpt_ba_pg_sz      = 0;
-       caps->mpt_buf_pg_sz     = 0;
        caps->mpt_hop_num       = HNS_ROCE_CONTEXT_HOP_NUM;
-       caps->mtt_ba_pg_sz      = 0;
-       caps->mtt_buf_pg_sz     = 0;
        caps->mtt_hop_num       = HNS_ROCE_MTT_HOP_NUM;
+       caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
        caps->wqe_sq_hop_num    = HNS_ROCE_SQWQE_HOP_NUM;
        caps->wqe_sge_hop_num   = HNS_ROCE_EXT_SGE_HOP_NUM;
        caps->wqe_rq_hop_num    = HNS_ROCE_RQWQE_HOP_NUM;
-       caps->cqe_ba_pg_sz      = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
-       caps->cqe_buf_pg_sz     = 0;
        caps->cqe_hop_num       = HNS_ROCE_CQE_HOP_NUM;
-       caps->srqwqe_ba_pg_sz   = 0;
-       caps->srqwqe_buf_pg_sz  = 0;
        caps->srqwqe_hop_num    = HNS_ROCE_SRQWQE_HOP_NUM;
-       caps->idx_ba_pg_sz      = 0;
-       caps->idx_buf_pg_sz     = 0;
        caps->idx_hop_num       = HNS_ROCE_IDX_HOP_NUM;
-       caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
+       caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
+       caps->chunk_sz          = HNS_ROCE_V2_TABLE_CHUNK_SIZE;
  
        caps->flags             = HNS_ROCE_CAP_FLAG_REREG_MR |
                                  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
-                                 HNS_ROCE_CAP_FLAG_RECORD_DB |
-                                 HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+                                 HNS_ROCE_CAP_FLAG_CQ_RECORD_DB |
+                                 HNS_ROCE_CAP_FLAG_QP_RECORD_DB;
  
        caps->pkey_table_len[0] = 1;
-       caps->gid_table_len[0]  = HNS_ROCE_V2_GID_INDEX_NUM;
        caps->ceqe_depth        = HNS_ROCE_V2_COMP_EQE_NUM;
        caps->aeqe_depth        = HNS_ROCE_V2_ASYNC_EQE_NUM;
-       caps->aeqe_size         = HNS_ROCE_AEQE_SIZE;
-       caps->ceqe_size         = HNS_ROCE_CEQE_SIZE;
        caps->local_ca_ack_delay = 0;
        caps->max_mtu = IB_MTU_4096;
  
  
        caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
                       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
-                      HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+                      HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL | HNS_ROCE_CAP_FLAG_XRC;
  
        caps->num_qpc_timer       = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
        caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
-       caps->qpc_timer_ba_pg_sz  = 0;
-       caps->qpc_timer_buf_pg_sz = 0;
        caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
        caps->num_cqc_timer       = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
        caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
-       caps->cqc_timer_ba_pg_sz  = 0;
-       caps->cqc_timer_buf_pg_sz = 0;
        caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
  
-       caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
-       caps->sccc_ba_pg_sz       = 0;
-       caps->sccc_buf_pg_sz      = 0;
        caps->sccc_hop_num        = HNS_ROCE_SCCC_HOP_NUM;
  
        if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
                caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
                                                          caps->gmv_entry_sz);
                caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
-               caps->gmv_ba_pg_sz = 0;
-               caps->gmv_buf_pg_sz = 0;
                caps->gid_table_len[0] = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
                                         caps->gmv_entry_sz);
+               caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INL_EXT;
+       } else {
+               caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
+               caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
+               caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE;
+               caps->qpc_sz = HNS_ROCE_V2_QPC_SZ;
+               caps->sccc_sz = HNS_ROCE_V2_SCCC_SZ;
+               caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
+               caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
        }
  }
  
@@@ -1979,6 -2052,70 +2054,70 @@@ static void calc_pg_sz(u32 obj_num, u3
                *buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
  }
  
+ static void set_hem_page_size(struct hns_roce_dev *hr_dev)
+ {
+       struct hns_roce_caps *caps = &hr_dev->caps;
+       /* EQ */
+       caps->eqe_ba_pg_sz = 0;
+       caps->eqe_buf_pg_sz = 0;
+       /* Link Table */
+       caps->tsq_buf_pg_sz = 0;
+       /* MR */
+       caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
+       caps->pbl_buf_pg_sz = 0;
+       calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
+                  caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
+                  HEM_TYPE_MTPT);
+       /* QP */
+       caps->qpc_timer_ba_pg_sz  = 0;
+       caps->qpc_timer_buf_pg_sz = 0;
+       caps->mtt_ba_pg_sz = 0;
+       caps->mtt_buf_pg_sz = 0;
+       calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
+                  caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
+                  HEM_TYPE_QPC);
+       if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
+               calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num,
+                          caps->sccc_bt_num, &caps->sccc_buf_pg_sz,
+                          &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC);
+       /* CQ */
+       calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
+                  caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
+                  HEM_TYPE_CQC);
+       calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num,
+                  1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
+       if (caps->cqc_timer_entry_sz)
+               calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
+                          caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
+                          &caps->cqc_timer_buf_pg_sz,
+                          &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
+       /* SRQ */
+       if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) {
+               calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz,
+                          caps->srqc_hop_num, caps->srqc_bt_num,
+                          &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz,
+                          HEM_TYPE_SRQC);
+               calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
+                          caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
+                          &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
+               calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz,
+                          caps->idx_hop_num, 1, &caps->idx_buf_pg_sz,
+                          &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
+       }
+       /* GMV */
+       caps->gmv_ba_pg_sz = 0;
+       caps->gmv_buf_pg_sz = 0;
+ }
  static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
  {
        struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
        caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
                                                V2_QUERY_PF_CAPS_C_MAX_GID_M,
                                                V2_QUERY_PF_CAPS_C_MAX_GID_S);
+       caps->gid_table_len[0] /= hr_dev->func_num;
        caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
                                             V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
                                             V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
        caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
                                             V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
                                             V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
+       caps->cong_type = roce_get_field(resp_d->wq_hop_num_max_srqs,
+                                        V2_QUERY_PF_CAPS_D_CONG_TYPE_M,
+                                        V2_QUERY_PF_CAPS_D_CONG_TYPE_S);
        caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
        caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
                                               V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
                                               V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
        caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
                                                V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
                                                V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
        caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
                                               V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
                                               V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
        caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
        caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
        caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
-       caps->mtt_ba_pg_sz = 0;
-       caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
+       caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
+       caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
        caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
        caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
  
                caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
                                                    caps->gmv_entry_sz);
                caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0;
-               caps->gmv_ba_pg_sz = 0;
-               caps->gmv_buf_pg_sz = 0;
                caps->gid_table_len[0] = caps->gmv_bt_num *
                                (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz);
        }
  
-       calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num,
-                  caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
-                  HEM_TYPE_QPC);
-       calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
-                  caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
-                  HEM_TYPE_MTPT);
-       calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
-                  caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
-                  HEM_TYPE_CQC);
-       calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
-                  caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
-                  &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
-       caps->sccc_hop_num = ctx_hop_num;
        caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
        caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
  
-       calc_pg_sz(caps->num_qps, caps->sccc_sz,
-                  caps->sccc_hop_num, caps->sccc_bt_num,
-                  &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
-                  HEM_TYPE_SCCC);
-       calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
-                  caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
-                  &caps->cqc_timer_buf_pg_sz,
-                  &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
-       calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
-                  1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
-       calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
-                  caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
-                  &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
-       calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
-                  1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
        return 0;
  }
  
- static int hns_roce_config_qpc_size(struct hns_roce_dev *hr_dev)
+ static int config_hem_entry_size(struct hns_roce_dev *hr_dev, u32 type, u32 val)
  {
        struct hns_roce_cmq_desc desc;
-       struct hns_roce_cfg_entry_size *cfg_size =
-                                 (struct hns_roce_cfg_entry_size *)desc.data;
+       struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
  
        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
                                      false);
  
-       cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_QPC_SIZE);
-       cfg_size->size = cpu_to_le32(hr_dev->caps.qpc_sz);
-       return hns_roce_cmq_send(hr_dev, &desc, 1);
- }
- static int hns_roce_config_sccc_size(struct hns_roce_dev *hr_dev)
- {
-       struct hns_roce_cmq_desc desc;
-       struct hns_roce_cfg_entry_size *cfg_size =
-                                 (struct hns_roce_cfg_entry_size *)desc.data;
-       hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_ENTRY_SIZE,
-                                     false);
-       cfg_size->type = cpu_to_le32(HNS_ROCE_CFG_SCCC_SIZE);
-       cfg_size->size = cpu_to_le32(hr_dev->caps.sccc_sz);
+       hr_reg_write(req, CFG_HEM_ENTRY_SIZE_TYPE, type);
+       hr_reg_write(req, CFG_HEM_ENTRY_SIZE_VALUE, val);
  
        return hns_roce_cmq_send(hr_dev, &desc, 1);
  }
  
  static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
  {
+       struct hns_roce_caps *caps = &hr_dev->caps;
        int ret;
  
        if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
                return 0;
  
-       ret = hns_roce_config_qpc_size(hr_dev);
+       ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
+                                   caps->qpc_sz);
        if (ret) {
                dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret);
                return ret;
        }
  
-       ret = hns_roce_config_sccc_size(hr_dev);
+       ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_SCCC_SIZE,
+                                   caps->sccc_sz);
        if (ret)
                dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret);
  
        return ret;
  }
  
+ static int hns_roce_v2_vf_profile(struct hns_roce_dev *hr_dev)
+ {
+       int ret;
+       hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+       hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
+       hr_dev->func_num = 1;
+       ret = hns_roce_query_vf_resource(hr_dev);
+       if (ret) {
+               dev_err(hr_dev->dev,
+                       "Query the VF resource fail, ret = %d.\n", ret);
+               return ret;
+       }
+       set_default_caps(hr_dev);
+       set_hem_page_size(hr_dev);
+       ret = hns_roce_v2_set_bt(hr_dev);
+       if (ret) {
+               dev_err(hr_dev->dev,
+                       "Configure the VF bt attribute fail, ret = %d.\n",
+                       ret);
+               return ret;
+       }
+       return 0;
+ }
  static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
  {
        struct hns_roce_caps *caps = &hr_dev->caps;
                return ret;
        }
  
+       if (hr_dev->is_vf)
+               return hns_roce_v2_vf_profile(hr_dev);
+       ret = hns_roce_query_func_info(hr_dev);
+       if (ret) {
+               dev_err(hr_dev->dev, "Query function info fail, ret = %d.\n",
+                       ret);
+               return ret;
+       }
        ret = hns_roce_config_global_param(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
                return ret;
        }
  
-       ret = hns_roce_set_vf_switch_param(hr_dev, 0);
+       ret = hns_roce_set_vf_switch_param(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev,
                        "failed to set function switch param, ret = %d.\n",
        hr_dev->vendor_part_id = hr_dev->pci_dev->device;
        hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
  
-       caps->pbl_ba_pg_sz      = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
-       caps->pbl_buf_pg_sz     = 0;
        caps->pbl_hop_num       = HNS_ROCE_PBL_HOP_NUM;
-       caps->eqe_ba_pg_sz      = 0;
-       caps->eqe_buf_pg_sz     = 0;
        caps->eqe_hop_num       = HNS_ROCE_EQE_HOP_NUM;
-       caps->tsq_buf_pg_sz     = 0;
  
        ret = hns_roce_query_pf_caps(hr_dev);
        if (ret)
                return ret;
        }
  
+       set_hem_page_size(hr_dev);
        ret = hns_roce_v2_set_bt(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev,
@@@ -2507,6 -2641,22 +2643,22 @@@ static void hns_roce_free_link_table(st
                          link_tbl->table.map);
  }
  
+ static void free_dip_list(struct hns_roce_dev *hr_dev)
+ {
+       struct hns_roce_dip *hr_dip;
+       struct hns_roce_dip *tmp;
+       unsigned long flags;
+       spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+       list_for_each_entry_safe(hr_dip, tmp, &hr_dev->dip_list, node) {
+               list_del(&hr_dip->node);
+               kfree(hr_dip);
+       }
+       spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+ }
  static int get_hem_table(struct hns_roce_dev *hr_dev)
  {
        unsigned int qpc_count;
        int ret;
        int i;
  
+       /* Alloc memory for source address table buffer space chunk */
+       for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
+            gmv_count++) {
+               ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
+               if (ret)
+                       goto err_gmv_failed;
+       }
+       if (hr_dev->is_vf)
+               return 0;
        /* Alloc memory for QPC Timer buffer space chunk */
        for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
             qpc_count++) {
                }
        }
  
-       /* Alloc memory for GMV(GID/MAC/VLAN) table buffer space chunk */
-       for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num;
-            gmv_count++) {
-               ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count);
-               if (ret) {
-                       dev_err(hr_dev->dev,
-                               "failed to get gmv table, ret = %d.\n", ret);
-                       goto err_gmv_failed;
-               }
-       }
        return 0;
  
- err_gmv_failed:
-       for (i = 0; i < gmv_count; i++)
-               hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
  err_cqc_timer_failed:
        for (i = 0; i < cqc_count; i++)
                hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
@@@ -2562,19 -2708,47 +2710,47 @@@ err_qpc_timer_failed
        for (i = 0; i < qpc_count; i++)
                hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
  
+ err_gmv_failed:
+       for (i = 0; i < gmv_count; i++)
+               hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
        return ret;
  }
  
+ static void put_hem_table(struct hns_roce_dev *hr_dev)
+ {
+       int i;
+       for (i = 0; i < hr_dev->caps.gmv_entry_num; i++)
+               hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i);
+       if (hr_dev->is_vf)
+               return;
+       for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++)
+               hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
+       for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++)
+               hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
+ }
  static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
  {
        struct hns_roce_v2_priv *priv = hr_dev->priv;
        int ret;
  
+       ret = get_hem_table(hr_dev);
+       if (ret)
+               return ret;
+       if (hr_dev->is_vf)
+               return 0;
        /* TSQ includes SQ doorbell and ack doorbell */
        ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
        if (ret) {
                dev_err(hr_dev->dev, "failed to init TSQ, ret = %d.\n", ret);
-               return ret;
+               goto err_tsq_init_failed;
        }
  
        ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
                goto err_tpq_init_failed;
        }
  
-       ret = get_hem_table(hr_dev);
-       if (ret)
-               goto err_get_hem_table_failed;
        return 0;
  
- err_get_hem_table_failed:
-       hns_roce_free_link_table(hr_dev, &priv->tpq);
+ err_tsq_init_failed:
+       put_hem_table(hr_dev);
  
  err_tpq_init_failed:
-       hns_roce_free_link_table(hr_dev, &priv->tsq);
+       hns_roce_free_link_table(hr_dev, &priv->tpq);
  
        return ret;
  }
@@@ -2604,38 -2774,13 +2776,13 @@@ static void hns_roce_v2_exit(struct hns
  
        hns_roce_function_clear(hr_dev);
  
-       hns_roce_free_link_table(hr_dev, &priv->tpq);
-       hns_roce_free_link_table(hr_dev, &priv->tsq);
- }
- static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
- {
-       struct hns_roce_cmq_desc desc;
-       struct hns_roce_mbox_status *mb_st =
-                                      (struct hns_roce_mbox_status *)desc.data;
-       int status;
-       hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
-       status = hns_roce_cmq_send(hr_dev, &desc, 1);
-       if (status)
-               return status;
-       return le32_to_cpu(mb_st->mb_status_hw_run);
- }
- static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
- {
-       u32 status = hns_roce_query_mbox_status(hr_dev);
-       return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
- }
- static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
- {
-       u32 status = hns_roce_query_mbox_status(hr_dev);
+       if (!hr_dev->is_vf) {
+               hns_roce_free_link_table(hr_dev, &priv->tpq);
+               hns_roce_free_link_table(hr_dev, &priv->tsq);
+       }
  
-       return status & HNS_ROCE_HW_MB_STATUS_MASK;
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
+               free_dip_list(hr_dev);
  }
  
  static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
        return hns_roce_cmq_send(hr_dev, &desc, 1);
  }
  
- static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
-                                u64 out_param, u32 in_modifier, u8 op_modifier,
-                                u16 op, u16 token, int event)
+ static int v2_wait_mbox_complete(struct hns_roce_dev *hr_dev, u32 timeout,
+                                u8 *complete_status)
  {
-       struct device *dev = hr_dev->dev;
+       struct hns_roce_mbox_status *mb_st;
+       struct hns_roce_cmq_desc desc;
        unsigned long end;
-       int ret;
+       int ret = -EBUSY;
+       u32 status;
+       bool busy;
+       mb_st = (struct hns_roce_mbox_status *)desc.data;
+       end = msecs_to_jiffies(timeout) + jiffies;
+       while (v2_chk_mbox_is_avail(hr_dev, &busy)) {
+               status = 0;
+               hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST,
+                                             true);
+               ret = __hns_roce_cmq_send(hr_dev, &desc, 1);
+               if (!ret) {
+                       status = le32_to_cpu(mb_st->mb_status_hw_run);
+                       /* No pending message exists in ROCEE mbox. */
+                       if (!(status & MB_ST_HW_RUN_M))
+                               break;
+               } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+                       break;
+               }
  
-       end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
-       while (hns_roce_v2_cmd_pending(hr_dev)) {
                if (time_after(jiffies, end)) {
-                       dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
-                               (int)end);
-                       return -EAGAIN;
+                       dev_err_ratelimited(hr_dev->dev,
+                                           "failed to wait mbox status 0x%x\n",
+                                           status);
+                       return -ETIMEDOUT;
                }
                cond_resched();
+               ret = -EBUSY;
        }
  
-       ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
-                                op_modifier, op, token, event);
-       if (ret)
-               dev_err(dev, "Post mailbox fail(%d)\n", ret);
+       if (!ret) {
+               *complete_status = (u8)(status & MB_ST_COMPLETE_M);
+       } else if (!v2_chk_mbox_is_avail(hr_dev, &busy)) {
+               /* Ignore all errors if the mbox is unavailable. */
+               ret = 0;
+               *complete_status = MB_ST_COMPLETE_M;
+       }
  
        return ret;
  }
  
- static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
-                               unsigned int timeout)
+ static int v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
+                       u64 out_param, u32 in_modifier, u8 op_modifier,
+                       u16 op, u16 token, int event)
  {
-       struct device *dev = hr_dev->dev;
-       unsigned long end;
-       u32 status;
-       end = msecs_to_jiffies(timeout) + jiffies;
-       while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
-               cond_resched();
+       u8 status = 0;
+       int ret;
  
-       if (hns_roce_v2_cmd_pending(hr_dev)) {
-               dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
-               return -ETIMEDOUT;
+       /* Waiting for the mbox to be idle */
+       ret = v2_wait_mbox_complete(hr_dev, HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS,
+                                   &status);
+       if (unlikely(ret)) {
+               dev_err_ratelimited(hr_dev->dev,
+                                   "failed to check post mbox status = 0x%x, ret = %d.\n",
+                                   status, ret);
+               return ret;
        }
  
-       status = hns_roce_v2_cmd_complete(hr_dev);
-       if (status != 0x1) {
-               if (status == CMD_RST_PRC_EBUSY)
-                       return status;
+       /* Post new message to mbox */
+       ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
+                                op_modifier, op, token, event);
+       if (ret)
+               dev_err_ratelimited(hr_dev->dev,
+                                   "failed to post mailbox, ret = %d.\n", ret);
+       return ret;
+ }
+ static int v2_poll_mbox_done(struct hns_roce_dev *hr_dev, unsigned int timeout)
+ {
+       u8 status = 0;
+       int ret;
  
-               dev_err(dev, "mailbox status 0x%x!\n", status);
-               return -EBUSY;
+       ret = v2_wait_mbox_complete(hr_dev, timeout, &status);
+       if (!ret) {
+               if (status != MB_ST_COMPLETE_SUCC)
+                       return -EBUSY;
+       } else {
+               dev_err_ratelimited(hr_dev->dev,
+                                   "failed to check mbox status = 0x%x, ret = %d.\n",
+                                   status, ret);
        }
  
-       return 0;
+       return ret;
  }
  
  static void copy_gid(void *dest, const union ib_gid *gid)
@@@ -2790,7 -2974,7 +2976,7 @@@ static int config_gmv_table(struct hns_
        return hns_roce_cmq_send(hr_dev, desc, 2);
  }
  
- static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
+ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
                               int gid_index, const union ib_gid *gid,
                               const struct ib_gid_attr *attr)
  {
@@@ -3079,14 -3263,31 +3265,31 @@@ static void *get_sw_cqe_v2(struct hns_r
                !!(n & hr_cq->cq_depth)) ? cqe : NULL;
  }
  
- static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)
+ static inline void update_cq_db(struct hns_roce_dev *hr_dev,
+                               struct hns_roce_cq *hr_cq)
  {
-       *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;
+       if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) {
+               *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M;
+       } else {
+               struct hns_roce_v2_db cq_db = {};
+               roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S,
+                              hr_cq->cqn);
+               roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+                              HNS_ROCE_V2_CQ_DB);
+               roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
+                              V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
+               roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
+                              V2_CQ_DB_CMD_SN_S, 1);
+               hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
+       }
  }
  
  static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
                                   struct hns_roce_srq *srq)
  {
+       struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
        struct hns_roce_v2_cqe *cqe, *dest;
        u32 prod_index;
        int nfreed = 0;
  
        if (nfreed) {
                hr_cq->cons_index += nfreed;
-               hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+               update_cq_db(hr_dev, hr_cq);
        }
  }
  
@@@ -3224,37 -3425,33 +3427,33 @@@ static int hns_roce_v2_req_notify_cq(st
  {
        struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
        struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
-       u32 notification_flag;
-       __le32 doorbell[2];
+       struct hns_roce_v2_db cq_db = {};
+       u32 notify_flag;
  
-       doorbell[0] = 0;
-       doorbell[1] = 0;
-       notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
-                            V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
        /*
-        * flags = 0; Notification Flag = 1, next
-        * flags = 1; Notification Flag = 0, solocited
+        * flags = 0, then notify_flag : next
+        * flags = 1, then notify_flag : solicited
         */
-       roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
-                      hr_cq->cqn);
-       roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
-                      HNS_ROCE_V2_CQ_DB_NTR);
-       roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
-                      V2_CQ_DB_PARAMETER_CONS_IDX_S, hr_cq->cons_index);
-       roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
-                      V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
-       roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
-                    notification_flag);
-       hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
+       notify_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+                     V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
+       roce_set_field(cq_db.byte_4, V2_DB_TAG_M, V2_DB_TAG_S, hr_cq->cqn);
+       roce_set_field(cq_db.byte_4, V2_DB_CMD_M, V2_DB_CMD_S,
+                      HNS_ROCE_V2_CQ_DB_NOTIFY);
+       roce_set_field(cq_db.parameter, V2_CQ_DB_CONS_IDX_M,
+                      V2_CQ_DB_CONS_IDX_S, hr_cq->cons_index);
+       roce_set_field(cq_db.parameter, V2_CQ_DB_CMD_SN_M,
+                      V2_CQ_DB_CMD_SN_S, hr_cq->arm_sn);
+       roce_set_bit(cq_db.parameter, V2_CQ_DB_NOTIFY_TYPE_S, notify_flag);
+       hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg);
  
        return 0;
  }
  
  static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
-                                                   struct hns_roce_qp **cur_qp,
-                                                   struct ib_wc *wc)
+                                       struct hns_roce_qp *qp,
+                                       struct ib_wc *wc)
  {
        struct hns_roce_rinl_sge *sge_list;
        u32 wr_num, wr_cnt, sge_num;
  
        wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
                                V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
-       wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
+       wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
  
-       sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
-       sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
-       wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
+       sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
+       sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
+       wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
        data_len = wc->byte_len;
  
        for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
@@@ -3401,21 -3598,205 +3600,205 @@@ static void get_cqe_status(struct hns_r
                init_flush_work(hr_dev, qp);
  }
  
+ static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
+                     struct hns_roce_qp **cur_qp)
+ {
+       struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+       struct hns_roce_qp *hr_qp = *cur_qp;
+       u32 qpn;
+       qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
+                            V2_CQE_BYTE_16_LCL_QPN_S) &
+             HNS_ROCE_V2_CQE_QPN_MASK;
+       if (!hr_qp || qpn != hr_qp->qpn) {
+               hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
+               if (unlikely(!hr_qp)) {
+                       ibdev_err(&hr_dev->ib_dev,
+                                 "CQ %06lx with entry for unknown QPN %06x\n",
+                                 hr_cq->cqn, qpn);
+                       return -EINVAL;
+               }
+               *cur_qp = hr_qp;
+       }
+       return 0;
+ }
+ /*
+  * mapped-value = 1 + real-value
+  * The ib wc opcode's real value is start from 0, In order to distinguish
+  * between initialized and uninitialized map values, we plus 1 to the actual
+  * value when defining the mapping, so that the validity can be identified by
+  * checking whether the mapped value is greater than 0.
+  */
+ #define HR_WC_OP_MAP(hr_key, ib_key) \
+               [HNS_ROCE_V2_WQE_OP_ ## hr_key] = 1 + IB_WC_ ## ib_key
+ static const u32 wc_send_op_map[] = {
+       HR_WC_OP_MAP(SEND,                      SEND),
+       HR_WC_OP_MAP(SEND_WITH_INV,             SEND),
+       HR_WC_OP_MAP(SEND_WITH_IMM,             SEND),
+       HR_WC_OP_MAP(RDMA_READ,                 RDMA_READ),
+       HR_WC_OP_MAP(RDMA_WRITE,                RDMA_WRITE),
+       HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM,       RDMA_WRITE),
+       HR_WC_OP_MAP(LOCAL_INV,                 LOCAL_INV),
+       HR_WC_OP_MAP(ATOM_CMP_AND_SWAP,         COMP_SWAP),
+       HR_WC_OP_MAP(ATOM_FETCH_AND_ADD,        FETCH_ADD),
+       HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP,     MASKED_COMP_SWAP),
+       HR_WC_OP_MAP(ATOM_MSK_FETCH_AND_ADD,    MASKED_FETCH_ADD),
+       HR_WC_OP_MAP(FAST_REG_PMR,              REG_MR),
+       HR_WC_OP_MAP(BIND_MW,                   REG_MR),
+ };
+ static int to_ib_wc_send_op(u32 hr_opcode)
+ {
+       if (hr_opcode >= ARRAY_SIZE(wc_send_op_map))
+               return -EINVAL;
+       return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 :
+                                          -EINVAL;
+ }
+ static const u32 wc_recv_op_map[] = {
+       HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM,               WITH_IMM),
+       HR_WC_OP_MAP(SEND,                              RECV),
+       HR_WC_OP_MAP(SEND_WITH_IMM,                     WITH_IMM),
+       HR_WC_OP_MAP(SEND_WITH_INV,                     RECV),
+ };
+ static int to_ib_wc_recv_op(u32 hr_opcode)
+ {
+       if (hr_opcode >= ARRAY_SIZE(wc_recv_op_map))
+               return -EINVAL;
+       return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 :
+                                          -EINVAL;
+ }
+ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+ {
+       u32 hr_opcode;
+       int ib_opcode;
+       wc->wc_flags = 0;
+       hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+                                  V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+       switch (hr_opcode) {
+       case HNS_ROCE_V2_WQE_OP_RDMA_READ:
+               wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+               break;
+       case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
+       case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
+               wc->wc_flags |= IB_WC_WITH_IMM;
+               break;
+       case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
+               wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+               break;
+       case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
+       case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
+       case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
+       case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
+               wc->byte_len  = 8;
+               break;
+       default:
+               break;
+       }
+       ib_opcode = to_ib_wc_send_op(hr_opcode);
+       if (ib_opcode < 0)
+               wc->status = IB_WC_GENERAL_ERR;
+       else
+               wc->opcode = ib_opcode;
+ }
+ static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
+                                    struct hns_roce_v2_cqe *cqe)
+ {
+       return wc->qp->qp_type != IB_QPT_UD &&
+              wc->qp->qp_type != IB_QPT_GSI &&
+              (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
+               hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
+               hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
+              roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S);
+ }
+ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
+ {
+       struct hns_roce_qp *qp = to_hr_qp(wc->qp);
+       u32 hr_opcode;
+       int ib_opcode;
+       int ret;
+       wc->byte_len = le32_to_cpu(cqe->byte_cnt);
+       hr_opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
+                                  V2_CQE_BYTE_4_OPCODE_S) & 0x1f;
+       switch (hr_opcode) {
+       case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
+       case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
+               wc->wc_flags = IB_WC_WITH_IMM;
+               wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata));
+               break;
+       case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
+               wc->wc_flags = IB_WC_WITH_INVALIDATE;
+               wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
+               break;
+       default:
+               wc->wc_flags = 0;
+       }
+       ib_opcode = to_ib_wc_recv_op(hr_opcode);
+       if (ib_opcode < 0)
+               wc->status = IB_WC_GENERAL_ERR;
+       else
+               wc->opcode = ib_opcode;
+       if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
+               ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
+               if (unlikely(ret))
+                       return ret;
+       }
+       wc->sl = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
+                               V2_CQE_BYTE_32_SL_S);
+       wc->src_qp = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_RMT_QPN_M,
+                                   V2_CQE_BYTE_32_RMT_QPN_S);
+       wc->slid = 0;
+       wc->wc_flags |= roce_get_bit(cqe->byte_32, V2_CQE_BYTE_32_GRH_S) ?
+                                    IB_WC_GRH : 0;
+       wc->port_num = roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_PORTN_M,
+                                     V2_CQE_BYTE_32_PORTN_S);
+       wc->pkey_index = 0;
+       if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+               wc->vlan_id = roce_get_field(cqe->byte_28, V2_CQE_BYTE_28_VID_M,
+                                            V2_CQE_BYTE_28_VID_S);
+               wc->wc_flags |= IB_WC_WITH_VLAN;
+       } else {
+               wc->vlan_id = 0xffff;
+       }
+       wc->network_hdr_type = roce_get_field(cqe->byte_28,
+                                             V2_CQE_BYTE_28_PORT_TYPE_M,
+                                             V2_CQE_BYTE_28_PORT_TYPE_S);
+       return 0;
+ }
  static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
                                struct hns_roce_qp **cur_qp, struct ib_wc *wc)
  {
        struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
+       struct hns_roce_qp *qp = *cur_qp;
        struct hns_roce_srq *srq = NULL;
        struct hns_roce_v2_cqe *cqe;
-       struct hns_roce_qp *hr_qp;
        struct hns_roce_wq *wq;
        int is_send;
-       u16 wqe_ctr;
-       u32 opcode;
-       u32 qpn;
+       u16 wqe_idx;
        int ret;
  
-       /* Find cqe according to consumer index */
        cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
        if (!cqe)
                return -EAGAIN;
        /* Memory barrier */
        rmb();
  
-       /* 0->SQ, 1->RQ */
-       is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
-       qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
-                               V2_CQE_BYTE_16_LCL_QPN_S);
-       if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
-               hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
-               if (unlikely(!hr_qp)) {
-                       ibdev_err(&hr_dev->ib_dev,
-                                 "CQ %06lx with entry for unknown QPN %06x\n",
-                                 hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
-                       return -EINVAL;
-               }
-               *cur_qp = hr_qp;
-       }
+       ret = get_cur_qp(hr_cq, cqe, &qp);
+       if (ret)
+               return ret;
  
-       wc->qp = &(*cur_qp)->ibqp;
+       wc->qp = &qp->ibqp;
        wc->vendor_err = 0;
  
+       wqe_idx = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
+                                V2_CQE_BYTE_4_WQE_INDX_S);
+       is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
        if (is_send) {
-               wq = &(*cur_qp)->sq;
-               if ((*cur_qp)->sq_signal_bits) {
-                       /*
-                        * If sg_signal_bit is 1,
-                        * firstly tail pointer updated to wqe
-                        * which current cqe correspond to
-                        */
-                       wqe_ctr = (u16)roce_get_field(cqe->byte_4,
-                                                     V2_CQE_BYTE_4_WQE_INDX_M,
-                                                     V2_CQE_BYTE_4_WQE_INDX_S);
-                       wq->tail += (wqe_ctr - (u16)wq->tail) &
+               wq = &qp->sq;
+               /* If sg_signal_bit is set, tail pointer will be updated to
+                * the WQE corresponding to the current CQE.
+                */
+               if (qp->sq_signal_bits)
+                       wq->tail += (wqe_idx - (u16)wq->tail) &
                                    (wq->wqe_cnt - 1);
-               }
  
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
-       } else if ((*cur_qp)->ibqp.srq) {
-               srq = to_hr_srq((*cur_qp)->ibqp.srq);
-               wqe_ctr = (u16)roce_get_field(cqe->byte_4,
-                                             V2_CQE_BYTE_4_WQE_INDX_M,
-                                             V2_CQE_BYTE_4_WQE_INDX_S);
-               wc->wr_id = srq->wrid[wqe_ctr];
-               hns_roce_free_srq_wqe(srq, wqe_ctr);
-       } else {
-               /* Update tail pointer, record wr_id */
-               wq = &(*cur_qp)->rq;
-               wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
-               ++wq->tail;
-       }
-       get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc);
-       if (unlikely(wc->status != IB_WC_SUCCESS))
-               return 0;
  
-       if (is_send) {
-               wc->wc_flags = 0;
-               /* SQ corresponding to CQE */
-               switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
-                                      V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
-               case HNS_ROCE_V2_WQE_OP_SEND:
-                       wc->opcode = IB_WC_SEND;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_SEND_WITH_INV:
-                       wc->opcode = IB_WC_SEND;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM:
-                       wc->opcode = IB_WC_SEND;
-                       wc->wc_flags |= IB_WC_WITH_IMM;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_RDMA_READ:
-                       wc->opcode = IB_WC_RDMA_READ;
-                       wc->byte_len = le32_to_cpu(cqe->byte_cnt);
-                       break;
-               case HNS_ROCE_V2_WQE_OP_RDMA_WRITE:
-                       wc->opcode = IB_WC_RDMA_WRITE;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM:
-                       wc->opcode = IB_WC_RDMA_WRITE;
-                       wc->wc_flags |= IB_WC_WITH_IMM;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_LOCAL_INV:
-                       wc->opcode = IB_WC_LOCAL_INV;
-                       wc->wc_flags |= IB_WC_WITH_INVALIDATE;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP:
-                       wc->opcode = IB_WC_COMP_SWAP;
-                       wc->byte_len  = 8;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD:
-                       wc->opcode = IB_WC_FETCH_ADD;
-                       wc->byte_len  = 8;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP:
-                       wc->opcode = IB_WC_MASKED_COMP_SWAP;
-                       wc->byte_len  = 8;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD:
-                       wc->opcode = IB_WC_MASKED_FETCH_ADD;
-                       wc->byte_len  = 8;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_FAST_REG_PMR:
-                       wc->opcode = IB_WC_REG_MR;
-                       break;
-               case HNS_ROCE_V2_WQE_OP_BIND_MW:
-                       wc->opcode = IB_WC_REG_MR;
-                       break;
-               default:
-                       wc->status = IB_WC_GENERAL_ERR;
-                       break;
-               }
+               fill_send_wc(wc, cqe);
        } else {
-               /* RQ correspond to CQE */
-               wc->byte_len = le32_to_cpu(cqe->byte_cnt);
-               opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
-                                       V2_CQE_BYTE_4_OPCODE_S);
-               switch (opcode & 0x1f) {
-               case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
-                       wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
-                       wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->ex.imm_data =
-                               cpu_to_be32(le32_to_cpu(cqe->immtdata));
-                       break;
-               case HNS_ROCE_V2_OPCODE_SEND:
-                       wc->opcode = IB_WC_RECV;
-                       wc->wc_flags = 0;
-                       break;
-               case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
-                       wc->opcode = IB_WC_RECV;
-                       wc->wc_flags = IB_WC_WITH_IMM;
-                       wc->ex.imm_data =
-                               cpu_to_be32(le32_to_cpu(cqe->immtdata));
-                       break;
-               case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
-                       wc->opcode = IB_WC_RECV;
-                       wc->wc_flags = IB_WC_WITH_INVALIDATE;
-                       wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
-                       break;
-               default:
-                       wc->status = IB_WC_GENERAL_ERR;
-                       break;
-               }
-               if ((wc->qp->qp_type == IB_QPT_RC ||
-                    wc->qp->qp_type == IB_QPT_UC) &&
-                   (opcode == HNS_ROCE_V2_OPCODE_SEND ||
-                   opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
-                   opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
-                   (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
-                       ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
-                       if (unlikely(ret))
-                               return -EAGAIN;
-               }
-               wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
-                                           V2_CQE_BYTE_32_SL_S);
-               wc->src_qp = (u8)roce_get_field(cqe->byte_32,
-                                               V2_CQE_BYTE_32_RMT_QPN_M,
-                                               V2_CQE_BYTE_32_RMT_QPN_S);
-               wc->slid = 0;
-               wc->wc_flags |= (roce_get_bit(cqe->byte_32,
-                                             V2_CQE_BYTE_32_GRH_S) ?
-                                             IB_WC_GRH : 0);
-               wc->port_num = roce_get_field(cqe->byte_32,
-                               V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
-               wc->pkey_index = 0;
-               if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
-                       wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
-                                                         V2_CQE_BYTE_28_VID_M,
-                                                         V2_CQE_BYTE_28_VID_S);
-                       wc->wc_flags |= IB_WC_WITH_VLAN;
+               if (qp->ibqp.srq) {
+                       srq = to_hr_srq(qp->ibqp.srq);
+                       wc->wr_id = srq->wrid[wqe_idx];
+                       hns_roce_free_srq_wqe(srq, wqe_idx);
                } else {
-                       wc->vlan_id = 0xffff;
+                       wq = &qp->rq;
+                       wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+                       ++wq->tail;
                }
  
-               wc->network_hdr_type = roce_get_field(cqe->byte_28,
-                                                   V2_CQE_BYTE_28_PORT_TYPE_M,
-                                                   V2_CQE_BYTE_28_PORT_TYPE_S);
+               ret = fill_recv_wc(wc, cqe);
        }
  
-       return 0;
+       get_cqe_status(hr_dev, qp, hr_cq, cqe, wc);
+       if (unlikely(wc->status != IB_WC_SUCCESS))
+               return 0;
+       return ret;
  }
  
  static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
        }
  
        if (npolled)
-               hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
+               update_cq_db(hr_dev, hr_cq);
  
  out:
        spin_unlock_irqrestore(&hr_cq->lock, flags);
  }
  
  static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
-                             int step_idx)
+                             int step_idx, u16 *mbox_op)
  {
-       int op;
-       if (type == HEM_TYPE_SCCC && step_idx)
-               return -EINVAL;
+       u16 op;
  
        switch (type) {
        case HEM_TYPE_QPC:
                op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
                break;
        default:
-               dev_warn(hr_dev->dev,
-                        "table %u not to be written by mailbox!\n", type);
+               dev_warn(hr_dev->dev, "failed to check hem type %u.\n", type);
                return -EINVAL;
        }
  
-       return op + step_idx;
+       *mbox_op = op + step_idx;
+       return 0;
  }
  
- static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj, u64 bt_ba,
-                        u32 hem_type, int step_idx)
+ static int config_gmv_ba_to_hw(struct hns_roce_dev *hr_dev, unsigned long obj,
+                              dma_addr_t base_addr)
  {
-       struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_cmq_desc desc;
-       struct hns_roce_cfg_gmv_bt *gmv_bt =
-                               (struct hns_roce_cfg_gmv_bt *)desc.data;
-       int ret;
-       int op;
+       struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+       u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz);
+       u64 addr = to_hr_hw_page_addr(base_addr);
  
-       if (hem_type == HEM_TYPE_GMV) {
-               hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT,
-                                             false);
+       hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GMV_BT, false);
  
-               gmv_bt->gmv_ba_l = cpu_to_le32(bt_ba >> HNS_HW_PAGE_SHIFT);
-               gmv_bt->gmv_ba_h = cpu_to_le32(bt_ba >> (HNS_HW_PAGE_SHIFT +
-                                                        32));
-               gmv_bt->gmv_bt_idx = cpu_to_le32(obj /
-                       (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz));
+       hr_reg_write(req, CFG_GMV_BT_BA_L, lower_32_bits(addr));
+       hr_reg_write(req, CFG_GMV_BT_BA_H, upper_32_bits(addr));
+       hr_reg_write(req, CFG_GMV_BT_IDX, idx);
  
-               return hns_roce_cmq_send(hr_dev, &desc, 1);
      }
+       return hns_roce_cmq_send(hr_dev, &desc, 1);
+ }
  
-       op = get_op_for_set_hem(hr_dev, hem_type, step_idx);
-       if (op < 0)
-               return 0;
+ static int set_hem_to_hw(struct hns_roce_dev *hr_dev, int obj,
+                        dma_addr_t base_addr, u32 hem_type, int step_idx)
+ {
+       int ret;
+       u16 op;
  
-       mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
+       if (unlikely(hem_type == HEM_TYPE_GMV))
+               return config_gmv_ba_to_hw(hr_dev, obj, base_addr);
  
-       ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
-                               0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
+       if (unlikely(hem_type == HEM_TYPE_SCCC && step_idx))
+               return 0;
  
-       hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+       ret = get_op_for_set_hem(hr_dev, hem_type, step_idx, &op);
+       if (ret < 0)
+               return ret;
  
-       return ret;
+       return config_hem_ba_to_hw(hr_dev, obj, base_addr, op);
  }
  
  static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
@@@ -3911,6 -4148,16 +4150,16 @@@ static void set_qpc_wqe_cnt(struct hns_
                       ilog2(hr_qp->rq.wqe_cnt));
  }
  
+ static inline int get_cqn(struct ib_cq *ib_cq)
+ {
+       return ib_cq ? to_hr_cq(ib_cq)->cqn : 0;
+ }
+ static inline int get_pdn(struct ib_pd *ib_pd)
+ {
+       return ib_pd ? to_hr_pd(ib_pd)->pdn : 0;
+ }
  static void modify_qp_reset_to_init(struct ib_qp *ibqp,
                                    const struct ib_qp_attr *attr,
                                    int attr_mask,
         * 0 at the same time, else set them to 0x1.
         */
        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
-                      V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+                      V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
  
        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
                       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
  
        roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
-                      V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+                      V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
  
        roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
                       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
        roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
                       V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
  
+       if (ibqp->qp_type == IB_QPT_XRC_TGT) {
+               context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn);
+               roce_set_bit(context->byte_80_rnr_rx_cqn,
+                            V2_QPC_BYTE_80_XRC_QP_TYPE_S, 1);
+       }
        if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
                roce_set_bit(context->byte_68_rq_db,
                             V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
                       ((u32)hr_qp->rdb.dma) >> 1);
        context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
  
-       roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
-                   (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
+       if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
+               roce_set_bit(context->byte_76_srqn_op_en,
+                            V2_QPC_BYTE_76_RQIE_S,
+                            !!(hr_dev->caps.flags &
+                               HNS_ROCE_CAP_FLAG_RQ_INLINE));
  
        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
-                      V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+                      V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
        if (ibqp->srq) {
+               roce_set_bit(context->byte_76_srqn_op_en,
+                            V2_QPC_BYTE_76_SRQ_EN_S, 1);
                roce_set_field(context->byte_76_srqn_op_en,
                               V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
                               to_hr_srq(ibqp->srq)->srqn);
-               roce_set_bit(context->byte_76_srqn_op_en,
-                            V2_QPC_BYTE_76_SRQ_EN_S, 1);
        }
  
        roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
  
        roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
-                      V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+                      V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
  
        if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ)
                return;
@@@ -3993,22 -4251,23 +4253,23 @@@ static void modify_qp_init_to_init(stru
         * 0 at the same time, else set them to 0x1.
         */
        roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
-                      V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
+                      V2_QPC_BYTE_4_TST_S, to_hr_qp_type(ibqp->qp_type));
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
                       V2_QPC_BYTE_4_TST_S, 0);
  
        roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
-                      V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
+                      V2_QPC_BYTE_16_PD_S, get_pdn(ibqp->pd));
        roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
                       V2_QPC_BYTE_16_PD_S, 0);
  
        roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
-                      V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
+                      V2_QPC_BYTE_80_RX_CQN_S, get_cqn(ibqp->recv_cq));
        roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
                       V2_QPC_BYTE_80_RX_CQN_S, 0);
  
        roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
-                      V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+                      V2_QPC_BYTE_252_TX_CQN_S, get_cqn(ibqp->send_cq));
        roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
                       V2_QPC_BYTE_252_TX_CQN_S, 0);
  
@@@ -4133,17 -4392,6 +4394,6 @@@ static int config_qp_rq_buf(struct hns_
                       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
                       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
  
-       roce_set_field(context->byte_84_rq_ci_pi,
-                      V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
-                      V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
-       roce_set_field(qpc_mask->byte_84_rq_ci_pi,
-                      V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
-                      V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
-       roce_set_field(qpc_mask->byte_84_rq_ci_pi,
-                      V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
-                      V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
        return 0;
  }
  
@@@ -4240,7 -4488,7 +4490,7 @@@ static int modify_qp_init_to_rtr(struc
        u64 *mtts;
        u8 *dmac;
        u8 *smac;
-       int port;
+       u32 port;
        int ret;
  
        ret = config_qp_rq_buf(hr_dev, hr_qp, context, qpc_mask);
@@@ -4454,6 -4702,143 +4704,143 @@@ static inline u16 get_udp_sport(u32 fl
        return rdma_flow_label_to_udp_sport(fl);
  }
  
+ static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+                          u32 *dip_idx)
+ {
+       const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       struct hns_roce_dip *hr_dip;
+       unsigned long flags;
+       int ret = 0;
+       spin_lock_irqsave(&hr_dev->dip_list_lock, flags);
+       list_for_each_entry(hr_dip, &hr_dev->dip_list, node) {
+               if (!memcmp(grh->dgid.raw, hr_dip->dgid, 16))
+                       goto out;
+       }
+       /* If no dgid is found, a new dip and a mapping between dgid and
+        * dip_idx will be created.
+        */
+       hr_dip = kzalloc(sizeof(*hr_dip), GFP_ATOMIC);
+       if (!hr_dip) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
+       hr_dip->dip_idx = *dip_idx = ibqp->qp_num;
+       list_add_tail(&hr_dip->node, &hr_dev->dip_list);
+ out:
+       spin_unlock_irqrestore(&hr_dev->dip_list_lock, flags);
+       return ret;
+ }
+ enum {
+       CONG_DCQCN,
+       CONG_WINDOW,
+ };
+ enum {
+       UNSUPPORT_CONG_LEVEL,
+       SUPPORT_CONG_LEVEL,
+ };
+ enum {
+       CONG_LDCP,
+       CONG_HC3,
+ };
+ enum {
+       DIP_INVALID,
+       DIP_VALID,
+ };
+ static int check_cong_type(struct ib_qp *ibqp,
+                          struct hns_roce_congestion_algorithm *cong_alg)
+ {
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+       /* different congestion types match different configurations */
+       switch (hr_dev->caps.cong_type) {
+       case CONG_TYPE_DCQCN:
+               cong_alg->alg_sel = CONG_DCQCN;
+               cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+               cong_alg->dip_vld = DIP_INVALID;
+               break;
+       case CONG_TYPE_LDCP:
+               cong_alg->alg_sel = CONG_WINDOW;
+               cong_alg->alg_sub_sel = CONG_LDCP;
+               cong_alg->dip_vld = DIP_INVALID;
+               break;
+       case CONG_TYPE_HC3:
+               cong_alg->alg_sel = CONG_WINDOW;
+               cong_alg->alg_sub_sel = CONG_HC3;
+               cong_alg->dip_vld = DIP_INVALID;
+               break;
+       case CONG_TYPE_DIP:
+               cong_alg->alg_sel = CONG_DCQCN;
+               cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
+               cong_alg->dip_vld = DIP_VALID;
+               break;
+       default:
+               ibdev_err(&hr_dev->ib_dev,
+                         "error type(%u) for congestion selection.\n",
+                         hr_dev->caps.cong_type);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
+                          struct hns_roce_v2_qp_context *context,
+                          struct hns_roce_v2_qp_context *qpc_mask)
+ {
+       const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+       struct hns_roce_congestion_algorithm cong_field;
+       struct ib_device *ibdev = ibqp->device;
+       struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+       u32 dip_idx = 0;
+       int ret;
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ||
+           grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE)
+               return 0;
+       ret = check_cong_type(ibqp, &cong_field);
+       if (ret)
+               return ret;
+       hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
+                    hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+       hr_reg_write(qpc_mask, QPC_CONG_ALGO_TMPL_ID, 0);
+       hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
+       hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SEL, 0);
+       hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL,
+                    cong_field.alg_sub_sel);
+       hr_reg_write(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL, 0);
+       hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld);
+       hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD, 0);
+       /* if dip is disabled, there is no need to set dip idx */
+       if (cong_field.dip_vld == 0)
+               return 0;
+       ret = get_dip_ctx_idx(ibqp, attr, &dip_idx);
+       if (ret) {
+               ibdev_err(ibdev, "failed to fill cong field, ret = %d.\n", ret);
+               return ret;
+       }
+       hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx);
+       hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0);
+       return 0;
+ }
  static int hns_roce_v2_set_path(struct ib_qp *ibqp,
                                const struct ib_qp_attr *attr,
                                int attr_mask,
        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
                       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
  
+       ret = fill_cong_field(ibqp, attr, context, qpc_mask);
+       if (ret)
+               return ret;
        roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
                       V2_QPC_BYTE_24_TC_S, get_tclass(&attr->ah_attr.grh));
        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
@@@ -4687,7 -5076,6 +5078,6 @@@ static int hns_roce_v2_set_opt_fields(s
                               V2_QPC_BYTE_244_RNR_CNT_S, 0);
        }
  
-       /* RC&UC&UD required attr */
        if (attr_mask & IB_QP_SQ_PSN) {
                roce_set_field(context->byte_172_sq_psn,
                               V2_QPC_BYTE_172_SQ_CUR_PSN_M,
                               V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
        }
  
-       /* RC&UC required attr */
        if (attr_mask & IB_QP_RQ_PSN) {
                roce_set_field(context->byte_108_rx_reqepsn,
                               V2_QPC_BYTE_108_RX_REQ_EPSN_M,
@@@ -4808,6 -5195,29 +5197,29 @@@ static void hns_roce_v2_record_opt_fiel
        }
  }
  
+ /* Reset the software state of a QP after it has been modified to RESET:
+  * flush any stale CQEs that still reference this QPN, clear the RQ
+  * record doorbell and rewind all WQ head/tail indices so the QP can be
+  * reused from a clean state.
+  */
+ static void clear_qp(struct hns_roce_qp *hr_qp)
+ {
+       struct ib_qp *ibqp = &hr_qp->ibqp;
+       /* Discard completions for this QP still sitting in the send CQ. */
+       if (ibqp->send_cq)
+               hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
+                                    hr_qp->qpn, NULL);
+       /* Same for the recv CQ unless it is shared with the send CQ; the
+        * SRQ (if any) is passed so its WQEs can be reclaimed as well.
+        */
+       if (ibqp->recv_cq  && ibqp->recv_cq != ibqp->send_cq)
+               hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq),
+                                    hr_qp->qpn, ibqp->srq ?
+                                    to_hr_srq(ibqp->srq) : NULL);
+       /* Zero the record doorbell so a reused RQ starts from index 0. */
+       if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
+               *hr_qp->rdb.db_record = 0;
+       hr_qp->rq.head = 0;
+       hr_qp->rq.tail = 0;
+       hr_qp->sq.head = 0;
+       hr_qp->sq.tail = 0;
+       hr_qp->next_sge = 0;
+ }
  static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                                 const struct ib_qp_attr *attr,
                                 int attr_mask, enum ib_qp_state cur_state,
  
        /* When QP state is err, SQ and RQ WQE should be flushed */
        if (new_state == IB_QPS_ERR) {
-               spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
-               hr_qp->state = IB_QPS_ERR;
-               roce_set_field(context->byte_160_sq_ci_pi,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
-                              hr_qp->sq.head);
-               roce_set_field(qpc_mask->byte_160_sq_ci_pi,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
-                              V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
-               spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
-               if (!ibqp->srq) {
+               if (ibqp->qp_type != IB_QPT_XRC_TGT) {
+                       spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
+                       hr_qp->state = IB_QPS_ERR;
+                       roce_set_field(context->byte_160_sq_ci_pi,
+                                      V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+                                      V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
+                                      hr_qp->sq.head);
+                       roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+                                      V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+                                      V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+                       spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
+               }
+               if (!ibqp->srq && ibqp->qp_type != IB_QPT_XRC_INI &&
+                   ibqp->qp_type != IB_QPT_XRC_TGT) {
                        spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
+                       hr_qp->state = IB_QPS_ERR;
                        roce_set_field(context->byte_84_rq_ci_pi,
                               V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
                               V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
                goto out;
  
        roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
-                    ibqp->srq ? 1 : 0);
+                    ((to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC) ||
+                    ibqp->srq) ? 1 : 0);
        roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
                     V2_QPC_BYTE_108_INV_CREDIT_S, 0);
  
  
        hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
  
-       if (new_state == IB_QPS_RESET && !ibqp->uobject) {
-               hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
-                                    ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
-               if (ibqp->send_cq != ibqp->recv_cq)
-                       hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
-                                            hr_qp->qpn, NULL);
-               hr_qp->rq.head = 0;
-               hr_qp->rq.tail = 0;
-               hr_qp->sq.head = 0;
-               hr_qp->sq.tail = 0;
-               hr_qp->next_sge = 0;
-               if (hr_qp->rq.wqe_cnt)
-                       *hr_qp->rdb.db_record = 0;
-       }
+       if (new_state == IB_QPS_RESET && !ibqp->uobject)
+               clear_qp(hr_qp);
  
  out:
        return ret;
@@@ -5019,7 -5421,8 +5423,8 @@@ static int hns_roce_v2_query_qp(struct 
                                    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
  
        if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
-           hr_qp->ibqp.qp_type == IB_QPT_UC) {
+           hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+           hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) {
                struct ib_global_route *grh =
                                rdma_ah_retrieve_grh(&qp_attr->ah_attr);
  
        qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
                                                     V2_QPC_BYTE_140_RR_MAX_M,
                                                     V2_QPC_BYTE_140_RR_MAX_S);
        qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
                                                 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
                                                 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
@@@ -5068,6 -5472,7 +5474,7 @@@ done
        qp_attr->cur_qp_state = qp_attr->qp_state;
        qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
        qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
+       qp_attr->cap.max_inline_data = hr_qp->max_inline_data;
  
        if (!ibqp->uobject) {
                qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
        return ret;
  }
  
+ /* Whether a QP needs (and supports) an explicit modify-to-RESET before
+  * it is destroyed: only RC/UD/XRC service types qualify, and only when
+  * the QP is not already in the RESET state.
+  */
+ static inline int modify_qp_is_ok(struct hns_roce_qp *hr_qp)
+ {
+       return ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
+                hr_qp->ibqp.qp_type == IB_QPT_UD ||
+                hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
+                hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) &&
+               hr_qp->state != IB_QPS_RESET);
+ }
  static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
                                         struct hns_roce_qp *hr_qp,
                                         struct ib_udata *udata)
        unsigned long flags;
        int ret = 0;
  
-       if ((hr_qp->ibqp.qp_type == IB_QPT_RC ||
-            hr_qp->ibqp.qp_type == IB_QPT_UD) &&
-          hr_qp->state != IB_QPS_RESET) {
+       if (modify_qp_is_ok(hr_qp)) {
                /* Modify qp to reset before destroying qp */
                ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
                                            hr_qp->state, IB_QPS_RESET);
@@@ -5275,9 -5687,11 +5689,11 @@@ static int hns_roce_v2_write_srqc(struc
        }
  
        hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+       hr_reg_write(ctx, SRQC_SRQ_TYPE,
+                    !!(srq->ibsrq.srq_type == IB_SRQT_XRC));
        hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn);
        hr_reg_write(ctx, SRQC_SRQN, srq->srqn);
-       hr_reg_write(ctx, SRQC_XRCD, 0);
+       hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn);
        hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn);
        hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt));
        hr_reg_write(ctx, SRQC_RQWS,
@@@ -5481,6 -5895,12 +5897,12 @@@ static void hns_roce_irq_work_handle(st
        case HNS_ROCE_EVENT_TYPE_FLR:
                ibdev_warn(ibdev, "Function level reset.\n");
                break;
+       case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+               ibdev_err(ibdev, "xrc domain violation error.\n");
+               break;
+       case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
+               ibdev_err(ibdev, "invalid xrceth error.\n");
+               break;
        default:
                break;
        }
@@@ -5505,33 -5925,30 +5927,30 @@@ static void hns_roce_v2_init_irq_work(s
        queue_work(hr_dev->irq_workq, &(irq_work->work));
  }
  
- static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+ /* Ring the EQ doorbell: publish the new consumer index to hardware and
+  * select the doorbell command for this queue type (AEQ vs CEQ), armed
+  * or not according to the EQ's arm state.
+  */
+ static void update_eq_db(struct hns_roce_eq *eq)
  {
        struct hns_roce_dev *hr_dev = eq->hr_dev;
-       __le32 doorbell[2] = {};
+       struct hns_roce_v2_db eq_db = {};

        if (eq->type_flag == HNS_ROCE_AEQ) {
-               roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
-                              HNS_ROCE_V2_EQ_DB_CMD_S,
+               roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
                               eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
                               HNS_ROCE_EQ_DB_CMD_AEQ :
                               HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
        } else {
-               roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
-                              HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
+               roce_set_field(eq_db.byte_4, V2_EQ_DB_TAG_M, V2_EQ_DB_TAG_S,
+                              eq->eqn);

-               roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
-                              HNS_ROCE_V2_EQ_DB_CMD_S,
+               roce_set_field(eq_db.byte_4, V2_EQ_DB_CMD_M, V2_EQ_DB_CMD_S,
                               eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
                               HNS_ROCE_EQ_DB_CMD_CEQ :
                               HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
        }

-       roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
-                      HNS_ROCE_V2_EQ_DB_PARA_S,
-                      (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+       roce_set_field(eq_db.parameter, V2_EQ_DB_CONS_IDX_M,
+                      V2_EQ_DB_CONS_IDX_S, eq->cons_index);

-       hns_roce_write64(hr_dev, doorbell, eq->doorbell);
+       hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg);
  }
  
  static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
@@@ -5581,6 -5998,8 +6000,8 @@@ static int hns_roce_v2_aeq_int(struct h
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+               case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+               case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
                        hns_roce_qp_event(hr_dev, queue_num, event_type);
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
                aeqe = next_aeqe_sw_v2(eq);
        }
  
-       set_eq_cons_index_v2(eq);
+       update_eq_db(eq);
        return aeqe_found;
  }
  
@@@ -5656,7 -6075,7 +6077,7 @@@ static int hns_roce_v2_ceq_int(struct h
                ceqe = next_ceqe_sw_v2(eq);
        }
  
-       set_eq_cons_index_v2(eq);
+       update_eq_db(eq);
  
        return ceqe_found;
  }
@@@ -5710,58 -6129,34 +6131,34 @@@ static irqreturn_t hns_roce_v2_msix_int
                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
  
                int_work = 1;
-       } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
-               dev_err(dev, "BUS ERR!\n");
-               int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
-               roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
-               int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
-               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
-               int_work = 1;
-       } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
-               dev_err(dev, "OTHER ERR!\n");
+       } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_RAS_INT_S)) {
+               dev_err(dev, "RAS interrupt!\n");
  
-               int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
+               int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_RAS_INT_S;
                roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
  
                int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
                roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
  
                int_work = 1;
-       } else
+       } else {
                dev_err(dev, "There is no abnormal irq found!\n");
+       }
  
        return IRQ_RETVAL(int_work);
  }
  
+ /* Write the same enable/disable value into every per-EQ event interrupt
+  * enable register and into the abnormal interrupt enable and config
+  * registers (the new form replaces the old open-coded EQ_ENABLE /
+  * EQ_DISABLE branches with a single value-driven loop).
+  */
  static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
-                                       int eq_num, int enable_flag)
+                                       int eq_num, u32 enable_flag)
  {
        int i;

-       if (enable_flag == EQ_ENABLE) {
-               for (i = 0; i < eq_num; i++)
-                       roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
-                                  i * EQ_REG_OFFSET,
-                                  HNS_ROCE_V2_VF_EVENT_INT_EN_M);
-               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
-                          HNS_ROCE_V2_VF_ABN_INT_EN_M);
-               roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
-                          HNS_ROCE_V2_VF_ABN_INT_CFG_M);
-       } else {
-               for (i = 0; i < eq_num; i++)
-                       roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
-                                  i * EQ_REG_OFFSET,
-                                  HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
+       for (i = 0; i < eq_num; i++)
+               roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
+                          i * EQ_REG_OFFSET, enable_flag);

-               roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
-                          HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
-               roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
-                          HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
-       }
+       roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, enable_flag);
+       roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
  }
  
  static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
@@@ -5786,6 -6181,16 +6183,16 @@@ static void free_eq_buf(struct hns_roce
        hns_roce_mtr_destroy(hr_dev, &eq->mtr);
  }
  
+ /* Set the driver-side defaults for an event queue before its context is
+  * written to hardware: doorbell register address, consumer index and
+  * the overflow-ignore / coalesce / arm policy.
+  */
+ static void init_eq_config(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
+ {
+       eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
+       eq->cons_index = 0;
+       eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
+       eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
+       eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
+       /* EQ depth is configured to hardware as a power-of-two shift. */
+       eq->shift = ilog2((unsigned int)eq->entries);
+ }
  static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
                      void *mb_buf)
  {
        eqc = mb_buf;
        memset(eqc, 0, sizeof(struct hns_roce_eq_context));
  
-       /* init eqc */
-       eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
-       eq->cons_index = 0;
-       eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
-       eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
-       eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
-       eq->shift = ilog2((unsigned int)eq->entries);
+       init_eq_config(hr_dev, eq);
  
        /* if not multi-hop, eqe buffer only use one trunk */
        count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
                return -ENOBUFS;
        }
  
-       /* set eqc state */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
-                      HNS_ROCE_V2_EQ_STATE_VALID);
-       /* set eqe hop num */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
-                      HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
-       /* set eqc over_ignore */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
-                      HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
-       /* set eqc coalesce */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
-                      HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
-       /* set eqc arm_state */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
-                      HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
-       /* set eqn */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
-                      eq->eqn);
-       /* set eqe_cnt */
-       roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
-                      HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);
-       /* set eqe_ba_pg_sz */
-       roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
-                      HNS_ROCE_EQC_BA_PG_SZ_S,
-                      to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
-       /* set eqe_buf_pg_sz */
-       roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
-                      HNS_ROCE_EQC_BUF_PG_SZ_S,
-                      to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
-       /* set eq_producer_idx */
-       roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
-                      HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);
-       /* set eq_max_cnt */
-       roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
-                      HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
-       /* set eq_period */
-       roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
-                      HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
-       /* set eqe_report_timer */
-       roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
-                      HNS_ROCE_EQC_REPORT_TIMER_S,
-                      HNS_ROCE_EQ_INIT_REPORT_TIMER);
-       /* set bt_ba [34:3] */
-       roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
-                      HNS_ROCE_EQC_EQE_BA_L_S, bt_ba >> 3);
-       /* set bt_ba [64:35] */
-       roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
-                      HNS_ROCE_EQC_EQE_BA_H_S, bt_ba >> 35);
-       /* set eq shift */
-       roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
-                      eq->shift);
-       /* set eq MSI_IDX */
-       roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
-                      HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);
-       /* set cur_eqe_ba [27:12] */
-       roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
-                      HNS_ROCE_EQC_CUR_EQE_BA_L_S, eqe_ba[0] >> 12);
-       /* set cur_eqe_ba [59:28] */
-       roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
-                      HNS_ROCE_EQC_CUR_EQE_BA_M_S, eqe_ba[0] >> 28);
-       /* set cur_eqe_ba [63:60] */
-       roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
-                      HNS_ROCE_EQC_CUR_EQE_BA_H_S, eqe_ba[0] >> 60);
-       /* set eq consumer idx */
-       roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
-                      HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);
-       roce_set_field(eqc->byte_40, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
-                      HNS_ROCE_EQC_NXT_EQE_BA_L_S, eqe_ba[1] >> 12);
-       roce_set_field(eqc->byte_44, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
-                      HNS_ROCE_EQC_NXT_EQE_BA_H_S, eqe_ba[1] >> 44);
-       roce_set_field(eqc->byte_44, HNS_ROCE_EQC_EQE_SIZE_M,
-                      HNS_ROCE_EQC_EQE_SIZE_S,
-                      eq->eqe_size == HNS_ROCE_V3_EQE_SIZE ? 1 : 0);
+       hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
+       hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
+       hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
+       hr_reg_write(eqc, EQC_COALESCE, eq->coalesce);
+       hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st);
+       hr_reg_write(eqc, EQC_EQN, eq->eqn);
+       hr_reg_write(eqc, EQC_EQE_CNT, HNS_ROCE_EQ_INIT_EQE_CNT);
+       hr_reg_write(eqc, EQC_EQE_BA_PG_SZ,
+                    to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift));
+       hr_reg_write(eqc, EQC_EQE_BUF_PG_SZ,
+                    to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift));
+       hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX);
+       hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt);
+       hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period);
+       hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER);
+       hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3);
+       hr_reg_write(eqc, EQC_EQE_BA_H, bt_ba >> 35);
+       hr_reg_write(eqc, EQC_SHIFT, eq->shift);
+       hr_reg_write(eqc, EQC_MSI_INDX, HNS_ROCE_EQ_INIT_MSI_IDX);
+       hr_reg_write(eqc, EQC_CUR_EQE_BA_L, eqe_ba[0] >> 12);
+       hr_reg_write(eqc, EQC_CUR_EQE_BA_M, eqe_ba[0] >> 28);
+       hr_reg_write(eqc, EQC_CUR_EQE_BA_H, eqe_ba[0] >> 60);
+       hr_reg_write(eqc, EQC_EQ_CONS_INDX, HNS_ROCE_EQ_INIT_CONS_IDX);
+       hr_reg_write(eqc, EQC_NEX_EQE_BA_L, eqe_ba[1] >> 12);
+       hr_reg_write(eqc, EQC_NEX_EQE_BA_H, eqe_ba[1] >> 44);
+       hr_reg_write(eqc, EQC_EQE_SIZE,
+                    !!(eq->eqe_size == HNS_ROCE_V3_EQE_SIZE));
  
        return 0;
  }
@@@ -6166,6 -6497,7 +6499,7 @@@ static void hns_roce_v2_cleanup_eq_tabl
        hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
  
        __hns_roce_free_irq(hr_dev);
+       destroy_workqueue(hr_dev->irq_workq);
  
        for (i = 0; i < eq_num; i++) {
                hns_roce_v2_destroy_eqc(hr_dev, i);
        }
  
        kfree(eq_table->eq);
-       flush_workqueue(hr_dev->irq_workq);
-       destroy_workqueue(hr_dev->irq_workq);
  }
  
  static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
@@@ -6205,9 -6534,9 +6536,9 @@@ static const struct hns_roce_hw hns_roc
        .hw_profile = hns_roce_v2_profile,
        .hw_init = hns_roce_v2_init,
        .hw_exit = hns_roce_v2_exit,
-       .post_mbox = hns_roce_v2_post_mbox,
-       .chk_mbox = hns_roce_v2_chk_mbox,
-       .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
+       .post_mbox = v2_post_mbox,
+       .poll_mbox_done = v2_poll_mbox_done,
+       .chk_mbox_avail = v2_chk_mbox_is_avail,
        .set_gid = hns_roce_v2_set_gid,
        .set_mac = hns_roce_v2_set_mac,
        .write_mtpt = hns_roce_v2_write_mtpt,
        .set_hem = hns_roce_v2_set_hem,
        .clear_hem = hns_roce_v2_clear_hem,
        .modify_qp = hns_roce_v2_modify_qp,
-       .query_qp = hns_roce_v2_query_qp,
-       .destroy_qp = hns_roce_v2_destroy_qp,
        .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
-       .modify_cq = hns_roce_v2_modify_cq,
-       .post_send = hns_roce_v2_post_send,
-       .post_recv = hns_roce_v2_post_recv,
-       .req_notify_cq = hns_roce_v2_req_notify_cq,
-       .poll_cq = hns_roce_v2_poll_cq,
        .init_eq = hns_roce_v2_init_eq_table,
        .cleanup_eq = hns_roce_v2_cleanup_eq_table,
        .write_srqc = hns_roce_v2_write_srqc,
-       .modify_srq = hns_roce_v2_modify_srq,
-       .query_srq = hns_roce_v2_query_srq,
-       .post_srq_recv = hns_roce_v2_post_srq_recv,
        .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
        .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
  };
@@@ -6243,6 -6562,8 +6564,8 @@@ static const struct pci_device_id hns_r
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
+       {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+        HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
        /* required last entry */
        {0, }
  };
@@@ -6253,9 -6574,12 +6576,12 @@@ static void hns_roce_hw_v2_get_cfg(stru
                                  struct hnae3_handle *handle)
  {
        struct hns_roce_v2_priv *priv = hr_dev->priv;
+       const struct pci_device_id *id;
        int i;
  
        hr_dev->pci_dev = handle->pdev;
+       id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+       hr_dev->is_vf = id->driver_data;
        hr_dev->dev = &handle->pdev->dev;
        hr_dev->hw = &hns_roce_hw_v2;
        hr_dev->dfx = &hns_roce_dfx_hw_v2;
        addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
                            hr_dev->iboe.netdevs[0]->dev_addr);
  
-       for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
+       for (i = 0; i < handle->rinfo.num_vectors; i++)
                hr_dev->irq[i] = pci_irq_vector(handle->pdev,
                                                i + handle->rinfo.base_vector);
  
@@@ -6356,6 -6680,9 +6682,9 @@@ static int hns_roce_hw_v2_init_instance
        if (!id)
                return 0;
  
+       if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+               return 0;
        ret = __hns_roce_hw_v2_init_instance(handle);
        if (ret) {
                handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
@@@ -1116,7 -1116,7 +1116,7 @@@ static void devx_obj_build_destroy_cmd(
        case MLX5_CMD_OP_CREATE_MKEY:
                MLX5_SET(destroy_mkey_in, din, opcode,
                         MLX5_CMD_OP_DESTROY_MKEY);
 -              MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id);
 +              MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
                break;
        case MLX5_CMD_OP_CREATE_CQ:
                MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
@@@ -2185,27 -2185,69 +2185,69 @@@ static int devx_umem_get(struct mlx5_ib
        return 0;
  }
  
+ /* Choose the largest hardware-supported page size usable to map this
+  * umem within the caller-supplied @pgsz_bitmap of candidates.
+  *
+  * Returns the selected page size in bytes, or 0 if no candidate fits.
+  */
+ static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
+                                              unsigned long pgsz_bitmap)
+ {
+       unsigned long page_size;
+       /* Don't bother checking larger page sizes as offset must be zero and
+        * total DEVX umem length must be equal to total umem length.
+        */
+       pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
+                                        PAGE_SHIFT),
+                                  MLX5_ADAPTER_PAGE_SHIFT);
+       if (!pgsz_bitmap)
+               return 0;
+       page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
+       if (!page_size)
+               return 0;
+       /* If the page_size is less than the CPU page size then we can use the
+        * offset and create a umem which is a subset of the page list.
+        * For larger page sizes we can't be sure the DMA  list reflects the
+        * VA so we must ensure that the umem extent is exactly equal to the
+        * page list. Reduce the page size until one of these cases is true.
+        */
+       while ((ib_umem_dma_offset(umem, page_size) != 0 ||
+               (umem->length % page_size) != 0) &&
+               page_size > PAGE_SIZE)
+               page_size /= 2;
+       return page_size;
+ }
  static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
                                   struct uverbs_attr_bundle *attrs,
                                   struct devx_umem *obj,
                                   struct devx_umem_reg_cmd *cmd)
  {
+       unsigned long pgsz_bitmap;
        unsigned int page_size;
        __be64 *mtt;
        void *umem;
+       int ret;
  
        /*
-        * We don't know what the user intends to use this umem for, but the HW
-        * restrictions must be met. MR, doorbell records, QP, WQ and CQ all
-        * have different requirements. Since we have no idea how to sort this
-        * out, only support PAGE_SIZE with the expectation that userspace will
-        * provide the necessary alignments inside the known PAGE_SIZE and that
-        * FW will check everything.
+        * If the user does not pass in pgsz_bitmap then the user promises not
+        * to use umem_offset!=0 in any commands that allocate on top of the
+        * umem.
+        *
+        * If the user wants to use a umem_offset then it must pass in
+        * pgsz_bitmap which guides the maximum page size and thus maximum
+        * object alignment inside the umem. See the PRM.
+        *
+        * Users are not allowed to use IOVA here, mkeys are not supported on
+        * umem.
         */
-       page_size = ib_umem_find_best_pgoff(
-               obj->umem, PAGE_SIZE,
-               __mlx5_page_offset_to_bitmask(__mlx5_bit_sz(umem, page_offset),
-                                             0));
+       ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
+                       MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+                       GENMASK_ULL(63,
+                                   min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
+       if (ret)
+               return ret;
+       page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
        if (!page_size)
                return -EINVAL;
  
@@@ -2791,6 -2833,8 +2833,8 @@@ DECLARE_UVERBS_NAMED_METHOD
                           UA_MANDATORY),
        UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
                             enum ib_access_flags),
+       UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+                            u64),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
                            UVERBS_ATTR_TYPE(u32),
                            UA_MANDATORY));
@@@ -67,7 -67,7 +67,7 @@@ struct mlx5_modify_raw_qp_param 
        struct mlx5_rate_limit rl;
  
        u8 rq_q_ctr_id;
-       u16 port;
+       u32 port;
  };
  
  static void get_cqs(enum ib_qp_type qp_type,
@@@ -1078,7 -1078,7 +1078,7 @@@ static int _create_kernel_qp(struct mlx
  
        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
        MLX5_SET(qpc, qpc, uar_page, uar_index);
 -      MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
 +      MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
        MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
  
        /* Set "fast registration enabled" for all kernel QPs */
@@@ -1188,8 -1188,7 +1188,8 @@@ static int get_rq_ts_format(struct mlx5
                }
                return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
        }
 -      return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
 +      return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
 +                            MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
  }
  
  static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
                }
                return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
        }
 -      return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
 +      return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
 +                            MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
  }
  
  static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
                        MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
                MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
                        MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
 -      int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
 +      int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
 +                                     MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
  
        if (recv_cq &&
            recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
@@@ -1933,7 -1930,6 +1933,7 @@@ static int create_xrc_tgt_qp(struct mlx
        if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
                MLX5_SET(qpc, qpc, cd_slave_receive, 1);
  
 +      MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
        MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
        MLX5_SET(qpc, qpc, no_sq, 1);
        MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
@@@ -3146,6 -3142,19 +3146,19 @@@ enum 
        MLX5_PATH_FLAG_COUNTER  = 1 << 2,
  };
  
+ /* Translate a device static-rate index into an enum ib_rate value.
+  * Low indices map through a small lookup table; indices beyond the
+  * table are linear, offset by MLX5_STAT_RATE_OFFSET.
+  */
+ static int mlx5_to_ib_rate_map(u8 rate)
+ {
+       static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
+                                    IB_RATE_25_GBPS,      IB_RATE_100_GBPS,
+                                    IB_RATE_200_GBPS,     IB_RATE_50_GBPS,
+                                    IB_RATE_400_GBPS };
+       if (rate < ARRAY_SIZE(rates))
+               return rates[rate];
+       return rate - MLX5_STAT_RATE_OFFSET;
+ }
  static int ib_to_mlx5_rate_map(u8 rate)
  {
        switch (rate) {
@@@ -4485,7 -4494,7 +4498,7 @@@ static void to_rdma_ah_attr(struct mlx5
        rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
  
        static_rate = MLX5_GET(ads, path, stat_rate);
-       rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
+       rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
        if (MLX5_GET(ads, path, grh) ||
            ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
@@@ -4877,7 -4886,6 +4890,7 @@@ static int  create_rq(struct mlx5_ib_rw
        struct mlx5_ib_dev *dev;
        int has_net_offloads;
        __be64 *rq_pas0;
 +      int ts_format;
        void *in;
        void *rqc;
        void *wq;
  
        dev = to_mdev(pd->device);
  
 +      ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
 +      if (ts_format < 0)
 +              return ts_format;
 +
        inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        MLX5_SET(rqc,  rqc, mem_rq_type,
                 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
 +      MLX5_SET(rqc, rqc, ts_format, ts_format);
        MLX5_SET(rqc, rqc, user_index, rwq->user_index);
        MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
        MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
@@@ -72,7 -72,7 +72,7 @@@ static inline int qedr_ib_copy_to_udata
        return ib_copy_to_udata(udata, src, min_len);
  }
  
- int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+ int qedr_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
  {
        if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
                return -EINVAL;
@@@ -81,7 -81,7 +81,7 @@@
        return 0;
  }
  
- int qedr_iw_query_gid(struct ib_device *ibdev, u8 port,
+ int qedr_iw_query_gid(struct ib_device *ibdev, u32 port,
                      int index, union ib_gid *sgid)
  {
        struct qedr_dev *dev = get_qedr_dev(ibdev);
@@@ -210,7 -210,8 +210,8 @@@ static inline void get_link_speed_and_w
        }
  }
  
- int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
+ int qedr_query_port(struct ib_device *ibdev, u32 port,
+                   struct ib_port_attr *attr)
  {
        struct qedr_dev *dev;
        struct qed_rdma_port *rdma_port;
@@@ -1244,8 -1245,7 +1245,8 @@@ static int qedr_check_qp_attrs(struct i
         * TGT QP isn't associated with RQ/SQ
         */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
 -          (attrs->qp_type != IB_QPT_XRC_TGT)) {
 +          (attrs->qp_type != IB_QPT_XRC_TGT) &&
 +          (attrs->qp_type != IB_QPT_XRC_INI)) {
                struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
                struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
  
@@@ -4483,7 -4483,7 +4484,7 @@@ int qedr_poll_cq(struct ib_cq *ibcq, in
  }
  
  int qedr_process_mad(struct ib_device *ibdev, int process_mad_flags,
-                    u8 port_num, const struct ib_wc *in_wc,
+                    u32 port_num, const struct ib_wc *in_wc,
                     const struct ib_grh *in_grh, const struct ib_mad *in,
                     struct ib_mad *out_mad, size_t *out_mad_size,
                     u16 *out_mad_pkey_index)
@@@ -103,11 -103,11 +103,11 @@@ static inline void __rtrs_put_permit(st
   *    up earlier.
   *
   * Context:
 - *    Can sleep if @wait == RTRS_TAG_WAIT
 + *    Can sleep if @wait == RTRS_PERMIT_WAIT
   */
  struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
                                          enum rtrs_clt_con_type con_type,
 -                                        int can_wait)
 +                                        enum wait_type can_wait)
  {
        struct rtrs_permit *permit;
        DEFINE_WAIT(wait);
@@@ -174,7 -174,7 +174,7 @@@ struct rtrs_clt_con *rtrs_permit_to_clt
        int id = 0;
  
        if (likely(permit->con_type == RTRS_IO_CON))
 -              id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
 +              id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
  
        return to_clt_con(sess->s.con[id]);
  }
@@@ -325,7 -325,7 +325,7 @@@ static void rtrs_rdma_error_recovery(st
  
  static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_clt_con *con = cq->cq_context;
+       struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
  
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
@@@ -345,7 -345,7 +345,7 @@@ static void rtrs_clt_inv_rkey_done(stru
  {
        struct rtrs_clt_io_req *req =
                container_of(wc->wr_cqe, typeof(*req), inv_cqe);
-       struct rtrs_clt_con *con = cq->cq_context;
+       struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
  
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
@@@ -437,6 -437,13 +437,13 @@@ static void complete_rdma_req(struct rt
        req->in_use = false;
        req->con = NULL;
  
+       if (errno) {
+               rtrs_err_rl(con->c.sess,
+                           "IO request failed: error=%d path=%s [%s:%u]\n",
+                           errno, kobject_name(&sess->kobj), sess->hca_name,
+                           sess->hca_port);
+       }
        if (notify)
                req->conf(req->priv, errno);
  }
@@@ -586,7 -593,7 +593,7 @@@ static int rtrs_post_recv_empty_x2(stru
  
  static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_clt_con *con = cq->cq_context;
+       struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
        struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
        u32 imm_type, imm_payload;
        bool w_inval = false;
                } else if (imm_type == RTRS_HB_ACK_IMM) {
                        WARN_ON(con->c.cid);
                        sess->s.hb_missed_cnt = 0;
+                       sess->s.hb_cur_latency =
+                               ktime_sub(ktime_get(), sess->s.hb_last_sent);
                        if (sess->flags & RTRS_MSG_NEW_RKEY_F)
                                return  rtrs_clt_recv_done(con, wc);
                } else {
@@@ -826,6 -835,57 +835,57 @@@ static struct rtrs_clt_sess *get_next_p
        return min_path;
  }
  
+ /**
+  * get_next_path_min_latency() - Returns path with minimal latency.
+  * @it:       the path pointer
+  *
+  * Return: a path with the lowest latency or NULL if all paths are tried
+  *
+  * Locks:
+  *    rcu_read_lock() must be hold.
+  *
+  * Related to @MP_POLICY_MIN_LATENCY
+  *
+  * This DOES skip an already-tried path.
+  * There is a skip-list to skip a path if the path has tried but failed.
+  * It will try the minimum latency path and then the second minimum latency
+  * path and so on. Finally it will return NULL if all paths are tried.
+  * Therefore the caller MUST check the returned
+  * path is NULL and trigger the IO error.
+  */
+ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
+ {
+       struct rtrs_clt_sess *min_path = NULL;
+       struct rtrs_clt *clt = it->clt;
+       struct rtrs_clt_sess *sess;
+       ktime_t min_latency = INT_MAX;
+       ktime_t latency;
+       list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
+               if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+                       continue;
+               if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+                       continue;
+               latency = sess->s.hb_cur_latency;
+               if (latency < min_latency) {
+                       min_latency = latency;
+                       min_path = sess;
+               }
+       }
+       /*
+        * add the path to the skip list, so that next time we can get
+        * a different one
+        */
+       if (min_path)
+               list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
+       return min_path;
+ }
  static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
  {
        INIT_LIST_HEAD(&it->skip_list);
  
        if (clt->mp_policy == MP_POLICY_RR)
                it->next_path = get_next_path_rr;
-       else
+       else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
                it->next_path = get_next_path_min_inflight;
+       else
+               it->next_path = get_next_path_min_latency;
  }
  
  static inline void path_it_deinit(struct path_it *it)
@@@ -1020,7 -1082,10 +1082,10 @@@ static int rtrs_clt_write_req(struct rt
                                       req->usr_len + sizeof(*msg),
                                       imm);
        if (unlikely(ret)) {
-               rtrs_err(s, "Write request failed: %d\n", ret);
+               rtrs_err_rl(s,
+                           "Write request failed: error=%d path=%s [%s:%u]\n",
+                           ret, kobject_name(&sess->kobj), sess->hca_name,
+                           sess->hca_port);
                if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
                        atomic_dec(&sess->stats->inflight);
                if (req->sg_cnt)
@@@ -1052,7 -1117,7 +1117,7 @@@ static int rtrs_clt_read_req(struct rtr
        struct rtrs_sess *s = con->c.sess;
        struct rtrs_clt_sess *sess = to_clt_sess(s);
        struct rtrs_msg_rdma_read *msg;
-       struct rtrs_ib_dev *dev;
+       struct rtrs_ib_dev *dev = sess->s.dev;
  
        struct ib_reg_wr rwr;
        struct ib_send_wr *wr = NULL;
  
        const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
  
-       s = &sess->s;
-       dev = sess->s.dev;
        if (unlikely(tsize > sess->chunk_size)) {
                rtrs_wrn(s,
                          "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
        ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
                                   req->data_len, imm, wr);
        if (unlikely(ret)) {
-               rtrs_err(s, "Read request failed: %d\n", ret);
+               rtrs_err_rl(s,
+                           "Read request failed: error=%d path=%s [%s:%u]\n",
+                           ret, kobject_name(&sess->kobj), sess->hca_name,
+                           sess->hca_port);
                if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
                        atomic_dec(&sess->stats->inflight);
                req->need_inv = false;
@@@ -1400,29 -1465,23 +1465,29 @@@ static void rtrs_clt_close_work(struct 
  static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
                                         const struct rtrs_addr *path,
                                         size_t con_num, u16 max_segments,
 -                                       size_t max_segment_size)
 +                                       u32 nr_poll_queues)
  {
        struct rtrs_clt_sess *sess;
        int err = -ENOMEM;
        int cpu;
 +      size_t total_con;
  
        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (!sess)
                goto err;
  
 -      /* Extra connection for user messages */
 -      con_num += 1;
 -
 -      sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
 +      /*
 +       * irqmode and poll
 +       * +1: Extra connection for user messages
 +       */
 +      total_con = con_num + nr_poll_queues + 1;
 +      sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL);
        if (!sess->s.con)
                goto err_free_sess;
  
 +      sess->s.con_num = total_con;
 +      sess->s.irq_con_num = con_num + 1;
 +
        sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
        if (!sess->stats)
                goto err_free_con;
                memcpy(&sess->s.src_addr, path->src,
                       rdma_addr_size((struct sockaddr *)path->src));
        strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
 -      sess->s.con_num = con_num;
        sess->clt = clt;
 -      sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
 +      sess->max_pages_per_mr = max_segments;
        init_waitqueue_head(&sess->state_wq);
        sess->state = RTRS_CLT_CONNECTING;
        atomic_set(&sess->connected_cnt, 0);
@@@ -1581,14 -1641,9 +1646,14 @@@ static int create_con_cq_qp(struct rtrs
        }
        cq_size = max_send_wr + max_recv_wr;
        cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
 -      err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
 -                               cq_vector, cq_size, max_send_wr,
 -                               max_recv_wr, IB_POLL_SOFTIRQ);
 +      if (con->c.cid >= sess->s.irq_con_num)
 +              err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
 +                                      cq_vector, cq_size, max_send_wr,
 +                                      max_recv_wr, IB_POLL_DIRECT);
 +      else
 +              err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
 +                                      cq_vector, cq_size, max_send_wr,
 +                                      max_recv_wr, IB_POLL_SOFTIRQ);
        /*
         * In case of error we do not bother to clean previous allocations,
         * since destroy_con_cq_qp() must be called.
@@@ -1863,12 -1918,14 +1928,14 @@@ static int rtrs_clt_rdma_cm_handler(str
        case RDMA_CM_EVENT_UNREACHABLE:
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-               rtrs_wrn(s, "CM error event %d\n", ev->event);
+               rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+                        rdma_event_msg(ev->event), ev->status);
                cm_err = -ECONNRESET;
                break;
        case RDMA_CM_EVENT_ADDR_ERROR:
        case RDMA_CM_EVENT_ROUTE_ERROR:
-               rtrs_wrn(s, "CM error event %d\n", ev->event);
+               rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
+                        rdma_event_msg(ev->event), ev->status);
                cm_err = -EHOSTUNREACH;
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                rtrs_clt_close_conns(sess, false);
                return 0;
        default:
-               rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
+               rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
+                        rdma_event_msg(ev->event), ev->status);
                cm_err = -ECONNRESET;
                break;
        }
@@@ -2251,7 -2309,7 +2319,7 @@@ destroy
  
  static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_clt_con *con = cq->cq_context;
+       struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
        struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
        struct rtrs_iu *iu;
  
@@@ -2333,7 -2391,7 +2401,7 @@@ static int process_info_rsp(struct rtrs
  
  static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_clt_con *con = cq->cq_context;
+       struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
        struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
        struct rtrs_msg_info_rsp *msg;
        enum rtrs_clt_state state;
  static int init_sess(struct rtrs_clt_sess *sess)
  {
        int err;
+       char str[NAME_MAX];
+       struct rtrs_addr path = {
+               .src = &sess->s.src_addr,
+               .dst = &sess->s.dst_addr,
+       };
+       rtrs_addr_to_str(&path, str, sizeof(str));
  
        mutex_lock(&sess->init_mutex);
        err = init_conns(sess);
        if (err) {
-               rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
+               rtrs_err(sess->clt,
+                        "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
+                        str, sess->hca_name, sess->hca_port);
                goto out;
        }
        err = rtrs_send_sess_info(sess);
        if (err) {
-               rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
+               rtrs_err(
+                       sess->clt,
+                       "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
+                       err, str, sess->hca_name, sess->hca_port);
                goto out;
        }
        rtrs_clt_sess_up(sess);
@@@ -2538,6 -2608,7 +2618,6 @@@ static struct rtrs_clt *alloc_clt(cons
                                  void  (*link_ev)(void *priv,
                                                   enum rtrs_clt_link_ev ev),
                                  unsigned int max_segments,
 -                                size_t max_segment_size,
                                  unsigned int reconnect_delay_sec,
                                  unsigned int max_reconnect_attempts)
  {
        clt->port = port;
        clt->pdu_sz = pdu_sz;
        clt->max_segments = max_segments;
 -      clt->max_segment_size = max_segment_size;
        clt->reconnect_delay_sec = reconnect_delay_sec;
        clt->max_reconnect_attempts = max_reconnect_attempts;
        clt->priv = priv;
@@@ -2636,9 -2708,9 +2716,9 @@@ static void free_clt(struct rtrs_clt *c
   * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
   * @reconnect_delay_sec: time between reconnect tries
   * @max_segments: Max. number of segments per IO request
 - * @max_segment_size: Max. size of one segment
   * @max_reconnect_attempts: Number of times to reconnect on error before giving
   *                        up, 0 for * disabled, -1 for forever
 + * @nr_poll_queues: number of polling mode connection using IB_POLL_DIRECT flag
   *
   * Starts session establishment with the rtrs_server. The function can block
   * up to ~2000ms before it returns.
@@@ -2651,7 -2723,8 +2731,7 @@@ struct rtrs_clt *rtrs_clt_open(struct r
                                 size_t paths_num, u16 port,
                                 size_t pdu_sz, u8 reconnect_delay_sec,
                                 u16 max_segments,
 -                               size_t max_segment_size,
 -                               s16 max_reconnect_attempts)
 +                               s16 max_reconnect_attempts, u32 nr_poll_queues)
  {
        struct rtrs_clt_sess *sess, *tmp;
        struct rtrs_clt *clt;
  
        clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
                        ops->link_ev,
 -                      max_segments, max_segment_size, reconnect_delay_sec,
 +                      max_segments, reconnect_delay_sec,
                        max_reconnect_attempts);
        if (IS_ERR(clt)) {
                err = PTR_ERR(clt);
                struct rtrs_clt_sess *sess;
  
                sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
 -                                max_segments, max_segment_size);
 +                                max_segments, nr_poll_queues);
                if (IS_ERR(sess)) {
                        err = PTR_ERR(sess);
                        goto close_all_sess;
@@@ -2727,8 -2800,8 +2807,8 @@@ void rtrs_clt_close(struct rtrs_clt *cl
  
        /* Now it is safe to iterate over all paths without locks */
        list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
 -              rtrs_clt_destroy_sess_files(sess, NULL);
                rtrs_clt_close_conns(sess, true);
 +              rtrs_clt_destroy_sess_files(sess, NULL);
                kobject_put(&sess->kobj);
        }
        free_clt(clt);
@@@ -2791,8 -2864,8 +2871,8 @@@ int rtrs_clt_remove_path_from_sysfs(str
        } while (!changed && old_state != RTRS_CLT_DEAD);
  
        if (likely(changed)) {
-               rtrs_clt_destroy_sess_files(sess, sysfs_self);
                rtrs_clt_remove_path_from_arr(sess);
+               rtrs_clt_destroy_sess_files(sess, sysfs_self);
                kobject_put(&sess->kobj);
        }
  
@@@ -2894,31 -2967,6 +2974,31 @@@ int rtrs_clt_request(int dir, struct rt
  }
  EXPORT_SYMBOL(rtrs_clt_request);
  
 +int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
 +{
 +      int cnt;
 +      struct rtrs_con *con;
 +      struct rtrs_clt_sess *sess;
 +      struct path_it it;
 +
 +      rcu_read_lock();
 +      for (path_it_init(&it, clt);
 +           (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
 +              if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
 +                      continue;
 +
 +              con = sess->s.con[index + 1];
 +              cnt = ib_process_cq_direct(con->cq, -1);
 +              if (cnt)
 +                      break;
 +      }
 +      path_it_deinit(&it);
 +      rcu_read_unlock();
 +
 +      return cnt;
 +}
 +EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
 +
  /**
   * rtrs_clt_query() - queries RTRS session attributes
   *@clt: session pointer
@@@ -2933,9 -2981,9 +3013,9 @@@ int rtrs_clt_query(struct rtrs_clt *clt
                return -ECOMM;
  
        attr->queue_depth      = clt->queue_depth;
-       attr->max_io_size      = clt->max_io_size;
-       attr->sess_kobj        = &clt->dev.kobj;
-       strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
+       /* Cap max_io_size to min of remote buffer size and the fr pages */
+       attr->max_io_size = min_t(int, clt->max_io_size,
+                                 clt->max_segments * SZ_4K);
  
        return 0;
  }
@@@ -2947,7 -2995,8 +3027,7 @@@ int rtrs_clt_create_path_from_sysfs(str
        struct rtrs_clt_sess *sess;
        int err;
  
 -      sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
 -                        clt->max_segment_size);
 +      sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments, 0);
        if (IS_ERR(sess))
                return PTR_ERR(sess);
  
@@@ -29,6 -29,7 +29,7 @@@ enum rtrs_clt_state 
  enum rtrs_mp_policy {
        MP_POLICY_RR,
        MP_POLICY_MIN_INFLIGHT,
+       MP_POLICY_MIN_LATENCY,
  };
  
  /* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
@@@ -166,6 -167,7 +167,6 @@@ struct rtrs_clt 
        unsigned int            max_reconnect_attempts;
        unsigned int            reconnect_delay_sec;
        unsigned int            max_segments;
 -      size_t                  max_segment_size;
        void                    *permits;
        unsigned long           *permits_map;
        size_t                  queue_depth;
@@@ -91,6 -91,7 +91,7 @@@ struct rtrs_con 
        struct ib_cq            *cq;
        struct rdma_cm_id       *cm_id;
        unsigned int            cid;
+       u16                     cq_size;
  };
  
  struct rtrs_sess {
        uuid_t                  uuid;
        struct rtrs_con **con;
        unsigned int            con_num;
 +      unsigned int            irq_con_num;
        unsigned int            recon_cnt;
        struct rtrs_ib_dev      *dev;
        int                     dev_ref;
        unsigned int            hb_interval_ms;
        unsigned int            hb_missed_cnt;
        unsigned int            hb_missed_max;
+       ktime_t                 hb_last_sent;
+       ktime_t                 hb_cur_latency;
  };
  
  /* rtrs information unit */
@@@ -199,7 -199,7 +199,7 @@@ static void rtrs_srv_wait_ops_ids(struc
  
  static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_srv_con *con = cq->cq_context;
+       struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
        struct rtrs_sess *s = con->c.sess;
        struct rtrs_srv_sess *sess = to_srv_sess(s);
  
@@@ -518,8 -518,9 +518,9 @@@ bool rtrs_srv_resp_rdma(struct rtrs_srv
  
        if (unlikely(sess->state != RTRS_SRV_CONNECTED)) {
                rtrs_err_rl(s,
-                            "Sending I/O response failed,  session is disconnected, sess state %s\n",
-                            rtrs_srv_state_str(sess->state));
+                           "Sending I/O response failed,  session %s is disconnected, sess state %s\n",
+                           kobject_name(&sess->kobj),
+                           rtrs_srv_state_str(sess->state));
                goto out;
        }
        if (always_invalidate) {
        }
        if (unlikely(atomic_sub_return(1,
                                       &con->sq_wr_avail) < 0)) {
-               pr_err("IB send queue full\n");
+               rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
+                        kobject_name(&sess->kobj),
+                        con->c.cid);
                atomic_add(1, &con->sq_wr_avail);
                spin_lock(&con->rsp_wr_wait_lock);
                list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
                err = rdma_write_sg(id);
  
        if (unlikely(err)) {
-               rtrs_err_rl(s, "IO response failed: %d\n", err);
+               rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
+                           kobject_name(&sess->kobj));
                close_sess(sess);
        }
  out:
@@@ -720,7 -724,7 +724,7 @@@ static void rtrs_srv_stop_hb(struct rtr
  
  static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_srv_con *con = cq->cq_context;
+       struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
        struct rtrs_sess *s = con->c.sess;
        struct rtrs_srv_sess *sess = to_srv_sess(s);
        struct rtrs_iu *iu;
@@@ -862,7 -866,7 +866,7 @@@ rwr_free
  
  static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_srv_con *con = cq->cq_context;
+       struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
        struct rtrs_sess *s = con->c.sess;
        struct rtrs_srv_sess *sess = to_srv_sess(s);
        struct rtrs_msg_info_req *msg;
@@@ -998,7 -1002,7 +1002,7 @@@ static void process_read(struct rtrs_sr
        usr_len = le16_to_cpu(msg->usr_len);
        data_len = off - usr_len;
        data = page_address(srv->chunks[buf_id]);
 -      ret = ctx->ops.rdma_ev(srv, srv->priv, id, READ, data, data_len,
 +      ret = ctx->ops.rdma_ev(srv->priv, id, READ, data, data_len,
                           data + data_len, usr_len);
  
        if (unlikely(ret)) {
@@@ -1051,7 -1055,7 +1055,7 @@@ static void process_write(struct rtrs_s
        usr_len = le16_to_cpu(req->usr_len);
        data_len = off - usr_len;
        data = page_address(srv->chunks[buf_id]);
 -      ret = ctx->ops.rdma_ev(srv, srv->priv, id, WRITE, data, data_len,
 +      ret = ctx->ops.rdma_ev(srv->priv, id, WRITE, data, data_len,
                           data + data_len, usr_len);
        if (unlikely(ret)) {
                rtrs_err_rl(s,
@@@ -1110,7 -1114,7 +1114,7 @@@ static void rtrs_srv_inv_rkey_done(stru
  {
        struct rtrs_srv_mr *mr =
                container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
-       struct rtrs_srv_con *con = cq->cq_context;
+       struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
        struct rtrs_sess *s = con->c.sess;
        struct rtrs_srv_sess *sess = to_srv_sess(s);
        struct rtrs_srv *srv = sess->srv;
@@@ -1167,7 -1171,7 +1171,7 @@@ static void rtrs_rdma_process_wr_wait_l
  
  static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
  {
-       struct rtrs_srv_con *con = cq->cq_context;
+       struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
        struct rtrs_sess *s = con->c.sess;
        struct rtrs_srv_sess *sess = to_srv_sess(s);
        struct rtrs_srv *srv = sess->srv;
@@@ -1683,6 -1687,8 +1687,8 @@@ static struct rtrs_srv_sess *__alloc_se
  {
        struct rtrs_srv_sess *sess;
        int err = -ENOMEM;
+       char str[NAME_MAX];
+       struct rtrs_addr path;
  
        if (srv->paths_num >= MAX_PATHS_NUM) {
                err = -ECONNRESET;
        sess->cur_cq_vector = -1;
        sess->s.dst_addr = cm_id->route.addr.dst_addr;
        sess->s.src_addr = cm_id->route.addr.src_addr;
+       /* temporary until receiving session-name from client */
+       path.src = &sess->s.src_addr;
+       path.dst = &sess->s.dst_addr;
+       rtrs_addr_to_str(&path, str, sizeof(str));
+       strlcpy(sess->s.sessname, str, sizeof(sess->s.sessname));
        sess->s.con_num = con_num;
        sess->s.recon_cnt = recon_cnt;
        uuid_copy(&sess->s.uuid, uuid);
@@@ -1908,13 -1921,10 +1921,10 @@@ static int rtrs_srv_rdma_cm_handler(str
        case RDMA_CM_EVENT_UNREACHABLE:
                rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
                          rdma_event_msg(ev->event), ev->status);
-               close_sess(sess);
-               break;
+               fallthrough;
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
-               close_sess(sess);
-               break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                close_sess(sess);
                break;
@@@ -58,13 -58,14 +58,13 @@@ struct rtrs_clt *rtrs_clt_open(struct r
                                 size_t path_cnt, u16 port,
                                 size_t pdu_sz, u8 reconnect_delay_sec,
                                 u16 max_segments,
 -                               size_t max_segment_size,
 -                               s16 max_reconnect_attempts);
 +                               s16 max_reconnect_attempts, u32 nr_poll_queues);
  
  void rtrs_clt_close(struct rtrs_clt *sess);
  
 -enum {
 +enum wait_type {
        RTRS_PERMIT_NOWAIT = 0,
 -      RTRS_PERMIT_WAIT   = 1,
 +      RTRS_PERMIT_WAIT   = 1
  };
  
  /**
@@@ -80,7 -81,7 +80,7 @@@ enum rtrs_clt_con_type 
  
  struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
                                    enum rtrs_clt_con_type con_type,
 -                                  int wait);
 +                                  enum wait_type wait);
  
  void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
  
@@@ -102,7 -103,6 +102,7 @@@ int rtrs_clt_request(int dir, struct rt
                     struct rtrs_clt *sess, struct rtrs_permit *permit,
                     const struct kvec *vec, size_t nr, size_t len,
                     struct scatterlist *sg, unsigned int sg_cnt);
 +int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
  
  /**
   * rtrs_attrs - RTRS session attributes
  struct rtrs_attrs {
        u32             queue_depth;
        u32             max_io_size;
-       u8              sessname[NAME_MAX];
-       struct kobject  *sess_kobj;
  };
  
  int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
@@@ -138,6 -136,7 +136,6 @@@ struct rtrs_srv_ops 
         *                      message for the data transfer will be sent to
         *                      the client.
  
 -       *      @sess:          Session
         *      @priv:          Private data set by rtrs_srv_set_sess_priv()
         *      @id:            internal RTRS operation id
         *      @dir:           READ/WRITE
         *      @usr:           The extra user message sent by the client (%vec)
         *      @usrlen:        Size of the user message
         */
 -      int (*rdma_ev)(struct rtrs_srv *sess, void *priv,
 +      int (*rdma_ev)(void *priv,
                       struct rtrs_srv_op *id, int dir,
                       void *data, size_t datalen, const void *usr,
                       size_t usrlen);
@@@ -185,4 -184,5 +183,5 @@@ int rtrs_addr_to_sockaddr(const char *s
                          struct rtrs_addr *addr);
  
  int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len);
+ int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len);
  #endif
@@@ -1528,20 -1528,16 +1528,20 @@@ static void srpt_handle_cmd(struct srpt
                goto busy;
        }
  
 -      rc = target_submit_cmd_map_sgls(cmd, ch->sess, srp_cmd->cdb,
 -                             &send_ioctx->sense_data[0],
 -                             scsilun_to_int(&srp_cmd->lun), data_len,
 -                             TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF,
 -                             sg, sg_cnt, NULL, 0, NULL, 0);
 +      rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
 +                           scsilun_to_int(&srp_cmd->lun), data_len,
 +                           TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
        if (rc != 0) {
                pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
                         srp_cmd->tag);
                goto busy;
        }
 +
 +      if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
 +                             GFP_KERNEL))
 +              return;
 +
 +      target_submit(cmd);
        return;
  
  busy:
@@@ -2382,6 -2378,7 +2382,7 @@@ static int srpt_cm_req_recv(struct srpt
                pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
                        dev_name(&sdev->device->dev), port_num);
                mutex_unlock(&sport->mutex);
+               ret = -EINVAL;
                goto reject;
        }
  
@@@ -3109,7 -3106,8 +3110,8 @@@ static int srpt_add_one(struct ib_devic
  {
        struct srpt_device *sdev;
        struct srpt_port *sport;
-       int i, ret;
+       int ret;
+       u32 i;
  
        pr_debug("device = %p\n", device);
  
@@@ -122,10 -122,7 +122,10 @@@ enum board_idx 
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
 +      NETXTREME_C_VF_HV,
 +      NETXTREME_E_VF_HV,
        NETXTREME_E_P5_VF,
 +      NETXTREME_E_P5_VF_HV,
  };
  
  /* indexed by enum above */
@@@ -173,10 -170,7 +173,10 @@@ static const struct 
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
 +      [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
 +      [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
 +      [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
  };
  
  static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
  #ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
 +      { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
 +      { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
 +      { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
 +      { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
 +      { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
 +      { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
 +      { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
 +      { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
 +      { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
 +      { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
  #endif
        { 0 }
@@@ -281,8 -265,7 +281,8 @@@ static struct workqueue_struct *bnxt_pf
  static bool bnxt_vf_pciid(enum board_idx idx)
  {
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
 -              idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
 +              idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
 +              idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
  }
  
  #define DB_CP_REARM_FLAGS     (DB_KEY_CP | DB_IDX_VALID)
@@@ -375,7 -358,6 +375,7 @@@ static netdev_tx_t bnxt_start_xmit(stru
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
 +      __le32 lflags = 0;
  
        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }
  
 +      if (unlikely(skb->no_fcs)) {
 +              lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
 +              goto normal_tx;
 +      }
 +
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
@@@ -523,7 -500,7 +523,7 @@@ normal_tx
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
  
 -      txbd1->tx_bd_hsize_lflags = 0;
 +      txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                u32 hdr_len;
  
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);
  
 -              txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
 +              txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 -              txbd1->tx_bd_hsize_lflags =
 +              txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }
@@@ -1755,16 -1732,14 +1755,16 @@@ static int bnxt_rx_pkt(struct bnxt *bp
  
        cons = rxcmp->rx_cmp_opaque;
        if (unlikely(cons != rxr->rx_next_cons)) {
 -              int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
 +              int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
  
                /* 0xffff is forced error, don't print it */
                if (rxr->rx_next_cons != 0xffff)
                        netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
                                    cons, rxr->rx_next_cons);
                bnxt_sched_reset(bp, rxr);
 -              return rc1;
 +              if (rc1)
 +                      return rc1;
 +              goto next_rx_no_prod_no_len;
        }
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
@@@ -4170,7 -4145,7 +4170,7 @@@ static void bnxt_free_mem(struct bnxt *
        bnxt_free_ntp_fltrs(bp, irq_re_init);
        if (irq_re_init) {
                bnxt_free_ring_stats(bp);
 -              if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
 +              if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
                    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                        bnxt_free_port_stats(bp);
                bnxt_free_ring_grps(bp);
@@@ -4495,7 -4470,7 +4495,7 @@@ static int bnxt_hwrm_do_send_msg(struc
        writel(1, bp->bar0 + doorbell_offset);
  
        if (!pci_is_enabled(bp->pdev))
 -              return 0;
 +              return -ENODEV;
  
        if (!timeout)
                timeout = DFLT_HWRM_CMD_TIMEOUT;
                        if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
                                return -EBUSY;
                        /* on first few passes, just barely sleep */
 -                      if (i < HWRM_SHORT_TIMEOUT_COUNTER)
 +                      if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
                                usleep_range(HWRM_SHORT_MIN_TIMEOUT,
                                             HWRM_SHORT_MAX_TIMEOUT);
 -                      else
 +                      } else {
 +                              if (HWRM_WAIT_MUST_ABORT(bp, req))
 +                                      break;
                                usleep_range(HWRM_MIN_TIMEOUT,
                                             HWRM_MAX_TIMEOUT);
 +                      }
                }
  
                if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
                        if (len)
                                break;
                        /* on first few passes, just barely sleep */
 -                      if (i < HWRM_SHORT_TIMEOUT_COUNTER)
 +                      if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
                                usleep_range(HWRM_SHORT_MIN_TIMEOUT,
                                             HWRM_SHORT_MAX_TIMEOUT);
 -                      else
 +                      } else {
 +                              if (HWRM_WAIT_MUST_ABORT(bp, req))
 +                                      goto timeout_abort;
                                usleep_range(HWRM_MIN_TIMEOUT,
                                             HWRM_MAX_TIMEOUT);
 +                      }
                }
  
                if (i >= tmo_count) {
 +timeout_abort:
                        if (!silent)
                                netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
                                           HWRM_TOTAL_TIMEOUT(i),
@@@ -7572,32 -7540,6 +7572,32 @@@ static void __bnxt_map_fw_health_reg(st
                                         BNXT_FW_HEALTH_WIN_MAP_OFF);
  }
  
 +bool bnxt_is_fw_healthy(struct bnxt *bp)
 +{
 +      if (bp->fw_health && bp->fw_health->status_reliable) {
 +              u32 fw_status;
 +
 +              fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
 +              if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
 +                      return false;
 +      }
 +
 +      return true;
 +}
 +
 +static void bnxt_inv_fw_health_reg(struct bnxt *bp)
 +{
 +      struct bnxt_fw_health *fw_health = bp->fw_health;
 +      u32 reg_type;
 +
 +      if (!fw_health || !fw_health->status_reliable)
 +              return;
 +
 +      reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
 +      if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
 +              fw_health->status_reliable = false;
 +}
 +
  static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
  {
        void __iomem *hs;
        u32 reg_type;
        u32 sig;
  
 +      if (bp->fw_health)
 +              bp->fw_health->status_reliable = false;
 +
        __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
        hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
  
                                             BNXT_FW_HEALTH_WIN_BASE +
                                             BNXT_GRC_REG_CHIP_NUM);
                }
 -              if (!BNXT_CHIP_P5(bp)) {
 -                      if (bp->fw_health)
 -                              bp->fw_health->status_reliable = false;
 +              if (!BNXT_CHIP_P5(bp))
                        return;
 -              }
 +
                status_loc = BNXT_GRC_REG_STATUS_P5 |
                             BNXT_FW_HEALTH_REG_TYPE_BAR0;
        } else {
@@@ -7651,7 -7592,6 +7651,7 @@@ static int bnxt_map_fw_health_regs(stru
        u32 reg_base = 0xffffffff;
        int i;
  
 +      bp->fw_health->status_reliable = false;
        /* Only pre-map the monitoring GRC registers using window 3 */
        for (i = 0; i < 4; i++) {
                u32 reg = fw_health->regs[i];
                        return -ERANGE;
                fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
        }
 +      bp->fw_health->status_reliable = true;
        if (reg_base == 0xffffffff)
                return 0;
  
@@@ -8365,11 -8304,11 +8365,11 @@@ static int bnxt_alloc_rfs_vnics(struct 
  #endif
  }
  
 -/* Allow PF and VF with default VLAN to be in promiscuous mode */
 +/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
  static bool bnxt_promisc_ok(struct bnxt *bp)
  {
  #ifdef CONFIG_BNXT_SRIOV
 -      if (BNXT_VF(bp) && !bp->vf.vlan)
 +      if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
                return false;
  #endif
        return true;
@@@ -8466,7 -8405,7 +8466,7 @@@ static int bnxt_init_chip(struct bnxt *
        if (bp->dev->flags & IFF_BROADCAST)
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
  
 -      if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
 +      if (bp->dev->flags & IFF_PROMISC)
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  
        if (bp->dev->flags & IFF_ALLMULTI) {
@@@ -8617,18 -8556,10 +8617,18 @@@ static void bnxt_setup_inta(struct bnx
        bp->irq_tbl[0].handler = bnxt_inta;
  }
  
 +static int bnxt_init_int_mode(struct bnxt *bp);
 +
  static int bnxt_setup_int_mode(struct bnxt *bp)
  {
        int rc;
  
 +      if (!bp->irq_tbl) {
 +              rc = bnxt_init_int_mode(bp);
 +              if (rc || !bp->irq_tbl)
 +                      return rc ?: -ENODEV;
 +      }
 +
        if (bp->flags & BNXT_FLAG_USING_MSIX)
                bnxt_setup_msix(bp);
        else
@@@ -8813,7 -8744,7 +8813,7 @@@ static int bnxt_init_inta(struct bnxt *
  
  static int bnxt_init_int_mode(struct bnxt *bp)
  {
 -      int rc = 0;
 +      int rc = -ENODEV;
  
        if (bp->flags & BNXT_FLAG_MSIX_CAP)
                rc = bnxt_init_msix(bp);
@@@ -9100,9 -9031,8 +9100,9 @@@ static char *bnxt_report_fec(struct bnx
  static void bnxt_report_link(struct bnxt *bp)
  {
        if (bp->link_info.link_up) {
 -              const char *duplex;
 +              const char *signal = "";
                const char *flow_ctrl;
 +              const char *duplex;
                u32 speed;
                u16 fec;
  
                        flow_ctrl = "ON - receive";
                else
                        flow_ctrl = "none";
 -              netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
 -                          speed, duplex, flow_ctrl);
 -              if (bp->flags & BNXT_FLAG_EEE_CAP)
 +              if (bp->link_info.phy_qcfg_resp.option_flags &
 +                  PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
 +                      u8 sig_mode = bp->link_info.active_fec_sig_mode &
 +                                    PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
 +                      switch (sig_mode) {
 +                      case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
 +                              signal = "(NRZ) ";
 +                              break;
 +                      case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
 +                              signal = "(PAM4) ";
 +                              break;
 +                      default:
 +                              break;
 +                      }
 +              }
 +              netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
 +                          speed, signal, duplex, flow_ctrl);
 +              if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
                        netdev_info(bp->dev, "EEE is %s\n",
                                    bp->eee.eee_active ? "active" :
                                                         "not active");
@@@ -9173,6 -9088,10 +9173,6 @@@ static int bnxt_hwrm_phy_qcaps(struct b
        struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_link_info *link_info = &bp->link_info;
  
 -      bp->flags &= ~BNXT_FLAG_EEE_CAP;
 -      if (bp->test_info)
 -              bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
 -                                        BNXT_TEST_FL_AN_PHY_LPBK);
        if (bp->hwrm_spec_code < 0x10201)
                return 0;
  
        if (rc)
                goto hwrm_phy_qcaps_exit;
  
 +      bp->phy_flags = resp->flags;
        if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
                struct ethtool_eee *eee = &bp->eee;
                u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
  
 -              bp->flags |= BNXT_FLAG_EEE_CAP;
                eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
                bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
                                 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
                bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
                                 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
        }
 -      if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
 -              if (bp->test_info)
 -                      bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
 -      }
 -      if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
 -              if (bp->test_info)
 -                      bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
 -      }
 -      if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
 -              if (BNXT_PF(bp))
 -                      bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
 -      }
 -      if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
 -              bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
  
        if (bp->hwrm_spec_code >= 0x10a01) {
                if (bnxt_phy_qcaps_no_speed(resp)) {
@@@ -9284,7 -9217,7 +9284,7 @@@ int bnxt_update_link(struct bnxt *bp, b
                              PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
        link_info->module_status = resp->module_status;
  
 -      if (bp->flags & BNXT_FLAG_EEE_CAP) {
 +      if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
                struct ethtool_eee *eee = &bp->eee;
                u16 fw_speeds;
  
@@@ -9520,8 -9453,7 +9520,8 @@@ static int bnxt_hwrm_shutdown_link(stru
        if (!BNXT_SINGLE_PF(bp))
                return 0;
  
 -      if (pci_num_vf(bp->pdev))
 +      if (pci_num_vf(bp->pdev) &&
 +          !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
                return 0;
  
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
@@@ -9554,10 -9486,9 +9554,10 @@@ static int bnxt_try_recover_fw(struct b
  
                mutex_lock(&bp->hwrm_cmd_lock);
                do {
 -                      rc = __bnxt_hwrm_ver_get(bp, true);
                        sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
 -                      if (!sts || !BNXT_FW_IS_BOOTING(sts))
 +                      rc = __bnxt_hwrm_ver_get(bp, true);
 +                      if (!BNXT_FW_IS_BOOTING(sts) &&
 +                          !BNXT_FW_IS_RECOVERING(sts))
                                break;
                        retry++;
                } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
@@@ -9583,8 -9514,7 +9583,8 @@@ static int bnxt_hwrm_if_change(struct b
  {
        struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_drv_if_change_input req = {0};
 -      bool resc_reinit = false, fw_reset = false;
 +      bool fw_reset = !bp->irq_tbl;
 +      bool resc_reinit = false;
        int rc, retry = 0;
        u32 flags = 0;
  
        if (rc)
                return rc;
  
 -      if (!up)
 +      if (!up) {
 +              bnxt_inv_fw_health_reg(bp);
                return 0;
 +      }
  
        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
                resc_reinit = true;
        if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
                fw_reset = true;
 +      else if (bp->fw_health && !bp->fw_health->status_reliable)
 +              bnxt_try_map_fw_health_reg(bp);
  
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
                netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
 +              set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
                return -ENODEV;
        }
        if (resc_reinit || fw_reset) {
                if (fw_reset) {
 +                      set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
                        if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                                bnxt_ulp_stop(bp);
                        bnxt_free_ctx_mem(bp);
                        bnxt_dcb_free(bp);
                        rc = bnxt_fw_init_one(bp);
                        if (rc) {
 +                              clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
                                set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
                                return rc;
                        }
                        bnxt_clear_int_mode(bp);
                        rc = bnxt_init_int_mode(bp);
                        if (rc) {
 +                              clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
                                netdev_err(bp->dev, "init int mode failed\n");
                                return rc;
                        }
 -                      set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
                }
                if (BNXT_NEW_RM(bp)) {
                        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  
                        rc = bnxt_hwrm_func_resc_qcaps(bp, true);
 +                      if (rc)
 +                              netdev_err(bp->dev, "resc_qcaps failed\n");
 +
                        hw_resc->resv_cp_rings = 0;
                        hw_resc->resv_stat_ctxs = 0;
                        hw_resc->resv_irqs = 0;
                        }
                }
        }
 -      return 0;
 +      return rc;
  }
  
  static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
@@@ -9806,9 -9726,7 +9806,9 @@@ static ssize_t bnxt_show_temp(struct de
        if (!rc)
                len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
        mutex_unlock(&bp->hwrm_cmd_lock);
 -      return rc ?: len;
 +      if (rc)
 +              return rc;
 +      return len;
  }
  static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
  
@@@ -9865,7 -9783,7 +9865,7 @@@ static bool bnxt_eee_config_ok(struct b
        struct ethtool_eee *eee = &bp->eee;
        struct bnxt_link_info *link_info = &bp->link_info;
  
 -      if (!(bp->flags & BNXT_FLAG_EEE_CAP))
 +      if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
                return true;
  
        if (eee->eee_enabled) {
@@@ -9972,9 -9890,6 +9972,9 @@@ static int bnxt_reinit_after_abort(stru
        if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
                return -EBUSY;
  
 +      if (bp->dev->reg_state == NETREG_UNREGISTERED)
 +              return -ENODEV;
 +
        rc = bnxt_fw_init_one(bp);
        if (!rc) {
                bnxt_clear_int_mode(bp);
@@@ -10512,7 -10427,7 +10512,7 @@@ static void bnxt_set_rx_mode(struct net
                  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
                  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
  
 -      if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
 +      if (dev->flags & IFF_PROMISC)
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  
        uc_update = bnxt_uc_list_updated(bp);
@@@ -10588,9 -10503,6 +10588,9 @@@ static int bnxt_cfg_rx_mode(struct bnx
        }
  
  skip_uc:
 +      if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
 +          !bnxt_promisc_ok(bp))
 +              vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
        rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
        if (rc && vnic->mc_list_count) {
                netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
@@@ -10785,40 -10697,6 +10785,40 @@@ static int bnxt_set_features(struct net
        return rc;
  }
  
 +static netdev_features_t bnxt_features_check(struct sk_buff *skb,
 +                                           struct net_device *dev,
 +                                           netdev_features_t features)
 +{
 +      struct bnxt *bp;
 +      __be16 udp_port;
 +      u8 l4_proto = 0;
 +
 +      features = vlan_features_check(skb, features);
 +      if (!skb->encapsulation)
 +              return features;
 +
 +      switch (vlan_get_protocol(skb)) {
 +      case htons(ETH_P_IP):
 +              l4_proto = ip_hdr(skb)->protocol;
 +              break;
 +      case htons(ETH_P_IPV6):
 +              l4_proto = ipv6_hdr(skb)->nexthdr;
 +              break;
 +      default:
 +              return features;
 +      }
 +
 +      if (l4_proto != IPPROTO_UDP)
 +              return features;
 +
 +      bp = netdev_priv(dev);
 +      /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
 +      udp_port = udp_hdr(skb)->dest;
 +      if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
 +              return features;
 +      return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 +}
 +
  int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
                         u32 *reg_buf)
  {
@@@ -11144,7 -11022,6 +11144,7 @@@ static void bnxt_fw_reset_close(struct 
                pci_disable_device(bp->pdev);
        }
        __bnxt_close_nic(bp, true, false);
 +      bnxt_vf_reps_free(bp);
        bnxt_clear_int_mode(bp);
        bnxt_hwrm_func_drv_unrgtr(bp);
        if (pci_is_enabled(bp->pdev))
@@@ -11750,7 -11627,7 +11750,7 @@@ static void bnxt_reset_all(struct bnxt 
                req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
                req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
                rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 -              if (rc)
 +              if (rc != -ENODEV)
                        netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
        }
        bp->fw_reset_timestamp = jiffies;
@@@ -11833,20 -11710,28 +11833,20 @@@ static void bnxt_fw_reset_task(struct w
                bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
                return;
        case BNXT_FW_RESET_STATE_ENABLE_DEV:
 -              if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
 -                      u32 val;
 -
 -                      if (!bp->fw_reset_min_dsecs) {
 -                              u16 val;
 -
 -                              pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
 -                                                   &val);
 -                              if (val == 0xffff) {
 -                                      if (bnxt_fw_reset_timeout(bp)) {
 -                                              netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
 -                                              goto fw_reset_abort;
 -                                      }
 -                                      bnxt_queue_fw_reset_work(bp, HZ / 1000);
 -                                      return;
 +              bnxt_inv_fw_health_reg(bp);
 +              if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
 +                  !bp->fw_reset_min_dsecs) {
 +                      u16 val;
 +
 +                      pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
 +                      if (val == 0xffff) {
 +                              if (bnxt_fw_reset_timeout(bp)) {
 +                                      netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
 +                                      goto fw_reset_abort;
                                }
 +                              bnxt_queue_fw_reset_work(bp, HZ / 1000);
 +                              return;
                        }
 -                      val = bnxt_fw_health_readl(bp,
 -                                                 BNXT_FW_RESET_INPROG_REG);
 -                      if (val)
 -                              netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
 -                                          val);
                }
                clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
                if (pci_enable_device(bp->pdev)) {
                bnxt_ulp_start(bp, rc);
                if (!rc)
                        bnxt_reenable_sriov(bp);
 +              bnxt_vf_reps_alloc(bp);
 +              bnxt_vf_reps_open(bp);
                bnxt_dl_health_recovery_done(bp);
                bnxt_dl_health_status_update(bp, true);
                rtnl_unlock();
@@@ -12326,13 -12209,10 +12326,13 @@@ static int bnxt_udp_tunnel_sync(struct 
        unsigned int cmd;
  
        udp_tunnel_nic_get_port(netdev, table, 0, &ti);
 -      if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
 +      if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
 +              bp->vxlan_port = ti.port;
                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
 -      else
 +      } else {
 +              bp->nge_port = ti.port;
                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
 +      }
  
        if (ti.port)
                return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
@@@ -12432,7 -12312,6 +12432,7 @@@ static const struct net_device_ops bnxt
        .ndo_change_mtu         = bnxt_change_mtu,
        .ndo_fix_features       = bnxt_fix_features,
        .ndo_set_features       = bnxt_set_features,
 +      .ndo_features_check     = bnxt_features_check,
        .ndo_tx_timeout         = bnxt_tx_timeout,
  #ifdef CONFIG_BNXT_SRIOV
        .ndo_get_vf_config      = bnxt_get_vf_config,
@@@ -12501,17 -12380,12 +12501,17 @@@ static int bnxt_probe_phy(struct bnxt *
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;
  
 +      bp->phy_flags = 0;
        rc = bnxt_hwrm_phy_qcaps(bp);
        if (rc) {
                netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
                           rc);
                return rc;
        }
 +      if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
 +              bp->dev->priv_flags |= IFF_SUPP_NOFCS;
 +      else
 +              bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
        if (!fw_dflt)
                return 0;
  
@@@ -12985,8 -12859,6 +12985,6 @@@ static int bnxt_init_one(struct pci_de
        if (!BNXT_CHIP_P4_PLUS(bp))
                bp->flags |= BNXT_FLAG_DOUBLE_DB;
  
-       bp->ulp_probe = bnxt_ulp_probe;
        rc = bnxt_init_mac_addr(bp);
        if (rc) {
                dev_err(&pdev->dev, "Unable to initialize mac address.\n");
                                   rc);
        }
  
 +      bnxt_inv_fw_health_reg(bp);
        bnxt_dl_register(bp);
  
        rc = register_netdev(dev);
@@@ -671,10 -671,6 +671,10 @@@ struct nqe_cn 
  #define HWRM_MIN_TIMEOUT              25
  #define HWRM_MAX_TIMEOUT              40
  
 +#define HWRM_WAIT_MUST_ABORT(bp, req)                                 \
 +      (le16_to_cpu((req)->req_type) != HWRM_VER_GET &&                \
 +       !bnxt_is_fw_healthy(bp))
 +
  #define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ?  \
        ((n) * HWRM_SHORT_MIN_TIMEOUT) :                                \
        (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT +          \
@@@ -1341,6 -1337,9 +1341,6 @@@ struct bnxt_led_info 
  
  struct bnxt_test_info {
        u8 offline_mask;
 -      u8 flags;
 -#define BNXT_TEST_FL_EXT_LPBK         0x1
 -#define BNXT_TEST_FL_AN_PHY_LPBK      0x2
        u16 timeout;
        char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
  };
@@@ -1561,7 -1560,6 +1561,7 @@@ struct bnxt_fw_reporter_ctx 
  #define BNXT_FW_STATUS_HEALTH_MSK     0xffff
  #define BNXT_FW_STATUS_HEALTHY                0x8000
  #define BNXT_FW_STATUS_SHUTDOWN               0x100000
 +#define BNXT_FW_STATUS_RECOVERING     0x400000
  
  #define BNXT_FW_IS_HEALTHY(sts)               (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
                                         BNXT_FW_STATUS_HEALTHY)
  #define BNXT_FW_IS_ERR(sts)           (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
                                         BNXT_FW_STATUS_HEALTHY)
  
 +#define BNXT_FW_IS_RECOVERING(sts)    (BNXT_FW_IS_ERR(sts) &&                \
 +                                       ((sts) & BNXT_FW_STATUS_RECOVERING))
 +
  #define BNXT_FW_RETRY                 5
  #define BNXT_FW_IF_RETRY              10
  
@@@ -1690,6 -1685,7 +1690,6 @@@ struct bnxt 
        #define BNXT_FLAG_SHARED_RINGS  0x200
        #define BNXT_FLAG_PORT_STATS    0x400
        #define BNXT_FLAG_UDP_RSS_CAP   0x800
 -      #define BNXT_FLAG_EEE_CAP       0x1000
        #define BNXT_FLAG_NEW_RSS_CAP   0x2000
        #define BNXT_FLAG_WOL_CAP       0x4000
        #define BNXT_FLAG_ROCEV1_CAP    0x8000
  #define BNXT_NPAR(bp)         ((bp)->port_partition_type)
  #define BNXT_MH(bp)           ((bp)->flags & BNXT_FLAG_MULTI_HOST)
  #define BNXT_SINGLE_PF(bp)    (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
 +#define BNXT_SH_PORT_CFG_OK(bp)       (BNXT_PF(bp) &&                         \
 +                               ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG))
  #define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) ||                 \
 -                                ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) && \
 +                                BNXT_SH_PORT_CFG_OK(bp)) &&           \
                                 (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
  #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
  #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
        (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
  
        struct bnxt_en_dev      *edev;
-       struct bnxt_en_dev *    (*ulp_probe)(struct net_device *);
  
        struct bnxt_napi        **bnapi;
  
        #define BNXT_FW_CAP_EXT_STATS_SUPPORTED         0x00040000
        #define BNXT_FW_CAP_ERR_RECOVER_RELOAD          0x00100000
        #define BNXT_FW_CAP_HOT_RESET                   0x00200000
 -      #define BNXT_FW_CAP_SHARED_PORT_CFG             0x00400000
        #define BNXT_FW_CAP_VLAN_RX_STRIP               0x01000000
        #define BNXT_FW_CAP_VLAN_TX_INSERT              0x02000000
        #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED      0x04000000
 -      #define BNXT_FW_CAP_PORT_STATS_NO_RESET         0x10000000
        #define BNXT_FW_CAP_RING_MONITOR                0x40000000
  
  #define BNXT_NEW_RM(bp)               ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
  
        u16                     vxlan_fw_dst_port_id;
        u16                     nge_fw_dst_port_id;
 +      __be16                  vxlan_port;
 +      __be16                  nge_port;
        u8                      port_partition_type;
        u8                      port_count;
        u16                     br_mode;
        u32                     lpi_tmr_lo;
        u32                     lpi_tmr_hi;
  
 +      /* copied from flags in hwrm_port_phy_qcaps_output */
 +      u8                      phy_flags;
 +#define BNXT_PHY_FL_EEE_CAP           PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
 +#define BNXT_PHY_FL_EXT_LPBK          PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
 +#define BNXT_PHY_FL_AN_PHY_LPBK               PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
 +#define BNXT_PHY_FL_SHARED_PORT_CFG   PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED
 +#define BNXT_PHY_FL_PORT_STATS_NO_RESET       PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET
 +#define BNXT_PHY_FL_NO_PHY_LPBK               PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
 +#define BNXT_PHY_FL_FW_MANAGED_LKDN   PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
 +#define BNXT_PHY_FL_NO_FCS            PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
 +
        u8                      num_tests;
        struct bnxt_test_info   *test_info;
  
@@@ -2245,7 -2227,6 +2244,7 @@@ int bnxt_hwrm_set_link_setting(struct b
  int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
  int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
  int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
 +bool bnxt_is_fw_healthy(struct bnxt *bp);
  int bnxt_hwrm_fw_set_time(struct bnxt *);
  int bnxt_open_nic(struct bnxt *, bool, bool);
  int bnxt_half_open_nic(struct bnxt *bp);
@@@ -33,7 -33,6 +33,7 @@@
  #include <rdma/ib_verbs.h>
  #include <linux/mlx5/fs.h>
  #include "en.h"
 +#include "en/params.h"
  #include "ipoib.h"
  
  #define IB_DEFAULT_Q_KEY   0xb1b
@@@ -234,7 -233,6 +234,7 @@@ int mlx5i_create_underlay_qp(struct mlx
        }
  
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 +      MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
@@@ -373,7 -371,6 +373,7 @@@ static void mlx5i_destroy_flow_steering
  static int mlx5i_init_rx(struct mlx5e_priv *priv)
  {
        struct mlx5_core_dev *mdev = priv->mdev;
 +      u16 max_nch = priv->max_nch;
        int err;
  
        mlx5e_create_q_counters(priv);
        if (err)
                goto err_close_drop_rq;
  
 -      err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
 +      err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch);
        if (err)
                goto err_destroy_indirect_rqts;
  
        if (err)
                goto err_destroy_direct_rqts;
  
 -      err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
 +      err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch);
        if (err)
                goto err_destroy_indirect_tirs;
  
        return 0;
  
  err_destroy_direct_tirs:
 -      mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 +      mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
  err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv);
  err_destroy_direct_rqts:
 -      mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 +      mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
  err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
  err_close_drop_rq:
@@@ -423,12 -420,10 +423,12 @@@ err_destroy_q_counters
  
  static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
  {
 +      u16 max_nch = priv->max_nch;
 +
        mlx5i_destroy_flow_steering(priv);
 -      mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 +      mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
        mlx5e_destroy_indirect_tirs(priv);
 -      mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 +      mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_destroy_q_counters(priv);
@@@ -473,7 -468,6 +473,7 @@@ static const struct mlx5e_profile mlx5i
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5i_stats_grps,
        .stats_grps_num    = mlx5i_stats_grps_num,
 +      .rx_ptp_support    = false,
  };
  
  /* mlx5i netdev NDos */
  static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
  {
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
 -      struct mlx5e_channels new_channels = {};
 -      struct mlx5e_params *params;
 +      struct mlx5e_params new_params;
        int err = 0;
  
        mutex_lock(&priv->state_lock);
  
 -      params = &priv->channels.params;
 -
 -      if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
 -              params->sw_mtu = new_mtu;
 -              netdev->mtu = params->sw_mtu;
 -              goto out;
 -      }
 -
 -      new_channels.params = *params;
 -      new_channels.params.sw_mtu = new_mtu;
 +      new_params = priv->channels.params;
 +      new_params.sw_mtu = new_mtu;
  
 -      err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
 +      err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
        if (err)
                goto out;
  
 -      netdev->mtu = new_channels.params.sw_mtu;
 +      netdev->mtu = new_params.sw_mtu;
  
  out:
        mutex_unlock(&priv->state_lock);
@@@ -691,7 -694,6 +691,7 @@@ static int mlx5i_check_required_hca_cap
  static void mlx5_rdma_netdev_free(struct net_device *netdev)
  {
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
 +      struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;
  
  
        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
 -              mlx5e_destroy_mdev_resources(priv->mdev);
 +              mlx5e_destroy_mdev_resources(mdev);
        }
  }
  
  static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
  {
 -      return mdev->mlx5e_res.pdn != 0;
 +      return mdev->mlx5e_res.hw_objs.pdn != 0;
  }
  
  static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
        return &mlx5i_nic_profile;
  }
  
- static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
+ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                              struct net_device *netdev, void *param)
  {
        struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
@@@ -55,6 -55,10 +55,6 @@@ void mlx5_cleanup_reserved_gids(struct 
  
  int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count)
  {
 -      if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 -              mlx5_core_err(dev, "Cannot reserve GIDs when interfaces are up\n");
 -              return -EPERM;
 -      }
        if (dev->roce.reserved_gids.start < count) {
                mlx5_core_warn(dev, "GID table exhausted attempting to reserve %d more GIDs\n",
                               count);
@@@ -75,6 -79,7 +75,6 @@@
  
  void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count)
  {
 -      WARN(test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state), "Unreserving GIDs when interfaces are up");
        WARN(count > dev->roce.reserved_gids.count, "Unreserving %u GIDs when only %u reserved",
             count, dev->roce.reserved_gids.count);
  
  int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index)
  {
        int end = dev->roce.reserved_gids.start +
 -                dev->roce.reserved_gids.count;
 +                dev->roce.reserved_gids.count - 1;
        int index = 0;
  
 -      index = ida_simple_get(&dev->roce.reserved_gids.ida,
 -                             dev->roce.reserved_gids.start, end,
 -                             GFP_KERNEL);
 +      index = ida_alloc_range(&dev->roce.reserved_gids.ida,
 +                              dev->roce.reserved_gids.start, end,
 +                              GFP_KERNEL);
        if (index < 0)
                return index;
  
  void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index)
  {
        mlx5_core_dbg(dev, "Freeing reserved GID %u\n", gid_index);
 -      ida_simple_remove(&dev->roce.reserved_gids.ida, gid_index);
 +      ida_free(&dev->roce.reserved_gids.ida, gid_index);
  }
  
  unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev)
@@@ -137,10 -142,10 +137,10 @@@ int mlx5_core_roce_gid_set(struct mlx5_
                }
  
                ether_addr_copy(addr_mac, mac);
-               MLX5_SET_RA(in_addr, roce_version, roce_version);
-               MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
                memcpy(addr_l3_addr, gid, gidsz);
        }
+       MLX5_SET_RA(in_addr, roce_version, roce_version);
+       MLX5_SET_RA(in_addr, roce_l3_type, roce_l3_type);
  
        if (MLX5_CAP_GEN(dev, num_vhca_ports) > 0)
                MLX5_SET(set_roce_address_in, in, vhca_port_num, port_num);
@@@ -127,7 -127,6 +127,7 @@@ enum 
        MLX5_REG_PELC            = 0x500e,
        MLX5_REG_PVLC            = 0x500f,
        MLX5_REG_PCMR            = 0x5041,
 +      MLX5_REG_PDDR            = 0x5031,
        MLX5_REG_PMLP            = 0x5002,
        MLX5_REG_PPLM            = 0x5023,
        MLX5_REG_PCAM            = 0x507f,
@@@ -517,8 -516,8 +517,8 @@@ struct mlx5_rate_limit 
  
  struct mlx5_rl_entry {
        u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
 -      u16 index;
        u64 refcount;
 +      u16 index;
        u16 uid;
        u8 dedicated : 1;
  };
@@@ -530,7 -529,6 +530,7 @@@ struct mlx5_rl_table 
        u32                     max_rate;
        u32                     min_rate;
        struct mlx5_rl_entry   *rl_entry;
 +      u64 refcount;
  };
  
  struct mlx5_core_roce {
@@@ -645,14 -643,10 +645,14 @@@ struct mlx5_td 
  };
  
  struct mlx5e_resources {
 -      u32                        pdn;
 -      struct mlx5_td             td;
 -      struct mlx5_core_mkey      mkey;
 -      struct mlx5_sq_bfreg       bfreg;
 +      struct mlx5e_hw_objs {
 +              u32                        pdn;
 +              struct mlx5_td             td;
 +              struct mlx5_core_mkey      mkey;
 +              struct mlx5_sq_bfreg       bfreg;
 +      } hw_objs;
 +      struct devlink_port dl_port;
 +      struct net_device *uplink_netdev;
  };
  
  enum mlx5_sw_icm_type {
@@@ -1236,7 -1230,7 +1236,7 @@@ enum 
        MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
  };
  
- static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+ static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
  {
        struct devlink *devlink = priv_to_devlink(dev);
        union devlink_param_value val;
@@@ -133,6 -133,7 +133,7 @@@ enum 
        MLX5_CMD_OP_PAGE_FAULT_RESUME             = 0x204,
        MLX5_CMD_OP_ALLOC_MEMIC                   = 0x205,
        MLX5_CMD_OP_DEALLOC_MEMIC                 = 0x206,
+       MLX5_CMD_OP_MODIFY_MEMIC                  = 0x207,
        MLX5_CMD_OP_CREATE_EQ                     = 0x301,
        MLX5_CMD_OP_DESTROY_EQ                    = 0x302,
        MLX5_CMD_OP_QUERY_EQ                      = 0x303,
@@@ -437,11 -438,11 +438,11 @@@ struct mlx5_ifc_flow_table_prop_layout_
        u8         reserved_at_60[0x18];
        u8         log_max_ft_num[0x8];
  
 -      u8         reserved_at_80[0x18];
 +      u8         reserved_at_80[0x10];
 +      u8         log_max_flow_counter[0x8];
        u8         log_max_destination[0x8];
  
 -      u8         log_max_flow_counter[0x8];
 -      u8         reserved_at_a8[0x10];
 +      u8         reserved_at_a0[0x18];
        u8         log_max_flow[0x8];
  
        u8         reserved_at_c0[0x40];
@@@ -622,19 -623,7 +623,19 @@@ struct mlx5_ifc_fte_match_set_misc3_bit
  
        u8         geneve_tlv_option_0_data[0x20];
  
 -      u8         reserved_at_140[0xc0];
 +      u8         gtpu_teid[0x20];
 +
 +      u8         gtpu_msg_type[0x8];
 +      u8         gtpu_msg_flags[0x8];
 +      u8         reserved_at_170[0x10];
 +
 +      u8         gtpu_dw_2[0x20];
 +
 +      u8         gtpu_first_ext_dw_0[0x20];
 +
 +      u8         gtpu_dw_0[0x20];
 +
 +      u8         reserved_at_1e0[0x20];
  };
  
  struct mlx5_ifc_fte_match_set_misc4_bits {
@@@ -961,9 -950,7 +962,9 @@@ struct mlx5_ifc_roce_cap_bits 
        u8         roce_apm[0x1];
        u8         reserved_at_1[0x3];
        u8         sw_r_roce_src_udp_port[0x1];
 -      u8         reserved_at_5[0x19];
 +      u8         fl_rc_qp_when_roce_disabled[0x1];
 +      u8         fl_rc_qp_when_roce_enabled[0x1];
 +      u8         reserved_at_7[0x17];
        u8         qp_ts_format[0x2];
  
        u8         reserved_at_20[0x60];
@@@ -1031,7 -1018,11 +1032,11 @@@ struct mlx5_ifc_device_mem_cap_bits 
  
        u8         header_modify_sw_icm_start_address[0x40];
  
-       u8         reserved_at_180[0x680];
+       u8         reserved_at_180[0x80];
+       u8         memic_operations[0x20];
+       u8         reserved_at_220[0x5e0];
  };
  
  struct mlx5_ifc_device_event_cap_bits {
@@@ -1251,17 -1242,9 +1256,17 @@@ enum 
  
  enum {
        MLX5_FLEX_PARSER_GENEVE_ENABLED         = 1 << 3,
 +      MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED  = 1 << 4,
 +      mlx5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED  = 1 << 5,
        MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED      = 1 << 7,
        MLX5_FLEX_PARSER_ICMP_V4_ENABLED        = 1 << 8,
        MLX5_FLEX_PARSER_ICMP_V6_ENABLED        = 1 << 9,
 +      MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED = 1 << 10,
 +      MLX5_FLEX_PARSER_GTPU_ENABLED           = 1 << 11,
 +      MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED      = 1 << 16,
 +      MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED = 1 << 17,
 +      MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED      = 1 << 18,
 +      MLX5_FLEX_PARSER_GTPU_TEID_ENABLED      = 1 << 19,
  };
  
  enum {
@@@ -1319,9 -1302,7 +1324,9 @@@ struct mlx5_ifc_cmd_hca_cap_bits 
        u8         log_max_srq_sz[0x8];
        u8         log_max_qp_sz[0x8];
        u8         event_cap[0x1];
 -      u8         reserved_at_91[0x7];
 +      u8         reserved_at_91[0x2];
 +      u8         isolate_vl_tc_new[0x1];
 +      u8         reserved_at_94[0x4];
        u8         prio_tag_required[0x1];
        u8         reserved_at_99[0x2];
        u8         log_max_qp[0x5];
        u8         cqe_compression_timeout[0x10];
        u8         cqe_compression_max_num[0x10];
  
 -      u8         reserved_at_5e0[0x10];
 +      u8         reserved_at_5e0[0x8];
 +      u8         flex_parser_id_gtpu_dw_0[0x4];
 +      u8         reserved_at_5ec[0x4];
        u8         tag_matching[0x1];
        u8         rndv_offload_rc[0x1];
        u8         rndv_offload_dc[0x1];
        u8         affiliate_nic_vport_criteria[0x8];
        u8         native_port_num[0x8];
        u8         num_vhca_ports[0x8];
 -      u8         reserved_at_618[0x6];
 +      u8         flex_parser_id_gtpu_teid[0x4];
 +      u8         reserved_at_61c[0x2];
        u8         sw_owner_id[0x1];
        u8         reserved_at_61f[0x1];
  
        u8         reserved_at_6e0[0x10];
        u8         sf_base_id[0x10];
  
 -      u8         reserved_at_700[0x8];
 +      u8         flex_parser_id_gtpu_dw_2[0x4];
 +      u8         flex_parser_id_gtpu_first_ext_dw_0[0x4];
        u8         num_total_dynamic_vf_msix[0x18];
        u8         reserved_at_720[0x14];
        u8         dynamic_msix_table_size[0xc];
@@@ -2946,8 -2923,7 +2951,8 @@@ struct mlx5_ifc_qpc_bits 
        u8         state[0x4];
        u8         lag_tx_port_affinity[0x4];
        u8         st[0x8];
 -      u8         reserved_at_10[0x3];
 +      u8         reserved_at_10[0x2];
 +      u8         isolate_vl_tc[0x1];
        u8         pm_state[0x2];
        u8         reserved_at_15[0x1];
        u8         req_e2e_credit_mode[0x2];
@@@ -8876,8 -8852,6 +8881,8 @@@ struct mlx5_ifc_pplm_reg_bits 
  
        u8         fec_override_admin_100g_2x[0x10];
        u8         fec_override_admin_50g_1x[0x10];
 +
 +      u8         reserved_at_140[0x140];
  };
  
  struct mlx5_ifc_ppcnt_reg_bits {
@@@ -9985,53 -9959,6 +9990,53 @@@ struct mlx5_ifc_mirc_reg_bits 
        u8         reserved_at_20[0x20];
  };
  
 +struct mlx5_ifc_pddr_monitor_opcode_bits {
 +      u8         reserved_at_0[0x10];
 +      u8         monitor_opcode[0x10];
 +};
 +
 +union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits {
 +      struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
 +      u8         reserved_at_0[0x20];
 +};
 +
 +enum {
 +      /* Monitor opcodes */
 +      MLX5_PDDR_REG_TRBLSH_GROUP_OPCODE_MONITOR = 0x0,
 +};
 +
 +struct mlx5_ifc_pddr_troubleshooting_page_bits {
 +      u8         reserved_at_0[0x10];
 +      u8         group_opcode[0x10];
 +
 +      union mlx5_ifc_pddr_troubleshooting_page_status_opcode_auto_bits status_opcode;
 +
 +      u8         reserved_at_40[0x20];
 +
 +      u8         status_message[59][0x20];
 +};
 +
 +union mlx5_ifc_pddr_reg_page_data_auto_bits {
 +      struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
 +      u8         reserved_at_0[0x7c0];
 +};
 +
 +enum {
 +      MLX5_PDDR_REG_PAGE_SELECT_TROUBLESHOOTING_INFO_PAGE      = 0x1,
 +};
 +
 +struct mlx5_ifc_pddr_reg_bits {
 +      u8         reserved_at_0[0x8];
 +      u8         local_port[0x8];
 +      u8         pnat[0x2];
 +      u8         reserved_at_12[0xe];
 +
 +      u8         reserved_at_20[0x18];
 +      u8         page_select[0x8];
 +
 +      union mlx5_ifc_pddr_reg_page_data_auto_bits page_data;
 +};
 +
  union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
        struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
        struct mlx5_ifc_pamp_reg_bits pamp_reg;
        struct mlx5_ifc_paos_reg_bits paos_reg;
        struct mlx5_ifc_pcap_reg_bits pcap_reg;
 +      struct mlx5_ifc_pddr_monitor_opcode_bits pddr_monitor_opcode;
 +      struct mlx5_ifc_pddr_reg_bits pddr_reg;
 +      struct mlx5_ifc_pddr_troubleshooting_page_bits pddr_troubleshooting_page;
        struct mlx5_ifc_peir_reg_bits peir_reg;
        struct mlx5_ifc_pelc_reg_bits pelc_reg;
        struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
@@@ -10296,7 -10220,7 +10301,7 @@@ struct mlx5_ifc_pbmc_reg_bits 
  
        struct mlx5_ifc_bufferx_reg_bits buffer[10];
  
 -      u8         reserved_at_2e0[0x40];
 +      u8         reserved_at_2e0[0x80];
  };
  
  struct mlx5_ifc_qtct_reg_bits {
@@@ -10498,6 -10422,41 +10503,41 @@@ struct mlx5_ifc_destroy_vport_lag_in_bi
        u8         reserved_at_40[0x40];
  };
  
+ enum {
+       MLX5_MODIFY_MEMIC_OP_MOD_ALLOC,
+       MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC,
+ };
+ struct mlx5_ifc_modify_memic_in_bits {
+       u8         opcode[0x10];
+       u8         uid[0x10];
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+       u8         reserved_at_40[0x20];
+       u8         reserved_at_60[0x18];
+       u8         memic_operation_type[0x8];
+       u8         memic_start_addr[0x40];
+       u8         reserved_at_c0[0x140];
+ };
+ struct mlx5_ifc_modify_memic_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+       u8         syndrome[0x20];
+       u8         reserved_at_40[0x40];
+       u8         memic_operation_addr[0x40];
+       u8         reserved_at_c0[0x140];
+ };
  struct mlx5_ifc_alloc_memic_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];