Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 14 Dec 2016 21:31:29 +0000 (13:31 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 14 Dec 2016 21:31:29 +0000 (13:31 -0800)
Pull crypto updates from Herbert Xu:
 "Here is the crypto update for 4.10:

  API:
   - add skcipher walk interface
   - add asynchronous compression (acomp) interface
   - fix algif_aead AIO handling of zero buffer

  Algorithms:
   - fix unaligned access in poly1305
   - fix DRBG output to large buffers

  Drivers:
   - add support for iMX6UL to caam
   - fix givenc descriptors (used by IPsec) in caam
   - accelerated SHA256/SHA512 for ARM64 from OpenSSL
   - add SSE CRCT10DIF and CRC32 to ARM/ARM64
   - add AEAD support to Chelsio chcr
   - add Armada 8K support to omap-rng"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (148 commits)
  crypto: testmgr - fix overlap in chunked tests again
  crypto: arm/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm64/crc32 - accelerated support based on x86 SSE implementation
  crypto: arm/crct10dif - port x86 SSE implementation to ARM
  crypto: arm64/crct10dif - port x86 SSE implementation to arm64
  crypto: testmgr - add/enhance test cases for CRC-T10DIF
  crypto: testmgr - avoid overlap in chunked tests
  crypto: chcr - checking for IS_ERR() instead of NULL
  crypto: caam - check caam_emi_slow instead of re-lookup platform
  crypto: algif_aead - fix AIO handling of zero buffer
  crypto: aes-ce - Make aes_simd_algs static
  crypto: algif_skcipher - set error code when kcalloc fails
  crypto: caam - make caamalg_desc a proper module
  crypto: caam - pass key buffers with typesafe pointers
  crypto: arm64/aes-ce-ccm - Fix AEAD decryption length
  MAINTAINERS: add crypto headers to crypto entry
  crypto: doc - remove misleading mention of async API
  crypto: doc - fix header file name
  crypto: api - fix comment typo
  crypto: skcipher - Add separate walker for AEAD decryption
  ...

Documentation/devicetree/bindings/vendor-prefixes.txt
MAINTAINERS
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/x86/crypto/aesni-intel_glue.c
crypto/algif_aead.c
crypto/algif_skcipher.c
drivers/crypto/caam/ctrl.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_core.c
drivers/crypto/marvell/hash.c

@@@ -39,7 -39,6 +39,7 @@@ auo   AU Optronics Corporatio
  auvidea Auvidea GmbH
  avago Avago Technologies
  avic  Shanghai AVIC Optoelectronics Co., Ltd.
 +axentia       Axentia Technologies AB
  axis  Axis Communications AB
  boe   BOE Technology Group Co., Ltd.
  bosch Bosch Sensortec GmbH
@@@ -127,7 -126,6 +127,7 @@@ hitex      Hitex Development Tool
  holt  Holt Integrated Circuits, Inc.
  honeywell     Honeywell
  hp    Hewlett Packard
 +holtek        Holtek Semiconductor, Inc.
  i2se  I2SE GmbH
  ibm   International Business Machines (IBM)
  idt   Integrated Device Technologies, Inc.
@@@ -137,6 -135,7 +137,7 @@@ infineon Infineon Technologie
  inforce       Inforce Computing
  ingenic       Ingenic Semiconductor
  innolux       Innolux Corporation
+ inside-secure INSIDE Secure
  intel Intel Corporation
  intercontrol  Inter Control Group
  invensense    InvenSense Inc.
@@@ -162,19 -161,16 +163,19 @@@ lltc    Linear Technology Corporatio
  lsi   LSI Corp. (LSI Logic)
  marvell       Marvell Technology Group Ltd.
  maxim Maxim Integrated Products
 +mcube mCube
  meas  Measurement Specialties
  mediatek      MediaTek Inc.
  melexis       Melexis N.V.
  melfas        MELFAS Inc.
 +memsic        MEMSIC Inc.
  merrii        Merrii Technology Co., Ltd.
  micrel        Micrel Inc.
  microchip     Microchip Technology Inc.
  microcrystal  Micro Crystal AG
  micron        Micron Technology Inc.
  minix MINIX Technology Ltd.
 +miramems      MiraMEMS Sensing Technology Co., Ltd.
  mitsubishi    Mitsubishi Electric Corporation
  mosaixtech    Mosaix Technologies, Inc.
  moxa  Moxa
@@@ -192,7 -188,6 +193,7 @@@ netgear    NETGEA
  netlogic      Broadcom Corporation (formerly NetLogic Microsystems)
  netxeon               Shenzhen Netxeon Technology CO., LTD
  newhaven      Newhaven Display International
 +nvd   New Vision Display
  nintendo      Nintendo
  nokia Nokia
  nuvoton       Nuvoton Technology Corporation
diff --combined MAINTAINERS
@@@ -35,13 -35,13 +35,13 @@@ trivial patch so apply some common sens
  
        PLEASE check your patch with the automated style checker
        (scripts/checkpatch.pl) to catch trivial style violations.
 -      See Documentation/CodingStyle for guidance here.
 +      See Documentation/process/coding-style.rst for guidance here.
  
        PLEASE CC: the maintainers and mailing lists that are generated
        by scripts/get_maintainer.pl.  The results returned by the
        script will be best if you have git installed and are making
        your changes in a branch derived from Linus' latest git tree.
 -      See Documentation/SubmittingPatches for details.
 +      See Documentation/process/submitting-patches.rst for details.
  
        PLEASE try to include any credit lines you want added with the
        patch. It avoids people being missed off by mistake and makes
@@@ -54,7 -54,7 +54,7 @@@
        of the Linux Foundation certificate of contribution and should
        include a Signed-off-by: line.  The current version of this
        "Developer's Certificate of Origin" (DCO) is listed in the file
 -      Documentation/SubmittingPatches.
 +      Documentation/process/submitting-patches.rst.
  
  6.    Make sure you have the right to send any changes you make. If you
        do changes at work you may find your employer owns the patch
@@@ -74,14 -74,9 +74,14 @@@ Descriptions of section entries
           These reviewers should be CCed on patches.
        L: Mailing list that is relevant to this area
        W: Web-page with status/info
 +      B: URI for where to file bugs. A web-page with detailed bug
 +         filing info, a direct bug tracker link, or a mailto: URI.
 +      C: URI for chat protocol, server and channel where developers
 +         usually hang out, for example irc://server/channel.
        Q: Patchwork web based patch tracking system site
        T: SCM tree type and location.
           Type is one of: git, hg, quilt, stgit, topgit
 +      B: Bug tracking system location.
        S: Status, one of the following:
           Supported:   Someone is actually paid to look after this.
           Maintained:  Someone actually looks after it.
@@@ -260,12 -255,6 +260,12 @@@ L:       linux-gpio@vger.kernel.or
  S:    Maintained
  F:    drivers/gpio/gpio-104-idio-16.c
  
 +ACCES 104-QUAD-8 IIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/counter/104-quad-8.c
 +
  ACENIC DRIVER
  M:    Jes Sorensen <jes@trained-monkey.org>
  L:    linux-acenic@sunsite.dk
@@@ -292,7 -281,6 +292,7 @@@ L: linux-acpi@vger.kernel.or
  W:    https://01.org/linux-acpi
  Q:    https://patchwork.kernel.org/project/linux-acpi/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/
  F:    drivers/pnp/pnpacpi/
@@@ -316,8 -304,6 +316,8 @@@ W: https://acpica.org
  W:    https://github.com/acpica/acpica/
  Q:    https://patchwork.kernel.org/project/linux-acpi/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +B:    https://bugzilla.kernel.org
 +B:    https://bugs.acpica.org
  S:    Supported
  F:    drivers/acpi/acpica/
  F:    include/acpi/
@@@ -327,7 -313,6 +327,7 @@@ ACPI FAN DRIVE
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
  W:    https://01.org/linux-acpi
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/fan.c
  
@@@ -343,7 -328,6 +343,7 @@@ ACPI THERMAL DRIVE
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
  W:    https://01.org/linux-acpi
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/*thermal*
  
@@@ -351,7 -335,6 +351,7 @@@ ACPI VIDEO DRIVE
  M:    Zhang Rui <rui.zhang@intel.com>
  L:    linux-acpi@vger.kernel.org
  W:    https://01.org/linux-acpi
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/acpi/acpi_video.c
  
@@@ -587,11 -570,6 +587,11 @@@ T:       git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/usb/airspy/
  
 +ALACRITECH GIGABIT ETHERNET DRIVER
 +M:    Lino Sanfilippo <LinoSanfilippo@gmx.de>
 +S:    Maintained
 +F:    drivers/net/ethernet/alacritech/*
 +
  ALCATEL SPEEDTOUCH USB DRIVER
  M:    Duncan Sands <duncan.sands@free.fr>
  L:    linux-usb@vger.kernel.org
@@@ -809,7 -787,7 +809,7 @@@ S: Supporte
  F:    drivers/iio/*/ad*
  X:    drivers/iio/*/adjd*
  F:    drivers/staging/iio/*/ad*
 -F:    staging/iio/trigger/iio-trig-bfin-timer.c
 +F:    drivers/staging/iio/trigger/iio-trig-bfin-timer.c
  
  ANALOG DEVICES INC DMA DRIVERS
  M:    Lars-Peter Clausen <lars@metafoo.de>
@@@ -1058,7 -1036,6 +1058,7 @@@ F:      arch/arm/mach-meson
  F:    arch/arm/boot/dts/meson*
  F:    arch/arm64/boot/dts/amlogic/
  F:    drivers/pinctrl/meson/
 +F:    drivers/mmc/host/meson*
  N:    meson
  
  ARM/Annapurna Labs ALPINE ARCHITECTURE
@@@ -1465,7 -1442,6 +1465,7 @@@ F:      drivers/cpufreq/mvebu-cpufreq.
  F:    arch/arm/configs/mvebu_*_defconfig
  
  ARM/Marvell Berlin SoC support
 +M:    Jisheng Zhang <jszhang@marvell.com>
  M:    Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
@@@ -1798,7 -1774,6 +1798,7 @@@ F:      drivers/char/hw_random/st-rng.
  F:    drivers/clocksource/arm_global_timer.c
  F:    drivers/clocksource/clksrc_st_lpc.c
  F:    drivers/cpufreq/sti-cpufreq.c
 +F:    drivers/dma/st_fdma*
  F:    drivers/i2c/busses/i2c-st.c
  F:    drivers/media/rc/st_rc.c
  F:    drivers/media/platform/sti/c8sectpfe/
@@@ -1809,7 -1784,6 +1809,7 @@@ F:      drivers/phy/phy-stih407-usb.
  F:    drivers/phy/phy-stih41x-usb.c
  F:    drivers/pinctrl/pinctrl-st.c
  F:    drivers/remoteproc/st_remoteproc.c
 +F:    drivers/remoteproc/st_slim_rproc.c
  F:    drivers/reset/sti/
  F:    drivers/rtc/rtc-st-lpc.c
  F:    drivers/tty/serial/st-asc.c
@@@ -1818,7 -1792,6 +1818,7 @@@ F:      drivers/usb/host/ehci-st.
  F:    drivers/usb/host/ohci-st.c
  F:    drivers/watchdog/st_lpc_wdt.c
  F:    drivers/ata/ahci_st.c
 +F:    include/linux/remoteproc/st_slim_rproc.h
  
  ARM/STM32 ARCHITECTURE
  M:    Maxime Coquelin <mcoquelin.stm32@gmail.com>
@@@ -2344,13 -2317,6 +2344,13 @@@ F:    include/uapi/linux/ax25.
  F:    include/net/ax25.h
  F:    net/ax25/
  
 +AXENTIA ASOC DRIVERS
 +M:    Peter Rosin <peda@axentia.se>
 +L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/sound/axentia,*
 +F:    sound/soc/atmel/tse850-pcm5142.c
 +
  AZ6007 DVB DRIVER
  M:    Mauro Carvalho Chehab <mchehab@s-opensource.com>
  M:    Mauro Carvalho Chehab <mchehab@kernel.org>
@@@ -2563,8 -2529,6 +2563,8 @@@ L:      netdev@vger.kernel.or
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    kernel/bpf/
 +F:    tools/testing/selftests/bpf/
 +F:    lib/test_bpf.c
  
  BROADCOM B44 10/100 ETHERNET DRIVER
  M:    Michael Chan <michael.chan@broadcom.com>
@@@ -2587,18 -2551,15 +2587,18 @@@ S:   Supporte
  F:    drivers/net/ethernet/broadcom/genet/
  
  BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 -M:    Sony Chacko <sony.chacko@qlogic.com>
 -M:    Dept-HSGLinuxNICDev@qlogic.com
 +M:    Rasesh Mody <rasesh.mody@cavium.com>
 +M:    Harish Patil <harish.patil@cavium.com>
 +M:    Dept-GELinuxNICDev@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/broadcom/bnx2.*
  F:    drivers/net/ethernet/broadcom/bnx2_*
  
  BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 -M:    Ariel Elior <ariel.elior@qlogic.com>
 +M:    Yuval Mintz <Yuval.Mintz@cavium.com>
 +M:    Ariel Elior <ariel.elior@cavium.com>
 +M:    everest-linux-l2@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/broadcom/bnx2x/
@@@ -2625,7 -2586,6 +2625,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
  S:    Maintained
  N:    bcm2835
 +F:    drivers/staging/vc04_services
  
  BROADCOM BCM47XX MIPS ARCHITECTURE
  M:    Hauke Mehrtens <hauke@hauke-m.de>
@@@ -2778,14 -2738,6 +2778,14 @@@ L:    bcm-kernel-feedback-list@broadcom.co
  S:    Maintained
  F:    drivers/mtd/nand/brcmnand/
  
 +BROADCOM STB AVS CPUFREQ DRIVER
 +M:    Markus Mayer <mmayer@broadcom.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
 +L:    linux-pm@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
 +F:    drivers/cpufreq/brcmstb*
 +
  BROADCOM SPECIFIC AMBA DRIVER (BCMA)
   M:    Rafał Miłecki <zajec5@gmail.com>
  L:    linux-wireless@vger.kernel.org
@@@ -2814,9 -2766,7 +2814,9 @@@ S:      Supporte
  F:    drivers/scsi/bfa/
  
  BROCADE BNA 10 GIGABIT ETHERNET DRIVER
 -M:    Rasesh Mody <rasesh.mody@qlogic.com>
 +M:    Rasesh Mody <rasesh.mody@cavium.com>
 +M:    Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 +M:    Dept-GELinuxNICDev@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/brocade/bna/
@@@ -2974,7 -2924,7 +2974,7 @@@ CAPELLA MICROSYSTEMS LIGHT SENSOR DRIVE
  M:    Kevin Tsai <ktsai@capellamicro.com>
  S:    Maintained
  F:    drivers/iio/light/cm*
 -F:    Documentation/devicetree/bindings/i2c/trivial-devices.txt
 +F:    Documentation/devicetree/bindings/i2c/trivial-admin-guide/devices.rst
  
  CAVIUM I2C DRIVER
  M:    Jan Glauber <jglauber@cavium.com>
@@@ -3074,12 -3024,6 +3074,12 @@@ F:    drivers/usb/host/whci
  F:    drivers/usb/wusbcore/
  F:    include/linux/usb/wusb*
  
 +HT16K33 LED CONTROLLER DRIVER
 +M:    Robin van der Gracht <robin@protonic.nl>
 +S:    Maintained
 +F:    drivers/auxdisplay/ht16k33.c
 +F:    Documentation/devicetree/bindings/display/ht16k33.txt
 +
  CFAG12864B LCD DRIVER
  M:    Miguel Ojeda Sandonis <miguel.ojeda.sandonis@gmail.com>
  W:    http://miguelojeda.es/auxdisplay.htm
@@@ -3128,7 -3072,7 +3128,7 @@@ M:      Harry Wei <harryxiyou@gmail.com
  L:    xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
  L:    linux-kernel@zh-kernel.org (moderated for non-subscribers)
  S:    Maintained
 -F:    Documentation/zh_CN/
 +F:    Documentation/translations/zh_CN/
  
  CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
  M:    Peter Chen <Peter.Chen@nxp.com>
@@@ -3199,15 -3143,15 +3199,15 @@@ S:   Supporte
  F:    drivers/clocksource
  
  CISCO FCOE HBA DRIVER
 -M:    Hiral Patel <hiralpat@cisco.com>
 -M:    Suma Ramars <sramars@cisco.com>
 -M:    Brian Uchino <buchino@cisco.com>
 +M:    Satish Kharat <satishkh@cisco.com>
 +M:    Sesidhar Baddela <sebaddel@cisco.com>
 +M:    Karan Tilak Kumar <kartilak@cisco.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/fnic/
  
  CISCO SCSI HBA DRIVER
 -M:    Narsimhulu Musini <nmusini@cisco.com>
 +M:    Karan Tilak Kumar <kartilak@cisco.com>
  M:    Sesidhar Baddela <sebaddel@cisco.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
@@@ -3384,7 -3328,6 +3384,7 @@@ L:      linux-pm@vger.kernel.or
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
  T:    git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
 +B:    https://bugzilla.kernel.org
  F:    Documentation/cpu-freq/
  F:    drivers/cpufreq/
  F:    include/linux/cpufreq.h
@@@ -3424,7 -3367,6 +3424,7 @@@ M:      Daniel Lezcano <daniel.lezcano@linar
  L:    linux-pm@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 +B:    https://bugzilla.kernel.org
  F:    drivers/cpuidle/*
  F:    include/linux/cpuidle.h
  
@@@ -3470,6 -3412,7 +3470,7 @@@ F:      arch/*/crypto
  F:    crypto/
  F:    drivers/crypto/
  F:    include/crypto/
+ F:    include/linux/crypto*
  
  CRYPTOGRAPHIC RANDOM NUMBER GENERATOR
  M:    Neil Horman <nhorman@tuxdriver.com>
@@@ -3963,7 -3906,7 +3964,7 @@@ F:      include/linux/dma-buf
  F:    include/linux/reservation.h
  F:    include/linux/*fence.h
  F:    Documentation/dma-buf-sharing.txt
 -T:    git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  SYNC FILE FRAMEWORK
  M:    Sumit Semwal <sumit.semwal@linaro.org>
@@@ -3971,12 -3914,10 +3972,12 @@@ R:   Gustavo Padovan <gustavo@padovan.org
  S:    Maintained
  L:    linux-media@vger.kernel.org
  L:    dri-devel@lists.freedesktop.org
 -F:    drivers/dma-buf/sync_file.c
 +F:    drivers/dma-buf/sync_*
 +F:    drivers/dma-buf/sw_sync.c
  F:    include/linux/sync_file.h
 +F:    include/uapi/linux/sync_file.h
  F:    Documentation/sync_file.txt
 -T:    git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <vinod.koul@intel.com>
@@@ -4064,8 -4005,6 +4065,8 @@@ DRM DRIVER
  M:    David Airlie <airlied@linux.ie>
  L:    dri-devel@lists.freedesktop.org
  T:    git git://people.freedesktop.org/~airlied/linux
 +B:    https://bugs.freedesktop.org/
 +C:    irc://chat.freenode.net/dri-devel
  S:    Maintained
  F:    drivers/gpu/drm/
  F:    drivers/gpu/vga/
@@@ -4076,30 -4015,11 +4077,30 @@@ F:   Documentation/gpu
  F:    include/drm/
  F:    include/uapi/drm/
  
 +DRM DRIVERS AND MISC GPU PATCHES
 +M:    Daniel Vetter <daniel.vetter@intel.com>
 +M:    Jani Nikula <jani.nikula@linux.intel.com>
 +M:    Sean Paul <seanpaul@chromium.org>
 +W:    https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    Documentation/gpu/
 +F:    drivers/gpu/vga/
 +F:    drivers/gpu/drm/*
 +F:    include/drm/drm*
 +F:    include/uapi/drm/drm*
 +
  DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
  M:    Dave Airlie <airlied@redhat.com>
  S:    Odd Fixes
  F:    drivers/gpu/drm/ast/
  
 +DRM DRIVERS FOR BRIDGE CHIPS
 +M:    Archit Taneja <architt@codeaurora.org>
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    drivers/gpu/drm/bridge/
 +
  DRM DRIVER FOR BOCHS VIRTUAL GPU
  M:    Gerd Hoffmann <kraxel@redhat.com>
  S:    Odd Fixes
@@@ -4135,9 -4055,8 +4136,9 @@@ INTEL DRM DRIVERS (excluding Poulsbo, M
  M:    Daniel Vetter <daniel.vetter@intel.com>
  M:    Jani Nikula <jani.nikula@linux.intel.com>
  L:    intel-gfx@lists.freedesktop.org
 -L:    dri-devel@lists.freedesktop.org
  W:    https://01.org/linuxgraphics/
 +B:    https://01.org/linuxgraphics/documentation/how-report-bugs
 +C:    irc://chat.freenode.net/intel-gfx
  Q:    http://patchwork.freedesktop.org/project/intel-gfx/
  T:    git git://anongit.freedesktop.org/drm-intel
  S:    Supported
@@@ -4146,16 -4065,6 +4147,16 @@@ F:    include/drm/i915
  F:    include/uapi/drm/i915_drm.h
  F:    Documentation/gpu/i915.rst
  
 +INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 +M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 +M:      Zhi Wang <zhi.a.wang@intel.com>
 +L:      igvt-g-dev@lists.01.org
 +L:      intel-gfx@lists.freedesktop.org
 +W:      https://01.org/igvt-g
 +T:      git https://github.com/01org/gvt-linux.git
 +S:      Supported
 +F:      drivers/gpu/drm/i915/gvt/
 +
  DRM DRIVERS FOR ATMEL HLCDC
  M:    Boris Brezillon <boris.brezillon@free-electrons.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -4170,15 -4079,6 +4171,15 @@@ S:    Supporte
  F:    drivers/gpu/drm/sun4i/
  F:    Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
  
 +DRM DRIVERS FOR AMLOGIC SOCS
 +M:    Neil Armstrong <narmstrong@baylibre.com>
 +L:    dri-devel@lists.freedesktop.org
 +L:    linux-amlogic@lists.infradead.org
 +W:    http://linux-meson.com/
 +S:    Supported
 +F:    drivers/gpu/drm/meson/
 +F:    Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
 +
  DRM DRIVERS FOR EXYNOS
  M:    Inki Dae <inki.dae@samsung.com>
  M:    Joonyoung Shim <jy0922.shim@samsung.com>
@@@ -4218,7 -4118,6 +4219,7 @@@ F:      drivers/gpu/drm/gma500
  
  DRM DRIVERS FOR HISILICON
  M:    Xinliang Liu <z.liuxinliang@hisilicon.com>
 +M:    Rongrong Zou <zourongrong@gmail.com>
  R:    Xinwei Kong <kong.kongxinwei@hisilicon.com>
  R:    Chen Feng <puck.chen@hisilicon.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -4343,7 -4242,6 +4344,7 @@@ DRM DRIVERS FOR VIVANTE GPU I
  M:    Lucas Stach <l.stach@pengutronix.de>
  R:    Russell King <linux+etnaviv@armlinux.org.uk>
  R:    Christian Gmeiner <christian.gmeiner@gmail.com>
 +L:    etnaviv@lists.freedesktop.org
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  F:    drivers/gpu/drm/etnaviv/
@@@ -4384,13 -4282,6 +4385,13 @@@ S:    Maintaine
  F:    drivers/gpu/drm/tilcdc/
  F:    Documentation/devicetree/bindings/display/tilcdc/
  
 +DRM DRIVERS FOR ZTE ZX
 +M:    Shawn Guo <shawnguo@kernel.org>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Maintained
 +F:    drivers/gpu/drm/zte/
 +F:    Documentation/devicetree/bindings/display/zte,vou.txt
 +
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -4730,19 -4621,16 +4731,19 @@@ F:   sound/usb/misc/ua101.
  
  EXTENSIBLE FIRMWARE INTERFACE (EFI)
  M:    Matt Fleming <matt@codeblueprint.co.uk>
 +M:    Ard Biesheuvel <ard.biesheuvel@linaro.org>
  L:    linux-efi@vger.kernel.org
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
  S:    Maintained
  F:    Documentation/efi-stub.txt
 -F:    arch/ia64/kernel/efi.c
 +F:    arch/*/kernel/efi.c
  F:    arch/x86/boot/compressed/eboot.[ch]
 -F:    arch/x86/include/asm/efi.h
 +F:    arch/*/include/asm/efi.h
  F:    arch/x86/platform/efi/
  F:    drivers/firmware/efi/
  F:    include/linux/efi*.h
 +F:    arch/arm/boot/compressed/efi-header.S
 +F:    arch/arm64/kernel/efi-entry.S
  
  EFI VARIABLE FILESYSTEM
  M:    Matthew Garrett <matthew.garrett@nebula.com>
@@@ -4794,11 -4682,11 +4795,11 @@@ M:   David Woodhouse <dwmw2@infradead.org
  L:    linux-embedded@vger.kernel.org
  S:    Maintained
  
 -EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
 -M:    James Smart <james.smart@avagotech.com>
 -M:    Dick Kennedy <dick.kennedy@avagotech.com>
 +EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
 +M:    James Smart <james.smart@broadcom.com>
 +M:    Dick Kennedy <dick.kennedy@broadcom.com>
  L:    linux-scsi@vger.kernel.org
 -W:    http://www.avagotech.com
 +W:    http://www.broadcom.com
  S:    Supported
  F:    drivers/scsi/lpfc/
  
@@@ -5056,9 -4944,7 +5057,9 @@@ K:      fmc_d.*registe
  FPGA MANAGER FRAMEWORK
  M:    Alan Tull <atull@opensource.altera.com>
  R:    Moritz Fischer <moritz.fischer@ettus.com>
 +L:    linux-fpga@vger.kernel.org
  S:    Maintained
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/atull/linux-fpga.git
  F:    drivers/fpga/
  F:    include/linux/fpga/fpga-mgr.h
  W:    http://www.rocketboards.org
@@@ -5076,9 -4962,10 +5077,9 @@@ F:     drivers/net/wan/dlci.
  F:    drivers/net/wan/sdla.c
  
  FRAMEBUFFER LAYER
 -M:    Tomi Valkeinen <tomi.valkeinen@ti.com>
  L:    linux-fbdev@vger.kernel.org
  Q:    http://patchwork.kernel.org/project/linux-fbdev/list/
 -S:    Maintained
 +S:    Orphan
  F:    Documentation/fb/
  F:    drivers/video/
  F:    include/video/
@@@ -5086,6 -4973,14 +5087,14 @@@ F:    include/linux/fb.
  F:    include/uapi/video/
  F:    include/uapi/linux/fb.h
  
+ FREESCALE CAAM (Cryptographic Acceleration and Assurance Module) DRIVER
+ M:    Horia Geantă <horia.geanta@nxp.com>
+ M:    Dan Douglass <dan.douglass@nxp.com>
+ L:    linux-crypto@vger.kernel.org
+ S:    Maintained
+ F:    drivers/crypto/caam/
+ F:    Documentation/devicetree/bindings/crypto/fsl-sec4.txt
  FREESCALE DIU FRAMEBUFFER DRIVER
  M:    Timur Tabi <timur@tabi.org>
  L:    linux-fbdev@vger.kernel.org
@@@ -5151,18 -5046,9 +5160,18 @@@ S:    Maintaine
  F:    drivers/net/ethernet/freescale/fman
  F:    Documentation/devicetree/bindings/powerpc/fsl/fman.txt
  
 +FREESCALE SOC DRIVERS
 +M:    Scott Wood <oss@buserror.net>
 +L:    linuxppc-dev@lists.ozlabs.org
 +L:    linux-arm-kernel@lists.infradead.org
 +S:    Maintained
 +F:    drivers/soc/fsl/
 +F:    include/linux/fsl/
 +
  FREESCALE QUICC ENGINE LIBRARY
 +M:    Qiang Zhao <qiang.zhao@nxp.com>
  L:    linuxppc-dev@lists.ozlabs.org
 -S:    Orphan
 +S:    Maintained
  F:    drivers/soc/fsl/qe/
  F:    include/soc/fsl/*qe*.h
  F:    include/soc/fsl/*ucc*.h
@@@ -5214,6 -5100,13 +5223,6 @@@ F:     sound/soc/fsl/fsl
  F:    sound/soc/fsl/imx*
  F:    sound/soc/fsl/mpc8610_hpcd.c
  
 -FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
 -M:    "J. German Rivera" <German.Rivera@freescale.com>
 -M:    Stuart Yoder <stuart.yoder@nxp.com>
 -L:    linux-kernel@vger.kernel.org
 -S:    Maintained
 -F:    drivers/staging/fsl-mc/
 -
  FREEVXFS FILESYSTEM
  M:    Christoph Hellwig <hch@infradead.org>
  W:    ftp://ftp.openlinux.org/pub/people/hch/vxfs
@@@ -5247,7 -5140,6 +5256,7 @@@ F:      include/linux/fscache*.
  FS-CRYPTO: FILE SYSTEM LEVEL ENCRYPTION SUPPORT
  M:    Theodore Y. Ts'o <tytso@mit.edu>
  M:    Jaegeuk Kim <jaegeuk@kernel.org>
 +L:    linux-fsdevel@vger.kernel.org
  S:    Supported
  F:    fs/crypto/
  F:    include/linux/fscrypto.h
@@@ -5312,7 -5204,6 +5321,7 @@@ L:      kernel-hardening@lists.openwall.co
  S:    Maintained
  F:    scripts/gcc-plugins/
  F:    scripts/gcc-plugin.sh
 +F:    scripts/Makefile.gcc-plugins
  F:    Documentation/gcc-plugins.txt
  
  GCOV BASED KERNEL PROFILING
@@@ -5404,12 -5295,6 +5413,12 @@@ M:    Joe Perches <joe@perches.com
  S:    Maintained
  F:    scripts/get_maintainer.pl
  
 +GENWQE (IBM Generic Workqueue Card)
 +M:    Frank Haverkamp <haver@linux.vnet.ibm.com>
 +M:    Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
 +S:    Supported
 +F:    drivers/misc/genwqe/
 +
  GFS2 FILE SYSTEM
  M:    Steven Whitehouse <swhiteho@redhat.com>
  M:    Bob Peterson <rpeterso@redhat.com>
@@@ -5724,6 -5609,7 +5733,6 @@@ F:      drivers/watchdog/hpwdt.
  
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
  M:    Don Brace <don.brace@microsemi.com>
 -L:    iss_storagedev@hp.com
  L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
@@@ -5734,6 -5620,7 +5743,6 @@@ F:      include/uapi/linux/cciss*.
  
  HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
  M:    Don Brace <don.brace@microsemi.com>
 -L:    iss_storagedev@hp.com
  L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
@@@ -5772,7 -5659,6 +5781,7 @@@ HIBERNATION (aka Software Suspend, aka 
  M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
  M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-pm@vger.kernel.org
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    arch/x86/power/
  F:    drivers/base/power/
@@@ -5954,7 -5840,6 +5963,7 @@@ F:      drivers/input/serio/hyperv-keyboard.
  F:    drivers/pci/host/pci-hyperv.c
  F:    drivers/net/hyperv/
  F:    drivers/scsi/storvsc_drv.c
 +F:    drivers/uio/uio_hv_generic.c
  F:    drivers/video/fbdev/hyperv_fb.c
  F:    include/linux/hyperv.h
  F:    tools/hv/
@@@ -6198,9 -6083,14 +6207,9 @@@ S:     Maintaine
  F:    Documentation/cdrom/ide-cd
  F:    drivers/ide/ide-cd*
  
 -IDLE-I7300
 -M:    Andy Henroid <andrew.d.henroid@intel.com>
 -L:    linux-pm@vger.kernel.org
 -S:    Supported
 -F:    drivers/idle/i7300_idle.c
 -
  IEEE 802.15.4 SUBSYSTEM
  M:    Alexander Aring <aar@pengutronix.de>
 +M:    Stefan Schmidt <stefan@osg.samsung.com>
  L:    linux-wpan@vger.kernel.org
  W:    http://wpan.cakelab.org/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
@@@ -6230,22 -6120,6 +6239,22 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/rc/iguanair.c
  
 +IIO DIGITAL POTENTIOMETER DAC
 +M:    Peter Rosin <peda@axentia.se>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-dac-dpot-dac
 +F:    Documentation/devicetree/bindings/iio/dac/dpot-dac.txt
 +F:    drivers/iio/dac/dpot-dac.c
 +
 +IIO ENVELOPE DETECTOR
 +M:    Peter Rosin <peda@axentia.se>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-adc-envelope-detector
 +F:    Documentation/devicetree/bindings/iio/adc/envelope-detector.txt
 +F:    drivers/iio/adc/envelope-detector.c
 +
  IIO SUBSYSTEM AND DRIVERS
  M:    Jonathan Cameron <jic23@kernel.org>
  R:    Hartmut Knaack <knaack.h@gmx.de>
@@@ -6403,11 -6277,9 +6412,11 @@@ S:    Maintaine
  F:    drivers/platform/x86/intel-vbtn.c
  
  INTEL IDLE DRIVER
 +M:    Jacob Pan <jacob.jun.pan@linux.intel.com>
  M:    Len Brown <lenb@kernel.org>
  L:    linux-pm@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux.git
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/idle/intel_idle.c
  
@@@ -6627,13 -6499,6 +6636,13 @@@ S:    Maintaine
  F:    arch/x86/include/asm/pmc_core.h
  F:    drivers/platform/x86/intel_pmc_core*
  
 +INVENSENSE MPU-3050 GYROSCOPE DRIVER
 +M:    Linus Walleij <linus.walleij@linaro.org>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/gyro/mpu3050*
 +F:    Documentation/devicetree/bindings/iio/gyroscope/inv,mpu3050.txt
 +
  IOC3 ETHERNET DRIVER
  M:    Ralf Baechle <ralf@linux-mips.org>
  L:    linux-mips@linux-mips.org
@@@ -7215,7 -7080,6 +7224,7 @@@ F:      drivers/scsi/53c700
  LED SUBSYSTEM
  M:    Richard Purdie <rpurdie@rpsys.net>
  M:    Jacek Anaszewski <j.anaszewski@samsung.com>
 +M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-leds@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
  S:    Maintained
@@@ -7688,10 -7552,8 +7697,10 @@@ S:    Maintaine
  MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
  M:    Andrew Lunn <andrew@lunn.ch>
  M:    Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 +L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/dsa/mv88e6xxx/
 +F:    Documentation/devicetree/bindings/net/dsa/marvell.txt
  
  MARVELL ARMADA DRM SUPPORT
  M:    Russell King <rmk+kernel@armlinux.org.uk>
@@@ -7841,7 -7703,6 +7850,7 @@@ MCP4531 MICROCHIP DIGITAL POTENTIOMETE
  M:    Peter Rosin <peda@axentia.se>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531
  F:    drivers/iio/potentiometer/mcp4531.c
  
  MEASUREMENT COMPUTING CIO-DAC IIO DRIVER
@@@ -7973,12 -7834,12 +7982,12 @@@ S:   Maintaine
  F:    drivers/net/wireless/mediatek/mt7601u/
  
  MEGARAID SCSI/SAS DRIVERS
 -M:    Kashyap Desai <kashyap.desai@avagotech.com>
 -M:    Sumit Saxena <sumit.saxena@avagotech.com>
 -M:    Uday Lingala <uday.lingala@avagotech.com>
 -L:    megaraidlinux.pdl@avagotech.com
 +M:    Kashyap Desai <kashyap.desai@broadcom.com>
 +M:    Sumit Saxena <sumit.saxena@broadcom.com>
 +M:    Shivasharan S <shivasharan.srikanteshwara@broadcom.com>
 +L:    megaraidlinux.pdl@broadcom.com
  L:    linux-scsi@vger.kernel.org
 -W:    http://www.lsi.com
 +W:    http://www.avagotech.com/support/
  S:    Maintained
  F:    Documentation/scsi/megaraid.txt
  F:    drivers/scsi/megaraid.*
@@@ -8060,10 -7921,6 +8069,10 @@@ F:    mm
  MEMORY TECHNOLOGY DEVICES (MTD)
  M:    David Woodhouse <dwmw2@infradead.org>
  M:    Brian Norris <computersforpeace@gmail.com>
 +M:    Boris Brezillon <boris.brezillon@free-electrons.com>
 +M:    Marek Vasut <marek.vasut@gmail.com>
 +M:    Richard Weinberger <richard@nod.at>
 +M:    Cyrille Pitchen <cyrille.pitchen@atmel.com>
  L:    linux-mtd@lists.infradead.org
  W:    http://www.linux-mtd.infradead.org/
  Q:    http://patchwork.ozlabs.org/project/linux-mtd/list/
@@@ -8192,7 -8049,6 +8201,7 @@@ F:      drivers/infiniband/hw/mlx4
  F:    include/linux/mlx4/
  
  MELLANOX MLX5 core VPI driver
 +M:    Saeed Mahameed <saeedm@mellanox.com>
  M:    Matan Barak <matanb@mellanox.com>
  M:    Leon Romanovsky <leonro@mellanox.com>
  L:    netdev@vger.kernel.org
@@@ -8252,7 -8108,6 +8261,7 @@@ S:      Maintaine
  F:    drivers/media/dvb-frontends/mn88473*
  
  MODULE SUPPORT
 +M:    Jessica Yu <jeyu@redhat.com>
  M:    Rusty Russell <rusty@rustcorp.com.au>
  S:    Maintained
  F:    include/linux/module.h
@@@ -8366,7 -8221,7 +8375,7 @@@ F:      include/linux/mfd
  MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
  M:    Ulf Hansson <ulf.hansson@linaro.org>
  L:    linux-mmc@vger.kernel.org
 -T:    git git://git.linaro.org/people/ulf.hansson/mmc.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
  S:    Maintained
  F:    Documentation/devicetree/bindings/mmc/
  F:    drivers/mmc/
@@@ -8406,12 -8261,6 +8415,12 @@@ T:    git git://linuxtv.org/mkrufky/tuners
  S:    Maintained
  F:    drivers/media/tuners/mxl5007t.*
  
 +MXSFB DRM DRIVER
 +M:    Marek Vasut <marex@denx.de>
 +S:    Supported
 +F:    drivers/gpu/drm/mxsfb/
 +F:    Documentation/devicetree/bindings/display/mxsfb-drm.txt
 +
  MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
  M:    Hyong-Youb Kim <hykim@myri.com>
  L:    netdev@vger.kernel.org
@@@ -8458,6 -8307,7 +8467,6 @@@ F:      drivers/scsi/arm/oak.
  F:    drivers/scsi/atari_scsi.*
  F:    drivers/scsi/dmx3191d.c
  F:    drivers/scsi/g_NCR5380.*
 -F:    drivers/scsi/g_NCR5380_mmio.c
  F:    drivers/scsi/mac_scsi.*
  F:    drivers/scsi/sun3_scsi.*
  F:    drivers/scsi/sun3_scsi_vme.c
@@@ -8588,6 -8438,7 +8597,6 @@@ F:      include/uapi/linux/net_namespace.
  F:    tools/net/
  F:    tools/testing/selftests/net/
  F:    lib/random32.c
 -F:    lib/test_bpf.c
  
  NETWORKING [IPv4/IPv6]
  M:    "David S. Miller" <davem@davemloft.net>
@@@ -8666,10 -8517,11 +8675,10 @@@ F:   Documentation/devicetree/bindings/ne
  F:    drivers/net/wireless/
  
  NETXEN (1/10) GbE SUPPORT
 -M:    Manish Chopra <manish.chopra@qlogic.com>
 -M:    Sony Chacko <sony.chacko@qlogic.com>
 -M:    Rajesh Borundia <rajesh.borundia@qlogic.com>
 +M:    Manish Chopra <manish.chopra@cavium.com>
 +M:    Rahul Verma <rahul.verma@cavium.com>
 +M:    Dept-GELinuxNICDev@cavium.com
  L:    netdev@vger.kernel.org
 -W:    http://www.qlogic.com
  S:    Supported
  F:    drivers/net/ethernet/qlogic/netxen/
  
@@@ -8816,16 -8668,6 +8825,16 @@@ L:    linux-nvme@lists.infradead.or
  S:    Supported
  F:    drivers/nvme/target/
  
 +NVM EXPRESS FC TRANSPORT DRIVERS
 +M:    James Smart <james.smart@broadcom.com>
 +L:    linux-nvme@lists.infradead.org
 +S:    Supported
 +F:    include/linux/nvme-fc.h
 +F:    include/linux/nvme-fc-driver.h
 +F:    drivers/nvme/host/fc.c
 +F:    drivers/nvme/target/fc.c
 +F:    drivers/nvme/target/fcloop.c
 +
  NVMEM FRAMEWORK
  M:    Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
  M:    Maxime Ripard <maxime.ripard@free-electrons.com>
@@@ -8888,7 -8730,6 +8897,7 @@@ F:      drivers/regulator/tps65217-regulator
  F:    drivers/regulator/tps65218-regulator.c
  F:    drivers/regulator/tps65910-regulator.c
  F:    drivers/regulator/twl-regulator.c
 +F:    drivers/regulator/twl6030-regulator.c
  F:    include/linux/i2c-omap.h
  
  OMAP DEVICE TREE SUPPORT
@@@ -9109,11 -8950,9 +9118,11 @@@ F:    drivers/of/resolver.
  
  OPENRISC ARCHITECTURE
  M:    Jonas Bonn <jonas@southpole.se>
 -W:    http://openrisc.net
 +M:    Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 +M:    Stafford Horne <shorne@gmail.com>
 +L:    openrisc@lists.librecores.org
 +W:    http://openrisc.io
  S:    Maintained
 -T:    git git://openrisc.net/~jonas/linux
  F:    arch/openrisc/
  
  OPENVSWITCH
@@@ -9245,7 -9084,7 +9254,7 @@@ F:      drivers/misc/panel.
  
  PARALLEL PORT SUBSYSTEM
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
 -M:    Sudip Mukherjee <sudip@vectorindia.org>
 +M:    Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
  L:    linux-parport@lists.infradead.org (subscribers-only)
  S:    Maintained
  F:    drivers/parport/
@@@ -9400,12 -9239,11 +9409,12 @@@ S:   Maintaine
  F:    drivers/pci/host/*layerscape*
  
  PCI DRIVER FOR IMX6
 -M:    Richard Zhu <Richard.Zhu@freescale.com>
 +M:    Richard Zhu <hongxing.zhu@nxp.com>
  M:    Lucas Stach <l.stach@pengutronix.de>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
  F:    drivers/pci/host/*imx6*
  
  PCI DRIVER FOR TI KEYSTONE
@@@ -9464,11 -9302,17 +9473,11 @@@ F:   drivers/pci/host/pci-exynos.
  
  PCI DRIVER FOR SYNOPSIS DESIGNWARE
  M:    Jingoo Han <jingoohan1@gmail.com>
 -M:    Pratyush Anand <pratyush.anand@gmail.com>
 -L:    linux-pci@vger.kernel.org
 -S:    Maintained
 -F:    drivers/pci/host/*designware*
 -
 -PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
 -M:    Joao Pinto <jpinto@synopsys.com>
 +M:    Joao Pinto <Joao.Pinto@synopsys.com>
  L:    linux-pci@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/pci/designware-pcie.txt
 -F:    drivers/pci/host/pcie-designware-plat.c
 +F:    drivers/pci/host/*designware*
  
  PCI DRIVER FOR GENERIC OF HOSTS
  M:    Will Deacon <will.deacon@arm.com>
@@@ -9483,7 -9327,7 +9492,7 @@@ PCI DRIVER FOR INTEL VOLUME MANAGEMENT 
  M:    Keith Busch <keith.busch@intel.com>
  L:    linux-pci@vger.kernel.org
  S:    Supported
 -F:    arch/x86/pci/vmd.c
 +F:    drivers/pci/host/vmd.c
  
  PCIE DRIVER FOR ST SPEAR13XX
  M:    Pratyush Anand <pratyush.anand@gmail.com>
@@@ -9716,8 -9560,8 +9725,8 @@@ F:      arch/mips/boot/dts/pistachio
  F:      arch/mips/configs/pistachio*_defconfig
  
  PKTCDVD DRIVER
 -M:    Jiri Kosina <jikos@kernel.org>
 -S:    Maintained
 +S:    Orphan
 +M:    linux-block@vger.kernel.org
  F:    drivers/block/pktcdvd.c
  F:    include/linux/pktcdvd.h
  F:    include/uapi/linux/pktcdvd.h
@@@ -9770,7 -9614,6 +9779,7 @@@ POWER MANAGEMENT COR
  M:    "Rafael J. Wysocki" <rjw@rjwysocki.net>
  L:    linux-pm@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    drivers/base/power/
  F:    include/linux/pm.h
@@@ -9952,7 -9795,7 +9961,7 @@@ F:      drivers/media/usb/pwc/
  
  PWM FAN DRIVER
  M:    Kamil Debski <kamil@wypas.org>
 -M:    Lukasz Majewski <l.majewski@samsung.com>
 +M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/hwmon/pwm-fan.txt
@@@ -10054,32 -9897,33 +10063,32 @@@ F: Documentation/scsi/LICENSE.qla4xx
  F:    drivers/scsi/qla4xxx/
  
  QLOGIC QLA3XXX NETWORK DRIVER
 -M:    Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 -M:    Ron Mercer <ron.mercer@qlogic.com>
 -M:    linux-driver@qlogic.com
 +M:    Dept-GELinuxNICDev@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    Documentation/networking/LICENSE.qla3xxx
  F:    drivers/net/ethernet/qlogic/qla3xxx.*
  
  QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
 -M:    Dept-GELinuxNICDev@qlogic.com
 +M:    Harish Patil <harish.patil@cavium.com>
 +M:    Manish Chopra <manish.chopra@cavium.com>
 +M:    Dept-GELinuxNICDev@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/qlogic/qlcnic/
  
  QLOGIC QLGE 10Gb ETHERNET DRIVER
 -M:    Harish Patil <harish.patil@qlogic.com>
 -M:    Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
 -M:    Dept-GELinuxNICDev@qlogic.com
 -M:    linux-driver@qlogic.com
 +M:    Harish Patil <harish.patil@cavium.com>
 +M:    Manish Chopra <manish.chopra@cavium.com>
 +M:    Dept-GELinuxNICDev@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/qlogic/qlge/
  
  QLOGIC QL4xxx ETHERNET DRIVER
 -M:    Yuval Mintz <Yuval.Mintz@qlogic.com>
 -M:    Ariel Elior <Ariel.Elior@qlogic.com>
 -M:    everest-linux-l2@qlogic.com
 +M:    Yuval Mintz <Yuval.Mintz@cavium.com>
 +M:    Ariel Elior <Ariel.Elior@cavium.com>
 +M:    everest-linux-l2@cavium.com
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/qlogic/qed/
@@@ -10094,12 -9938,6 +10103,12 @@@ F:  fs/qnx4
  F:    include/uapi/linux/qnx4_fs.h
  F:    include/uapi/linux/qnxtypes.h
  
 +QORIQ DPAA2 FSL-MC BUS DRIVER
 +M:    Stuart Yoder <stuart.yoder@nxp.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    drivers/staging/fsl-mc/
 +
  QT1010 MEDIA DRIVER
  M:    Antti Palosaari <crope@iki.fi>
  L:    linux-media@vger.kernel.org
@@@ -10562,7 -10400,7 +10571,7 @@@ F:   arch/s390/pci
  F:    drivers/pci/hotplug/s390_pci_hpc.c
  
  S390 ZCRYPT DRIVER
 -M:    Ingo Tuchscherer <ingo.tuchscherer@de.ibm.com>
 +M:    Harald Freudenberger <freude@de.ibm.com>
  L:    linux-s390@vger.kernel.org
  W:    http://www.ibm.com/developerworks/linux/linux390/
  S:    Supported
@@@ -10729,7 -10567,7 +10738,7 @@@ L:   netdev@vger.kernel.or
  F:    drivers/net/ethernet/samsung/sxgbe/
  
  SAMSUNG THERMAL DRIVER
 -M:    Lukasz Majewski <l.majewski@samsung.com>
 +M:    Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
  L:    linux-pm@vger.kernel.org
  L:    linux-samsung-soc@vger.kernel.org
  S:    Supported
@@@ -10855,11 -10693,6 +10864,11 @@@ W: http://www.sunplus.co
  S:    Supported
  F:    arch/score/
  
 +SCR24X CHIP CARD INTERFACE DRIVER
 +M:    Lubomir Rintel <lkundrak@v3.sk>
 +S:    Supported
 +F:    drivers/char/pcmcia/scr24x_cs.c
 +
  SYSTEM CONTROL & POWER INTERFACE (SCPI) Message Protocol drivers
  M:    Sudeep Holla <sudeep.holla@arm.com>
  L:    linux-arm-kernel@lists.infradead.org
@@@ -11263,7 -11096,7 +11272,7 @@@ F:   include/media/i2c/ov2659.
  SILICON MOTION SM712 FRAME BUFFER DRIVER
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Teddy Wang <teddy.wang@siliconmotion.com>
 -M:    Sudip Mukherjee <sudip@vectorindia.org>
 +M:    Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
  L:    linux-fbdev@vger.kernel.org
  S:    Maintained
  F:    drivers/video/fbdev/sm712*
@@@ -11568,17 -11401,6 +11577,17 @@@ W: http://www.st.com/spea
  S:    Maintained
  F:    drivers/clk/spear/
  
 +SPI NOR SUBSYSTEM
 +M:    Cyrille Pitchen <cyrille.pitchen@atmel.com>
 +M:    Marek Vasut <marek.vasut@gmail.com>
 +L:    linux-mtd@lists.infradead.org
 +W:    http://www.linux-mtd.infradead.org/
 +Q:    http://patchwork.ozlabs.org/project/linux-mtd/list/
 +T:    git git://github.com/spi-nor/linux.git
 +S:    Maintained
 +F:    drivers/mtd/spi-nor/
 +F:    include/linux/mtd/spi-nor.h
 +
  SPI SUBSYSTEM
  M:    Mark Brown <broonie@kernel.org>
  L:    linux-spi@vger.kernel.org
@@@ -11625,7 -11447,7 +11634,7 @@@ STABLE BRANC
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  L:    stable@vger.kernel.org
  S:    Supported
 -F:    Documentation/stable_kernel_rules.txt
 +F:    Documentation/process/stable-kernel-rules.rst
  
  STAGING SUBSYSTEM
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@@ -11691,11 -11513,17 +11700,11 @@@ F:        drivers/staging/rtl8712
  STAGING - SILICON MOTION SM750 FRAME BUFFER DRIVER
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Teddy Wang <teddy.wang@siliconmotion.com>
 -M:    Sudip Mukherjee <sudip@vectorindia.org>
 +M:    Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
  L:    linux-fbdev@vger.kernel.org
  S:    Maintained
  F:    drivers/staging/sm750fb/
  
 -STAGING - SLICOSS
 -M:    Lior Dotan <liodot@gmail.com>
 -M:    Christopher Harrer <charrer@alacritech.com>
 -S:    Odd Fixes
 -F:    drivers/staging/slicoss/
 -
  STAGING - SPEAKUP CONSOLE SPEECH DRIVER
  M:    William Hubbs <w.d.hubbs@gmail.com>
  M:    Chris Brannon <chris@the-brannons.com>
@@@ -11765,7 -11593,6 +11774,7 @@@ M:   "Rafael J. Wysocki" <rjw@rjwysocki.n
  M:    Len Brown <len.brown@intel.com>
  M:    Pavel Machek <pavel@ucw.cz>
  L:    linux-pm@vger.kernel.org
 +B:    https://bugzilla.kernel.org
  S:    Supported
  F:    Documentation/power/
  F:    arch/x86/kernel/acpi/
@@@ -12495,12 -12322,6 +12504,12 @@@ S: Maintaine
  F:    Documentation/filesystems/udf.txt
  F:    fs/udf/
  
 +UDRAW TABLET
 +M:    Bastien Nocera <hadess@hadess.net>
 +L:    linux-input@vger.kernel.org
 +S:    Maintained
 +F:    drivers/hid/hid-udraw.c
 +
  UFS FILESYSTEM
  M:    Evgeniy Dushistov <dushistov@mail.ru>
  S:    Maintained
@@@ -12557,8 -12378,7 +12566,8 @@@ F:   Documentation/scsi/ufs.tx
  F:    drivers/scsi/ufs/
  
  UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER DWC HOOKS
 -M:    Joao Pinto <Joao.Pinto@synopsys.com>
 +M:    Manjunath M Bettegowda <manjumb@synopsys.com>
 +M:    Prabu Thangamuthu <prabut@synopsys.com>
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    drivers/scsi/ufs/*dwc*
@@@ -12916,15 -12736,6 +12925,15 @@@ F: drivers/vfio
  F:    include/linux/vfio.h
  F:    include/uapi/linux/vfio.h
  
 +VFIO MEDIATED DEVICE DRIVERS
 +M:    Kirti Wankhede <kwankhede@nvidia.com>
 +L:    kvm@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/vfio-mediated-device.txt
 +F:    drivers/vfio/mdev/
 +F:    include/linux/mdev.h
 +F:    samples/vfio-mdev/
 +
  VFIO PLATFORM DRIVER
  M:    Baptiste Reynal <b.reynal@virtualopensystems.com>
  L:    kvm@vger.kernel.org
@@@ -12969,7 -12780,6 +12978,7 @@@ F:   include/uapi/linux/virtio_console.
  
  VIRTIO CORE, NET AND BLOCK DRIVERS
  M:    "Michael S. Tsirkin" <mst@redhat.com>
 +M:    Jason Wang <jasowang@redhat.com>
  L:    virtualization@lists.linux-foundation.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/virtio/
@@@ -13000,7 -12810,6 +13009,7 @@@ F:   include/uapi/linux/virtio_gpu.
  
  VIRTIO HOST (VHOST)
  M:    "Michael S. Tsirkin" <mst@redhat.com>
 +M:    Jason Wang <jasowang@redhat.com>
  L:    kvm@vger.kernel.org
  L:    virtualization@lists.linux-foundation.org
  L:    netdev@vger.kernel.org
@@@ -13077,7 -12886,7 +13086,7 @@@ M:   Greg Kroah-Hartman <gregkh@linuxfoun
  L:    devel@driverdev.osuosl.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 -F:    Documentation/vme_api.txt
 +F:    Documentation/driver-api/vme.rst
  F:    drivers/staging/vme/
  F:    drivers/vme/
  F:    include/linux/vme*
@@@ -13301,7 -13110,7 +13310,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    include/linux/workqueue.h
  F:    kernel/workqueue.c
 -F:    Documentation/workqueue.txt
 +F:    Documentation/core-api/workqueue.rst
  
  X-POWERS MULTIFUNCTION PMIC DEVICE DRIVERS
  M:    Chen-Yu Tsai <wens@csie.org>
@@@ -13366,6 -13175,7 +13375,6 @@@ F:   drivers/media/tuners/tuner-xc2028.
  
  XEN HYPERVISOR INTERFACE
  M:    Boris Ostrovsky <boris.ostrovsky@oracle.com>
 -M:    David Vrabel <david.vrabel@citrix.com>
  M:    Juergen Gross <jgross@suse.com>
  L:    xen-devel@lists.xenproject.org (moderated for non-subscribers)
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
                                #address-cells = <0x1>;
                                #size-cells = <0x0>;
                                cell-index = <1>;
 -                              clocks = <&cpm_syscon0 0 3>;
 +                              clocks = <&cpm_syscon0 1 21>;
                                status = "disabled";
                        };
  
                                clocks = <&cpm_syscon0 1 21>;
                                status = "disabled";
                        };
+                       cpm_trng: trng@760000 {
+                               compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
+                               reg = <0x760000 0x7d>;
+                               interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&cpm_syscon0 1 25>;
+                               status = "okay";
+                       };
                };
  
                cpm_pcie0: pcie@f2600000 {
                                reg = <0x700600 0x50>;
                                #address-cells = <0x1>;
                                #size-cells = <0x0>;
 -                              cell-index = <1>;
 -                              clocks = <&cps_syscon0 0 3>;
 +                              cell-index = <3>;
 +                              clocks = <&cps_syscon0 1 21>;
                                status = "disabled";
                        };
  
                                reg = <0x700680 0x50>;
                                #address-cells = <1>;
                                #size-cells = <0>;
 -                              cell-index = <2>;
 +                              cell-index = <4>;
                                clocks = <&cps_syscon0 1 21>;
                                status = "disabled";
                        };
                                clocks = <&cps_syscon0 1 21>;
                                status = "disabled";
                        };
+                       cps_trng: trng@760000 {
+                               compatible = "marvell,armada-8k-rng", "inside-secure,safexcel-eip76";
+                               reg = <0x760000 0x7d>;
+                               interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&cps_syscon0 1 25>;
+                               status = "okay";
+                       };
                };
  
                cps_pcie0: pcie@f4600000 {
@@@ -21,7 -21,6 +21,6 @@@
  
  #include <linux/hardirq.h>
  #include <linux/types.h>
- #include <linux/crypto.h>
  #include <linux/module.h>
  #include <linux/err.h>
  #include <crypto/algapi.h>
  #include <crypto/cryptd.h>
  #include <crypto/ctr.h>
  #include <crypto/b128ops.h>
- #include <crypto/lrw.h>
  #include <crypto/xts.h>
  #include <asm/cpu_device_id.h>
  #include <asm/fpu/api.h>
  #include <asm/crypto/aes.h>
- #include <crypto/ablk_helper.h>
  #include <crypto/scatterwalk.h>
  #include <crypto/internal/aead.h>
+ #include <crypto/internal/simd.h>
+ #include <crypto/internal/skcipher.h>
  #include <linux/workqueue.h>
  #include <linux/spinlock.h>
  #ifdef CONFIG_X86_64
  
  
  #define AESNI_ALIGN   16
+ #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
  #define AES_BLOCK_MASK        (~(AES_BLOCK_SIZE - 1))
  #define RFC4106_HASH_SUBKEY_SIZE 16
+ #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+ #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
+ #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
  
  /* This data is stored at the end of the crypto_tfm struct.
   * It's a type of per "session" data storage location.
   * This needs to be 16 byte aligned.
   */
  struct aesni_rfc4106_gcm_ctx {
-       u8 hash_subkey[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
-       struct crypto_aes_ctx aes_key_expanded
-               __attribute__ ((__aligned__(AESNI_ALIGN)));
+       u8 hash_subkey[16] AESNI_ALIGN_ATTR;
+       struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
        u8 nonce[4];
  };
  
- struct aesni_lrw_ctx {
-       struct lrw_table_ctx lrw_table;
-       u8 raw_aes_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
- };
  struct aesni_xts_ctx {
-       u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
-       u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx) + AESNI_ALIGN - 1];
+       u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+       u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
  };
  
  asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@@ -360,96 -357,95 +357,95 @@@ static void __aes_decrypt(struct crypto
        aesni_dec(ctx, dst, src);
  }
  
- static int ecb_encrypt(struct blkcipher_desc *desc,
-                      struct scatterlist *dst, struct scatterlist *src,
-                      unsigned int nbytes)
+ static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                                unsigned int len)
+ {
+       return aes_set_key_common(crypto_skcipher_tfm(tfm),
+                                 crypto_skcipher_ctx(tfm), key, len);
+ }
+ static int ecb_encrypt(struct skcipher_request *req)
  {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-       struct blkcipher_walk walk;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int err;
  
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       err = blkcipher_walk_virt(desc, &walk);
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = skcipher_walk_virt(&walk, req, true);
  
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
-               err = blkcipher_walk_done(desc, &walk, nbytes);
+               err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();
  
        return err;
  }
  
- static int ecb_decrypt(struct blkcipher_desc *desc,
-                      struct scatterlist *dst, struct scatterlist *src,
-                      unsigned int nbytes)
+ static int ecb_decrypt(struct skcipher_request *req)
  {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-       struct blkcipher_walk walk;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int err;
  
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       err = blkcipher_walk_virt(desc, &walk);
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = skcipher_walk_virt(&walk, req, true);
  
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK);
                nbytes &= AES_BLOCK_SIZE - 1;
-               err = blkcipher_walk_done(desc, &walk, nbytes);
+               err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();
  
        return err;
  }
  
- static int cbc_encrypt(struct blkcipher_desc *desc,
-                      struct scatterlist *dst, struct scatterlist *src,
-                      unsigned int nbytes)
+ static int cbc_encrypt(struct skcipher_request *req)
  {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-       struct blkcipher_walk walk;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int err;
  
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       err = blkcipher_walk_virt(desc, &walk);
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = skcipher_walk_virt(&walk, req, true);
  
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
-               err = blkcipher_walk_done(desc, &walk, nbytes);
+               err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();
  
        return err;
  }
  
- static int cbc_decrypt(struct blkcipher_desc *desc,
-                      struct scatterlist *dst, struct scatterlist *src,
-                      unsigned int nbytes)
+ static int cbc_decrypt(struct skcipher_request *req)
  {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-       struct blkcipher_walk walk;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int err;
  
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       err = blkcipher_walk_virt(desc, &walk);
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = skcipher_walk_virt(&walk, req, true);
  
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes)) {
                aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                              nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
-               err = blkcipher_walk_done(desc, &walk, nbytes);
+               err = skcipher_walk_done(&walk, nbytes);
        }
        kernel_fpu_end();
  
        return err;
  }
  
  #ifdef CONFIG_X86_64
  static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
-                           struct blkcipher_walk *walk)
+                           struct skcipher_walk *walk)
  {
        u8 *ctrblk = walk->iv;
        u8 keystream[AES_BLOCK_SIZE];
@@@ -491,157 -487,53 +487,53 @@@ static void aesni_ctr_enc_avx_tfm(struc
  }
  #endif
  
- static int ctr_crypt(struct blkcipher_desc *desc,
-                    struct scatterlist *dst, struct scatterlist *src,
-                    unsigned int nbytes)
+ static int ctr_crypt(struct skcipher_request *req)
  {
-       struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
-       struct blkcipher_walk walk;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
+       struct skcipher_walk walk;
+       unsigned int nbytes;
        int err;
  
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-       err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = skcipher_walk_virt(&walk, req, true);
  
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
                                      nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
-               err = blkcipher_walk_done(desc, &walk, nbytes);
+               err = skcipher_walk_done(&walk, nbytes);
        }
        if (walk.nbytes) {
                ctr_crypt_final(ctx, &walk);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = skcipher_walk_done(&walk, 0);
        }
        kernel_fpu_end();
  
        return err;
  }
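
For reference, the loop above only consumes whole AES blocks; any partial tail is left to ctr_crypt_final(). The following is an illustrative sketch only (not part of the patch) of the conventional CTR tail construction it performs, e.g. the last 6 bytes of a 70-byte request, expressed with helpers (aesni_enc, crypto_xor, crypto_inc) already available in this file's context:

	/* Sketch: encrypt the counter once, XOR the short tail against it. */
	static void ctr_tail_sketch(struct crypto_aes_ctx *ctx,
				    struct skcipher_walk *walk)
	{
		u8 keystream[AES_BLOCK_SIZE];
		unsigned int nbytes = walk->nbytes;	/* < AES_BLOCK_SIZE here */

		aesni_enc(ctx, keystream, walk->iv);	/* keystream = E_K(counter) */
		crypto_xor(keystream, walk->src.virt.addr, nbytes);
		memcpy(walk->dst.virt.addr, keystream, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);	/* advance the counter */
	}
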
- #endif
- static int ablk_ecb_init(struct crypto_tfm *tfm)
- {
-       return ablk_init_common(tfm, "__driver-ecb-aes-aesni");
- }
- static int ablk_cbc_init(struct crypto_tfm *tfm)
- {
-       return ablk_init_common(tfm, "__driver-cbc-aes-aesni");
- }
- #ifdef CONFIG_X86_64
- static int ablk_ctr_init(struct crypto_tfm *tfm)
- {
-       return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
- }
- #endif
- #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
- static int ablk_pcbc_init(struct crypto_tfm *tfm)
- {
-       return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
- }
- #endif
- static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
- {
-       aesni_ecb_enc(ctx, blks, blks, nbytes);
- }
  
- static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes)
- {
-       aesni_ecb_dec(ctx, blks, blks, nbytes);
- }
- static int lrw_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
+ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
                            unsigned int keylen)
  {
-       struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
        int err;
  
-       err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key,
-                                keylen - AES_BLOCK_SIZE);
+       err = xts_verify_key(tfm, key, keylen);
        if (err)
                return err;
  
-       return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE);
- }
- static void lrw_aesni_exit_tfm(struct crypto_tfm *tfm)
- {
-       struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm);
-       lrw_free_table(&ctx->lrw_table);
- }
- static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
- {
-       struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-       be128 buf[8];
-       struct lrw_crypt_req req = {
-               .tbuf = buf,
-               .tbuflen = sizeof(buf),
-               .table_ctx = &ctx->lrw_table,
-               .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
-               .crypt_fn = lrw_xts_encrypt_callback,
-       };
-       int ret;
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-       kernel_fpu_begin();
-       ret = lrw_crypt(desc, dst, src, nbytes, &req);
-       kernel_fpu_end();
-       return ret;
- }
- static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
- {
-       struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-       be128 buf[8];
-       struct lrw_crypt_req req = {
-               .tbuf = buf,
-               .tbuflen = sizeof(buf),
-               .table_ctx = &ctx->lrw_table,
-               .crypt_ctx = aes_ctx(ctx->raw_aes_ctx),
-               .crypt_fn = lrw_xts_decrypt_callback,
-       };
-       int ret;
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-       kernel_fpu_begin();
-       ret = lrw_crypt(desc, dst, src, nbytes, &req);
-       kernel_fpu_end();
-       return ret;
- }
- static int xts_aesni_setkey(struct crypto_tfm *tfm, const u8 *key,
-                           unsigned int keylen)
- {
-       struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm);
-       int err;
-       err = xts_check_key(tfm, key, keylen);
-       if (err)
-               return err;
+       keylen /= 2;
  
        /* first half of xts-key is for crypt */
-       err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2);
+       err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
+                                key, keylen);
        if (err)
                return err;
  
        /* second half of xts-key is for tweak */
-       return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2,
-                                 keylen / 2);
+       return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
+                                 key + keylen, keylen);
  }
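
A worked example of the key split above (illustrative only, not part of the patch): for "xts(aes)" with a 64-byte user key, xts_verify_key() accepts it, keylen /= 2 yields 32, and the two halves become independent AES-256 schedules:

	/*
	 * key[ 0..31] -> ctx->raw_crypt_ctx   (data-unit encryption key)
	 * key[32..63] -> ctx->raw_tweak_ctx   (tweak encryption key)
	 */
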
  
  
@@@ -650,8 -542,6 +542,6 @@@ static void aesni_xts_tweak(void *ctx, 
        aesni_enc(ctx, out, in);
  }
  
- #ifdef CONFIG_X86_64
  static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
  {
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
@@@ -698,83 -588,28 +588,28 @@@ static const struct common_glue_ctx aes
        } }
  };
  
- static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
+ static int xts_encrypt(struct skcipher_request *req)
  {
-       struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
  
-       return glue_xts_crypt_128bit(&aesni_enc_xts, desc, dst, src, nbytes,
-                                    XTS_TWEAK_CAST(aesni_xts_tweak),
-                                    aes_ctx(ctx->raw_tweak_ctx),
-                                    aes_ctx(ctx->raw_crypt_ctx));
+       return glue_xts_req_128bit(&aesni_enc_xts, req,
+                                  XTS_TWEAK_CAST(aesni_xts_tweak),
+                                  aes_ctx(ctx->raw_tweak_ctx),
+                                  aes_ctx(ctx->raw_crypt_ctx));
  }
  
- static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
+ static int xts_decrypt(struct skcipher_request *req)
  {
-       struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-       return glue_xts_crypt_128bit(&aesni_dec_xts, desc, dst, src, nbytes,
-                                    XTS_TWEAK_CAST(aesni_xts_tweak),
-                                    aes_ctx(ctx->raw_tweak_ctx),
-                                    aes_ctx(ctx->raw_crypt_ctx));
- }
- #else
- static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
- {
-       struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-       be128 buf[8];
-       struct xts_crypt_req req = {
-               .tbuf = buf,
-               .tbuflen = sizeof(buf),
-               .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
-               .tweak_fn = aesni_xts_tweak,
-               .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
-               .crypt_fn = lrw_xts_encrypt_callback,
-       };
-       int ret;
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-       kernel_fpu_begin();
-       ret = xts_crypt(desc, dst, src, nbytes, &req);
-       kernel_fpu_end();
-       return ret;
- }
- static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-                      struct scatterlist *src, unsigned int nbytes)
- {
-       struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-       be128 buf[8];
-       struct xts_crypt_req req = {
-               .tbuf = buf,
-               .tbuflen = sizeof(buf),
-               .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx),
-               .tweak_fn = aesni_xts_tweak,
-               .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx),
-               .crypt_fn = lrw_xts_decrypt_callback,
-       };
-       int ret;
-       desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-       kernel_fpu_begin();
-       ret = xts_crypt(desc, dst, src, nbytes, &req);
-       kernel_fpu_end();
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
  
-       return ret;
+       return glue_xts_req_128bit(&aesni_dec_xts, req,
+                                  XTS_TWEAK_CAST(aesni_xts_tweak),
+                                  aes_ctx(ctx->raw_tweak_ctx),
+                                  aes_ctx(ctx->raw_crypt_ctx));
  }
  
- #endif
- #ifdef CONFIG_X86_64
  static int rfc4106_init(struct crypto_aead *aead)
  {
        struct cryptd_aead *cryptd_tfm;
@@@ -888,7 -723,7 +723,7 @@@ static int helper_rfc4106_encrypt(struc
        unsigned long auth_tag_len = crypto_aead_authsize(tfm);
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        struct scatter_walk src_sg_walk;
 -      struct scatter_walk dst_sg_walk;
 +      struct scatter_walk dst_sg_walk = {};
        unsigned int i;
  
        /* Assuming we are supporting rfc4106 64-bit extended */
@@@ -968,7 -803,7 +803,7 @@@ static int helper_rfc4106_decrypt(struc
        u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
        u8 authTag[16];
        struct scatter_walk src_sg_walk;
 -      struct scatter_walk dst_sg_walk;
 +      struct scatter_walk dst_sg_walk = {};
        unsigned int i;
  
        if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@@ -1077,9 -912,7 +912,7 @@@ static struct crypto_alg aesni_algs[] 
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
-                                 AESNI_ALIGN - 1,
-       .cra_alignmask          = 0,
+       .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                }
        }
  }, {
-       .cra_name               = "__aes-aesni",
-       .cra_driver_name        = "__driver-aes-aesni",
-       .cra_priority           = 0,
+       .cra_name               = "__aes",
+       .cra_driver_name        = "__aes-aesni",
+       .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
        .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
-                                 AESNI_ALIGN - 1,
-       .cra_alignmask          = 0,
+       .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
        .cra_module             = THIS_MODULE,
        .cra_u  = {
                .cipher = {
                        .cia_decrypt            = __aes_decrypt
                }
        }
- }, {
-       .cra_name               = "__ecb-aes-aesni",
-       .cra_driver_name        = "__driver-ecb-aes-aesni",
-       .cra_priority           = 0,
-       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
-                                 CRYPTO_ALG_INTERNAL,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
-                                 AESNI_ALIGN - 1,
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_blkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_u = {
-               .blkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .setkey         = aes_set_key,
-                       .encrypt        = ecb_encrypt,
-                       .decrypt        = ecb_decrypt,
-               },
-       },
- }, {
-       .cra_name               = "__cbc-aes-aesni",
-       .cra_driver_name        = "__driver-cbc-aes-aesni",
-       .cra_priority           = 0,
-       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
-                                 CRYPTO_ALG_INTERNAL,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
-                                 AESNI_ALIGN - 1,
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_blkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_u = {
-               .blkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .setkey         = aes_set_key,
-                       .encrypt        = cbc_encrypt,
-                       .decrypt        = cbc_decrypt,
-               },
-       },
- }, {
-       .cra_name               = "ecb(aes)",
-       .cra_driver_name        = "ecb-aes-aesni",
-       .cra_priority           = 400,
-       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct async_helper_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_ablkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_init               = ablk_ecb_init,
-       .cra_exit               = ablk_exit,
-       .cra_u = {
-               .ablkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .setkey         = ablk_set_key,
-                       .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_decrypt,
+ } };
+ static struct skcipher_alg aesni_skciphers[] = {
+       {
+               .base = {
+                       .cra_name               = "__ecb(aes)",
+                       .cra_driver_name        = "__ecb-aes-aesni",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = AES_BLOCK_SIZE,
+                       .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
+                       .cra_module             = THIS_MODULE,
                },
-       },
- }, {
-       .cra_name               = "cbc(aes)",
-       .cra_driver_name        = "cbc-aes-aesni",
-       .cra_priority           = 400,
-       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct async_helper_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_ablkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_init               = ablk_cbc_init,
-       .cra_exit               = ablk_exit,
-       .cra_u = {
-               .ablkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = ablk_set_key,
-                       .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_decrypt,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .setkey         = aesni_skcipher_setkey,
+               .encrypt        = ecb_encrypt,
+               .decrypt        = ecb_decrypt,
+       }, {
+               .base = {
+                       .cra_name               = "__cbc(aes)",
+                       .cra_driver_name        = "__cbc-aes-aesni",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = AES_BLOCK_SIZE,
+                       .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
+                       .cra_module             = THIS_MODULE,
                },
-       },
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesni_skcipher_setkey,
+               .encrypt        = cbc_encrypt,
+               .decrypt        = cbc_decrypt,
  #ifdef CONFIG_X86_64
- }, {
-       .cra_name               = "__ctr-aes-aesni",
-       .cra_driver_name        = "__driver-ctr-aes-aesni",
-       .cra_priority           = 0,
-       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
-                                 CRYPTO_ALG_INTERNAL,
-       .cra_blocksize          = 1,
-       .cra_ctxsize            = sizeof(struct crypto_aes_ctx) +
-                                 AESNI_ALIGN - 1,
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_blkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_u = {
-               .blkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = aes_set_key,
-                       .encrypt        = ctr_crypt,
-                       .decrypt        = ctr_crypt,
+       }, {
+               .base = {
+                       .cra_name               = "__ctr(aes)",
+                       .cra_driver_name        = "__ctr-aes-aesni",
+                       .cra_priority           = 400,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
+                       .cra_module             = THIS_MODULE,
                },
-       },
- }, {
-       .cra_name               = "ctr(aes)",
-       .cra_driver_name        = "ctr-aes-aesni",
-       .cra_priority           = 400,
-       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize          = 1,
-       .cra_ctxsize            = sizeof(struct async_helper_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_ablkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_init               = ablk_ctr_init,
-       .cra_exit               = ablk_exit,
-       .cra_u = {
-               .ablkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = ablk_set_key,
-                       .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_encrypt,
-                       .geniv          = "chainiv",
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .chunksize      = AES_BLOCK_SIZE,
+               .setkey         = aesni_skcipher_setkey,
+               .encrypt        = ctr_crypt,
+               .decrypt        = ctr_crypt,
+       }, {
+               .base = {
+                       .cra_name               = "__xts(aes)",
+                       .cra_driver_name        = "__xts-aes-aesni",
+                       .cra_priority           = 401,
+                       .cra_flags              = CRYPTO_ALG_INTERNAL,
+                       .cra_blocksize          = AES_BLOCK_SIZE,
+                       .cra_ctxsize            = XTS_AES_CTX_SIZE,
+                       .cra_module             = THIS_MODULE,
                },
-       },
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = xts_aesni_setkey,
+               .encrypt        = xts_encrypt,
+               .decrypt        = xts_decrypt,
  #endif
+       }
+ };
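
The "__"-prefixed entries above are internal-only; aesni_init() later wraps each of them with simd_skcipher_create_compat(), stripping the prefix (cra_name + 2), so they are exposed as ordinary "ecb(aes)", "cbc(aes)", "ctr(aes)" and "xts(aes)" implementations. A minimal usage sketch from a caller's point of view follows (illustrative only, not part of the patch; assumes <crypto/skcipher.h> and <linux/scatterlist.h>, and omits async completion handling):

	static int ctr_aes_demo(const u8 *key, unsigned int keylen,
				u8 *buf, unsigned int len,
				u8 iv[AES_BLOCK_SIZE])
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		int err;

		tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_tfm;
		}

		/*
		 * A real caller passes a completion callback here and waits on
		 * -EINPROGRESS/-EBUSY before touching buf or freeing the request.
		 */
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);

		skcipher_request_free(req);
	out_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}
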
+ struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
+ struct {
+       const char *algname;
+       const char *drvname;
+       const char *basename;
+       struct simd_skcipher_alg *simd;
+ } aesni_simd_skciphers2[] = {
  #if IS_ENABLED(CONFIG_CRYPTO_PCBC)
- }, {
-       .cra_name               = "pcbc(aes)",
-       .cra_driver_name        = "pcbc-aes-aesni",
-       .cra_priority           = 400,
-       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct async_helper_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_ablkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_init               = ablk_pcbc_init,
-       .cra_exit               = ablk_exit,
-       .cra_u = {
-               .ablkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = ablk_set_key,
-                       .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_decrypt,
-               },
+       {
+               .algname        = "pcbc(aes)",
+               .drvname        = "pcbc-aes-aesni",
+               .basename       = "fpu(pcbc(__aes-aesni))",
        },
  #endif
- }, {
-       .cra_name               = "__lrw-aes-aesni",
-       .cra_driver_name        = "__driver-lrw-aes-aesni",
-       .cra_priority           = 0,
-       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
-                                 CRYPTO_ALG_INTERNAL,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct aesni_lrw_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_blkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_exit               = lrw_aesni_exit_tfm,
-       .cra_u = {
-               .blkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = lrw_aesni_setkey,
-                       .encrypt        = lrw_encrypt,
-                       .decrypt        = lrw_decrypt,
-               },
-       },
- }, {
-       .cra_name               = "__xts-aes-aesni",
-       .cra_driver_name        = "__driver-xts-aes-aesni",
-       .cra_priority           = 0,
-       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
-                                 CRYPTO_ALG_INTERNAL,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct aesni_xts_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_blkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_u = {
-               .blkcipher = {
-                       .min_keysize    = 2 * AES_MIN_KEY_SIZE,
-                       .max_keysize    = 2 * AES_MAX_KEY_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = xts_aesni_setkey,
-                       .encrypt        = xts_encrypt,
-                       .decrypt        = xts_decrypt,
-               },
-       },
- }, {
-       .cra_name               = "lrw(aes)",
-       .cra_driver_name        = "lrw-aes-aesni",
-       .cra_priority           = 400,
-       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct async_helper_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_ablkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_init               = ablk_init,
-       .cra_exit               = ablk_exit,
-       .cra_u = {
-               .ablkcipher = {
-                       .min_keysize    = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
-                       .max_keysize    = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = ablk_set_key,
-                       .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_decrypt,
-               },
-       },
- }, {
-       .cra_name               = "xts(aes)",
-       .cra_driver_name        = "xts-aes-aesni",
-       .cra_priority           = 400,
-       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize          = AES_BLOCK_SIZE,
-       .cra_ctxsize            = sizeof(struct async_helper_ctx),
-       .cra_alignmask          = 0,
-       .cra_type               = &crypto_ablkcipher_type,
-       .cra_module             = THIS_MODULE,
-       .cra_init               = ablk_init,
-       .cra_exit               = ablk_exit,
-       .cra_u = {
-               .ablkcipher = {
-                       .min_keysize    = 2 * AES_MIN_KEY_SIZE,
-                       .max_keysize    = 2 * AES_MAX_KEY_SIZE,
-                       .ivsize         = AES_BLOCK_SIZE,
-                       .setkey         = ablk_set_key,
-                       .encrypt        = ablk_encrypt,
-                       .decrypt        = ablk_decrypt,
-               },
-       },
- } };
+ };
  
  #ifdef CONFIG_X86_64
  static struct aead_alg aesni_aead_algs[] = { {
@@@ -1401,9 -1076,27 +1076,27 @@@ static const struct x86_cpu_id aesni_cp
  };
  MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
  
+ static void aesni_free_simds(void)
+ {
+       int i;
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
+                   aesni_simd_skciphers[i]; i++)
+               simd_skcipher_free(aesni_simd_skciphers[i]);
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
+                   aesni_simd_skciphers2[i].simd; i++)
+               simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+ }
  static int __init aesni_init(void)
  {
+       struct simd_skcipher_alg *simd;
+       const char *basename;
+       const char *algname;
+       const char *drvname;
        int err;
+       int i;
  
        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
        if (err)
                goto fpu_exit;
  
+       err = crypto_register_skciphers(aesni_skciphers,
+                                       ARRAY_SIZE(aesni_skciphers));
+       if (err)
+               goto unregister_algs;
        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
-               goto unregister_algs;
+               goto unregister_skciphers;
+       for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
+               algname = aesni_skciphers[i].base.cra_name + 2;
+               drvname = aesni_skciphers[i].base.cra_driver_name + 2;
+               basename = aesni_skciphers[i].base.cra_driver_name;
+               simd = simd_skcipher_create_compat(algname, drvname, basename);
+               err = PTR_ERR(simd);
+               if (IS_ERR(simd))
+                       goto unregister_simds;
+               aesni_simd_skciphers[i] = simd;
+       }
  
-       return err;
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
+               algname = aesni_simd_skciphers2[i].algname;
+               drvname = aesni_simd_skciphers2[i].drvname;
+               basename = aesni_simd_skciphers2[i].basename;
+               simd = simd_skcipher_create_compat(algname, drvname, basename);
+               err = PTR_ERR(simd);
+               if (IS_ERR(simd))
+                       goto unregister_simds;
  
+               aesni_simd_skciphers2[i].simd = simd;
+       }
+       return 0;
+ unregister_simds:
+       aesni_free_simds();
+       crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
+ unregister_skciphers:
+       crypto_unregister_skciphers(aesni_skciphers,
+                                   ARRAY_SIZE(aesni_skciphers));
  unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
  fpu_exit:
  
  static void __exit aesni_exit(void)
  {
+       aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
+       crypto_unregister_skciphers(aesni_skciphers,
+                                   ARRAY_SIZE(aesni_skciphers));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
  
        crypto_fpu_exit();
diff --combined crypto/algif_aead.c
@@@ -81,11 -81,7 +81,11 @@@ static inline bool aead_sufficient_data
  {
        unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req));
  
 -      return ctx->used >= ctx->aead_assoclen + as;
 +      /*
 +       * The minimum amount of memory needed for an AEAD cipher is
 +       * the AAD and, in the case of decryption, the tag.
 +       */
 +      return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
  }
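
The sufficiency check above is asymmetric: encryption only needs the AAD queued, while decryption needs the AAD plus the authentication tag. Hypothetical helpers, for illustration only (not part of the patch), make the input/output accounting used later in recvmsg explicit:

	/*
	 * e.g. assoclen = 16 with a 16-byte tag: encryption needs 16 bytes
	 * queued, decryption needs 32; the output grows by the tag on
	 * encryption and shrinks by it on decryption.
	 */
	static inline size_t aead_min_input(size_t assoclen, size_t as, bool enc)
	{
		return assoclen + (enc ? 0 : as);
	}

	static inline size_t aead_outlen(size_t used, size_t as, bool enc)
	{
		return enc ? used + as : used - as;
	}
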
  
  static void aead_reset_ctx(struct aead_ctx *ctx)
@@@ -136,27 -132,28 +136,27 @@@ static void aead_wmem_wakeup(struct soc
  
  static int aead_wait_for_data(struct sock *sk, unsigned flags)
  {
 +      DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct alg_sock *ask = alg_sk(sk);
        struct aead_ctx *ctx = ask->private;
        long timeout;
 -      DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;
  
        if (flags & MSG_DONTWAIT)
                return -EAGAIN;
  
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 -
 +      add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
 -              prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
 -              if (sk_wait_event(sk, &timeout, !ctx->more)) {
 +              if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
                        err = 0;
                        break;
                }
        }
 -      finish_wait(sk_sleep(sk), &wait);
 +      remove_wait_queue(sk_sleep(sk), &wait);
  
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
  
@@@ -419,7 -416,7 +419,7 @@@ static int aead_recvmsg_async(struct so
        unsigned int i, reqlen = GET_REQ_SIZE(tfm);
        int err = -ENOMEM;
        unsigned long used;
 -      size_t outlen;
 +      size_t outlen = 0;
        size_t usedpages = 0;
  
        lock_sock(sk);
                        goto unlock;
        }
  
 -      used = ctx->used;
 -      outlen = used;
 -
        if (!aead_sufficient_data(ctx))
                goto unlock;
  
 +      used = ctx->used;
 +      if (ctx->enc)
 +              outlen = used + as;
 +      else
 +              outlen = used - as;
 +
        req = sock_kmalloc(sk, reqlen, GFP_KERNEL);
        if (unlikely(!req))
                goto unlock;
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  aead_async_cb, sk);
 -      used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
 +      used -= ctx->aead_assoclen;
  
        /* take over all tx sgls from ctx */
-       areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * sgl->cur,
+       areq->tsgl = sock_kmalloc(sk,
+                                 sizeof(*areq->tsgl) * max_t(u32, sgl->cur, 1),
                                  GFP_KERNEL);
        if (unlikely(!areq->tsgl))
                goto free;
  
-       sg_init_table(areq->tsgl, sgl->cur);
+       sg_init_table(areq->tsgl, max_t(u32, sgl->cur, 1));
        for (i = 0; i < sgl->cur; i++)
                sg_set_page(&areq->tsgl[i], sg_page(&sgl->sg[i]),
                            sgl->sg[i].length, sgl->sg[i].offset);
        areq->tsgls = sgl->cur;
  
        /* create rx sgls */
 -      while (iov_iter_count(&msg->msg_iter)) {
 +      while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));
  
  
                last_rsgl = rsgl;
  
 -              /* we do not need more iovecs as we have sufficient memory */
 -              if (outlen <= usedpages)
 -                      break;
 -
                iov_iter_advance(&msg->msg_iter, err);
        }
 -      err = -EINVAL;
 +
        /* ensure output buffer is sufficiently large */
 -      if (usedpages < outlen)
 -              goto free;
 +      if (usedpages < outlen) {
 +              err = -EINVAL;
 +              goto unlock;
 +      }
  
        aead_request_set_crypt(req, areq->tsgl, areq->first_rsgl.sgl.sg, used,
                               areq->iv);
@@@ -575,7 -572,6 +576,7 @@@ static int aead_recvmsg_sync(struct soc
                        goto unlock;
        }
  
 +      /* data length provided by caller via sendmsg/sendpage */
        used = ctx->used;
  
        /*
        if (!aead_sufficient_data(ctx))
                goto unlock;
  
 -      outlen = used;
 +      /*
 +       * Calculate the minimum output buffer size holding the result of the
 +       * cipher operation. When encrypting data, the receiving buffer is
 +       * larger by the tag length compared to the input buffer as the
 +       * encryption operation generates the tag. For decryption, the input
 +       * buffer provides the tag, which is consumed, so only the plaintext
 +       * (with no room needed for the tag) is returned to the caller.
 +       */
 +      if (ctx->enc)
 +              outlen = used + as;
 +      else
 +              outlen = used - as;
  
        /*
         * The cipher operation input data is reduced by the associated data
         * length as this data is processed separately later on.
         */
 -      used -= ctx->aead_assoclen + (ctx->enc ? as : 0);
 +      used -= ctx->aead_assoclen;
  
        /* convert iovecs of output buffers into scatterlists */
 -      while (iov_iter_count(&msg->msg_iter)) {
 +      while (outlen > usedpages && iov_iter_count(&msg->msg_iter)) {
                size_t seglen = min_t(size_t, iov_iter_count(&msg->msg_iter),
                                      (outlen - usedpages));
  
  
                last_rsgl = rsgl;
  
 -              /* we do not need more iovecs as we have sufficient memory */
 -              if (outlen <= usedpages)
 -                      break;
                iov_iter_advance(&msg->msg_iter, err);
        }
  
 -      err = -EINVAL;
        /* ensure output buffer is sufficiently large */
 -      if (usedpages < outlen)
 +      if (usedpages < outlen) {
 +              err = -EINVAL;
                goto unlock;
 +      }
  
        sg_mark_end(sgl->sg + sgl->cur - 1);
        aead_request_set_crypt(&ctx->aead_req, sgl->sg, ctx->first_rsgl.sgl.sg,
diff --combined crypto/algif_skcipher.c
@@@ -199,26 -199,26 +199,26 @@@ static void skcipher_free_sgl(struct so
  
  static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
  {
 -      long timeout;
 -      DEFINE_WAIT(wait);
 +      DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int err = -ERESTARTSYS;
 +      long timeout;
  
        if (flags & MSG_DONTWAIT)
                return -EAGAIN;
  
        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
  
 +      add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
 -              prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
 -              if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
 +              if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
                        err = 0;
                        break;
                }
        }
 -      finish_wait(sk_sleep(sk), &wait);
 +      remove_wait_queue(sk_sleep(sk), &wait);
  
        return err;
  }
@@@ -242,10 -242,10 +242,10 @@@ static void skcipher_wmem_wakeup(struc
  
  static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
  {
 +      DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct alg_sock *ask = alg_sk(sk);
        struct skcipher_ctx *ctx = ask->private;
        long timeout;
 -      DEFINE_WAIT(wait);
        int err = -ERESTARTSYS;
  
        if (flags & MSG_DONTWAIT) {
  
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
  
 +      add_wait_queue(sk_sleep(sk), &wait);
        for (;;) {
                if (signal_pending(current))
                        break;
 -              prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                timeout = MAX_SCHEDULE_TIMEOUT;
 -              if (sk_wait_event(sk, &timeout, ctx->used)) {
 +              if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
                        err = 0;
                        break;
                }
        }
 -      finish_wait(sk_sleep(sk), &wait);
 +      remove_wait_queue(sk_sleep(sk), &wait);
  
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
  
@@@ -566,8 -566,10 +566,10 @@@ static int skcipher_recvmsg_async(struc
                         * need to expand */
                        tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
                                      GFP_KERNEL);
-                       if (!tmp)
+                       if (!tmp) {
+                               err = -ENOMEM;
                                goto free;
+                       }
  
                        sg_init_table(tmp, tx_nents * 2);
                        for (x = 0; x < tx_nents; x++)
@@@ -330,8 -330,8 +330,8 @@@ static int caam_remove(struct platform_
        clk_disable_unprepare(ctrlpriv->caam_ipg);
        clk_disable_unprepare(ctrlpriv->caam_mem);
        clk_disable_unprepare(ctrlpriv->caam_aclk);
-       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+       if (ctrlpriv->caam_emi_slow)
+               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
        return 0;
  }
  
@@@ -365,11 -365,8 +365,8 @@@ static void kick_trng(struct platform_d
         */
        val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
              >> RTSDCTL_ENT_DLY_SHIFT;
-       if (ent_delay <= val) {
-               /* put RNG4 into run mode */
-               clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, 0);
-               return;
-       }
+       if (ent_delay <= val)
+               goto start_rng;
  
        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) |
        wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
        /* read the control register */
        val = rd_reg32(&r4tst->rtmctl);
+ start_rng:
        /*
         * select raw sampling in both entropy shifter
-        * and statistical checker
+        * and statistical checker; put RNG4 into run mode
         */
-       clrsetbits_32(&val, 0, RTMCTL_SAMP_MODE_RAW_ES_SC);
-       /* put RNG4 into run mode */
-       clrsetbits_32(&val, RTMCTL_PRGM, 0);
-       /* write back the control register */
-       wr_reg32(&r4tst->rtmctl, val);
+       clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM, RTMCTL_SAMP_MODE_RAW_ES_SC);
  }
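
The rework above folds the previous clear-bit, set-bit and write-back sequence into a single clrsetbits_32() call, which both takes RNG4 out of program mode (clears RTMCTL_PRGM) and selects raw sampling in one register update. For reference, an illustrative sketch (not part of the patch) of what such a read-modify-write helper does, in terms of the rd_reg32()/wr_reg32() accessors already used here:

	static inline void clrsetbits_32_sketch(void __iomem *reg,
						u32 clear, u32 set)
	{
		wr_reg32(reg, (rd_reg32(reg) & ~clear) | set);
	}
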
  
  /**
@@@ -482,14 -476,16 +476,16 @@@ static int caam_probe(struct platform_d
        }
        ctrlpriv->caam_aclk = clk;
  
-       clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
-       if (IS_ERR(clk)) {
-               ret = PTR_ERR(clk);
-               dev_err(&pdev->dev,
-                       "can't identify CAAM emi_slow clk: %d\n", ret);
-               return ret;
+       if (!of_machine_is_compatible("fsl,imx6ul")) {
+               clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
+               if (IS_ERR(clk)) {
+                       ret = PTR_ERR(clk);
+                       dev_err(&pdev->dev,
+                               "can't identify CAAM emi_slow clk: %d\n", ret);
+                       return ret;
+               }
+               ctrlpriv->caam_emi_slow = clk;
        }
-       ctrlpriv->caam_emi_slow = clk;
  
        ret = clk_prepare_enable(ctrlpriv->caam_ipg);
        if (ret < 0) {
                goto disable_caam_mem;
        }
  
-       ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
-                       ret);
-               goto disable_caam_aclk;
+       if (ctrlpriv->caam_emi_slow) {
+               ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
+                               ret);
+                       goto disable_caam_aclk;
+               }
        }
  
        /* Get configuration properties from device tree */
        else
                BLOCK_OFFSET = PG_SIZE_64K;
  
-       ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
-       ctrlpriv->assure = (struct caam_assurance __force *)
-                          ((uint8_t *)ctrl +
+       ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
+       ctrlpriv->assure = (struct caam_assurance __iomem __force *)
+                          ((__force uint8_t *)ctrl +
                            BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
                           );
-       ctrlpriv->deco = (struct caam_deco __force *)
-                        ((uint8_t *)ctrl +
+       ctrlpriv->deco = (struct caam_deco __iomem __force *)
+                        ((__force uint8_t *)ctrl +
                         BLOCK_OFFSET * DECO_BLOCK_NUMBER
                         );
  
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register
         */
 -      clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
 -                    MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
 +      clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
 +                    MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
 +                    MCFGR_WDENABLE | MCFGR_LARGE_BURST |
                      (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
  
        /*
                                        ring);
                                continue;
                        }
-                       ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
-                                            ((uint8_t *)ctrl +
+                       ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+                                            ((__force uint8_t *)ctrl +
                                             (ring + JR_BLOCK_NUMBER) *
                                              BLOCK_OFFSET
                                             );
                        !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
                           CTPR_MS_QI_MASK);
        if (ctrlpriv->qi_present) {
-               ctrlpriv->qi = (struct caam_queue_if __force *)
-                              ((uint8_t *)ctrl +
+               ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+                              ((__force uint8_t *)ctrl +
                                 BLOCK_OFFSET * QI_BLOCK_NUMBER
                               );
                /* This is all that's required to physically enable QI */
                                    &caam_fops_u32_ro);
  
        /* Internal covering keys (useful in non-secure mode only) */
-       ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
+       ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_kek = debugfs_create_blob("kek",
                                                S_IRUSR |
                                                ctrlpriv->ctl,
                                                &ctrlpriv->ctl_kek_wrap);
  
-       ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
+       ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0];
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
                                                 S_IRUSR |
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tkek_wrap);
  
-       ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
+       ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0];
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
                                                 S_IRUSR |
@@@ -833,7 -830,8 +831,8 @@@ caam_remove
  iounmap_ctrl:
        iounmap(ctrl);
  disable_caam_emi_slow:
-       clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+       if (ctrlpriv->caam_emi_slow)
+               clk_disable_unprepare(ctrlpriv->caam_emi_slow);
  disable_caam_aclk:
        clk_disable_unprepare(ctrlpriv->caam_aclk);
  disable_caam_mem:
  #include <crypto/algapi.h>
  #include <crypto/hash.h>
  #include <crypto/sha.h>
+ #include <crypto/authenc.h>
+ #include <crypto/internal/aead.h>
+ #include <crypto/null.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/aead.h>
+ #include <crypto/scatterwalk.h>
  #include <crypto/internal/hash.h>
  
  #include "t4fw_api.h"
  #include "chcr_algo.h"
  #include "chcr_crypto.h"
  
+ static inline  struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
+ {
+       return ctx->crypto_ctx->aeadctx;
+ }
  static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
  {
        return ctx->crypto_ctx->ablkctx;
@@@ -72,6 -83,16 +83,16 @@@ static inline struct hmac_ctx *HMAC_CTX
        return ctx->crypto_ctx->hmacctx;
  }
  
+ static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
+ {
+       return gctx->ctx->gcm;
+ }
+ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
+ {
+       return gctx->ctx->authenc;
+ }
  static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
  {
        return ctx->dev->u_ctx;
@@@ -94,12 -115,37 +115,37 @@@ static inline unsigned int sgl_len(unsi
        return (3 * n) / 2 + (n & 1) + 2;
  }
  
+ static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
+ {
+       u8 temp[SHA512_DIGEST_SIZE];
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       int authsize = crypto_aead_authsize(tfm);
+       struct cpl_fw6_pld *fw6_pld;
+       int cmp = 0;
+       fw6_pld = (struct cpl_fw6_pld *)input;
+       if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
+           (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
+               cmp = memcmp(&fw6_pld->data[2], (fw6_pld + 1), authsize);
+       } else {
+               sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
+                               authsize, req->assoclen +
+                               req->cryptlen - authsize);
+               cmp = memcmp(temp, (fw6_pld + 1), authsize);
+       }
+       if (cmp)
+               *err = -EBADMSG;
+       else
+               *err = 0;
+ }
  /*
   *    chcr_handle_resp - Unmap the DMA buffers associated with the request
   *    @req: crypto request
   */
  int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
-                    int error_status)
+                        int err)
  {
        struct crypto_tfm *tfm = req->tfm;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        unsigned int digestsize, updated_digestsize;
  
        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+       case CRYPTO_ALG_TYPE_AEAD:
+               ctx_req.req.aead_req = (struct aead_request *)req;
+               ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
+               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+                            ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
+               if (ctx_req.ctx.reqctx->skb) {
+                       kfree_skb(ctx_req.ctx.reqctx->skb);
+                       ctx_req.ctx.reqctx->skb = NULL;
+               }
+               if (ctx_req.ctx.reqctx->verify == VERIFY_SW) {
+                       chcr_verify_tag(ctx_req.req.aead_req, input,
+                                       &err);
+                       ctx_req.ctx.reqctx->verify = VERIFY_HW;
+               }
+               break;
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                ctx_req.req.ablk_req = (struct ablkcipher_request *)req;
                ctx_req.ctx.ablk_ctx =
                        ablkcipher_request_ctx(ctx_req.req.ablk_req);
-               if (!error_status) {
+               if (!err) {
                        fw6_pld = (struct cpl_fw6_pld *)input;
                        memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2],
                               AES_BLOCK_SIZE);
                }
                dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst,
-                            ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE);
+                            ctx_req.ctx.ablk_ctx->dst_nents, DMA_FROM_DEVICE);
                if (ctx_req.ctx.ablk_ctx->skb) {
                        kfree_skb(ctx_req.ctx.ablk_ctx->skb);
                        ctx_req.ctx.ablk_ctx->skb = NULL;
                        updated_digestsize = SHA256_DIGEST_SIZE;
                else if (digestsize == SHA384_DIGEST_SIZE)
                        updated_digestsize = SHA512_DIGEST_SIZE;
-               if (ctx_req.ctx.ahash_ctx->skb)
+               if (ctx_req.ctx.ahash_ctx->skb) {
+                       kfree_skb(ctx_req.ctx.ahash_ctx->skb);
                        ctx_req.ctx.ahash_ctx->skb = NULL;
+               }
                if (ctx_req.ctx.ahash_ctx->result == 1) {
                        ctx_req.ctx.ahash_ctx->result = 0;
                        memcpy(ctx_req.req.ahash_req->result, input +
                               sizeof(struct cpl_fw6_pld),
                               updated_digestsize);
                }
-               kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr);
-               ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL;
                break;
        }
-       return 0;
+       return err;
  }
  
  /*
@@@ -178,40 -240,81 +240,81 @@@ static inline unsigned int calc_tx_flit
        return flits + sgl_len(cnt);
  }
  
- static struct shash_desc *chcr_alloc_shash(unsigned int ds)
+ static inline void get_aes_decrypt_key(unsigned char *dec_key,
+                                      const unsigned char *key,
+                                      unsigned int keylength)
+ {
+       u32 temp;
+       u32 w_ring[MAX_NK];
+       int i, j, k;
+       u8  nr, nk;
+       switch (keylength) {
+       case AES_KEYLENGTH_128BIT:
+               nk = KEYLENGTH_4BYTES;
+               nr = NUMBER_OF_ROUNDS_10;
+               break;
+       case AES_KEYLENGTH_192BIT:
+               nk = KEYLENGTH_6BYTES;
+               nr = NUMBER_OF_ROUNDS_12;
+               break;
+       case AES_KEYLENGTH_256BIT:
+               nk = KEYLENGTH_8BYTES;
+               nr = NUMBER_OF_ROUNDS_14;
+               break;
+       default:
+               return;
+       }
+       for (i = 0; i < nk; i++)
+               w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
+       i = 0;
+       temp = w_ring[nk - 1];
+       while (i + nk < (nr + 1) * 4) {
+               if (!(i % nk)) {
+                       /* RotWord(temp) */
+                       temp = (temp << 8) | (temp >> 24);
+                       temp = aes_ks_subword(temp);
+                       temp ^= round_constant[i / nk];
+               } else if (nk == 8 && (i % 4 == 0)) {
+                       temp = aes_ks_subword(temp);
+               }
+               w_ring[i % nk] ^= temp;
+               temp = w_ring[i % nk];
+               i++;
+       }
+       i--;
+       for (k = 0, j = i % nk; k < nk; k++) {
+               *((u32 *)dec_key + k) = htonl(w_ring[j]);
+               j--;
+               if (j < 0)
+                       j += nk;
+       }
+ }
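
get_aes_decrypt_key() above expands the AES key schedule in software and writes the final round-key words out in reverse order, the form this driver feeds to the hardware for decryption; note that its keylength argument is in bits. A minimal usage sketch (illustrative only, not part of the patch; the scratch size assumption reuses the driver's existing CHCR_AES_MAX_KEY_LEN):

	static void chcr_rrkey_demo(const u8 *key, unsigned int keylen_bytes)
	{
		u8 rrkey[CHCR_AES_MAX_KEY_LEN];

		/* third argument is the key length in bits */
		get_aes_decrypt_key(rrkey, key, keylen_bytes << 3);
		/* rrkey now holds the reverse round-key material for the HW key context */
	}
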
+ static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
  {
        struct crypto_shash *base_hash = NULL;
-       struct shash_desc *desc;
  
        switch (ds) {
        case SHA1_DIGEST_SIZE:
-               base_hash = crypto_alloc_shash("sha1-generic", 0, 0);
+               base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
-               base_hash = crypto_alloc_shash("sha224-generic", 0, 0);
+               base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
-               base_hash = crypto_alloc_shash("sha256-generic", 0, 0);
+               base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
-               base_hash = crypto_alloc_shash("sha384-generic", 0, 0);
+               base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
-               base_hash = crypto_alloc_shash("sha512-generic", 0, 0);
+               base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }
-       if (IS_ERR(base_hash)) {
-               pr_err("Can not allocate sha-generic algo.\n");
-               return (void *)base_hash;
-       }
  
-       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash),
-                      GFP_KERNEL);
-       if (!desc)
-               return ERR_PTR(-ENOMEM);
-       desc->tfm = base_hash;
-       desc->flags = crypto_shash_get_flags(base_hash);
-       return desc;
+       return base_hash;
  }
  
  static int chcr_compute_partial_hash(struct shash_desc *desc,
@@@ -279,31 -382,18 +382,18 @@@ static inline int is_hmac(struct crypto
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);
-       if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) ==
-           CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+       if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
  }
  
- static inline unsigned int ch_nents(struct scatterlist *sg,
-                                   unsigned int *total_size)
- {
-       unsigned int nents;
-       for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) {
-               nents++;
-               *total_size += sg->length;
-       }
-       return nents;
- }
  static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
                           struct scatterlist *sg,
                           struct phys_sge_parm *sg_param)
  {
        struct phys_sge_pairs *to;
-       unsigned int out_buf_size = sg_param->obsize;
-       unsigned int nents = sg_param->nents, i, j, tot_len = 0;
+       int out_buf_size = sg_param->obsize;
+       unsigned int nents = sg_param->nents, i, j = 0;
  
        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
                                       sizeof(struct cpl_rx_phys_dsgl));
  
        for (i = 0; nents; to++) {
-               for (j = i; (nents && (j < (8 + i))); j++, nents--) {
-                       to->len[j] = htons(sg->length);
+               for (j = 0; j < 8 && nents; j++, nents--) {
+                       out_buf_size -= sg_dma_len(sg);
+                       to->len[j] = htons(sg_dma_len(sg));
                        to->addr[j] = cpu_to_be64(sg_dma_address(sg));
-                       if (out_buf_size) {
-                               if (tot_len + sg_dma_len(sg) >= out_buf_size) {
-                                       to->len[j] = htons(out_buf_size -
-                                                          tot_len);
-                                       return;
-                               }
-                               tot_len += sg_dma_len(sg);
-                       }
                        sg = sg_next(sg);
                }
        }
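+       /* the sg DMA lengths may not add up to the requested obsize; adjust
+        * the last entry by the leftover so the total matches obsize
+        */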
+       if (out_buf_size) {
+               j--;
+               to--;
+               to->len[j] = htons(ntohs(to->len[j]) + (out_buf_size));
+       }
  }
  
- static inline unsigned
- int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl,
-                        struct scatterlist *sg, struct phys_sge_parm *sg_param)
+ static inline int map_writesg_phys_cpl(struct device *dev,
+                                       struct cpl_rx_phys_dsgl *phys_cpl,
+                                       struct scatterlist *sg,
+                                       struct phys_sge_parm *sg_param)
  {
        if (!sg || !sg_param->nents)
                return 0;
        return 0;
  }
  
+ static inline int get_aead_subtype(struct crypto_aead *aead)
+ {
+       struct aead_alg *alg = crypto_aead_alg(aead);
+       struct chcr_alg_template *chcr_crypto_alg =
+               container_of(alg, struct chcr_alg_template, alg.aead);
+       return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
+ }
  static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
  {
        struct crypto_alg *alg = tfm->__crt_alg;
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
  }
  
+ static inline void write_buffer_to_skb(struct sk_buff *skb,
+                                       unsigned int *frags,
+                                       char *bfr,
+                                       u8 bfr_len)
+ {
+       skb->len += bfr_len;
+       skb->data_len += bfr_len;
+       skb->truesize += bfr_len;
+       get_page(virt_to_page(bfr));
+       skb_fill_page_desc(skb, *frags, virt_to_page(bfr),
+                          offset_in_page(bfr), bfr_len);
+       (*frags)++;
+ }
  static inline void
- write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags,
+ write_sg_to_skb(struct sk_buff *skb, unsigned int *frags,
                        struct scatterlist *sg, unsigned int count)
  {
        struct page *spage;
        skb->len += count;
        skb->data_len += count;
        skb->truesize += count;
        while (count > 0) {
-               if (sg && (!(sg->length)))
+               if (!sg || (!(sg->length)))
                        break;
                spage = sg_page(sg);
                get_page(spage);
@@@ -389,29 -502,25 +502,25 @@@ static int generate_copy_rrkey(struct a
                               struct _key_ctx *key_ctx)
  {
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
-               get_aes_decrypt_key(key_ctx->key, ablkctx->key,
-                                   ablkctx->enckey_len << 3);
-               memset(key_ctx->key + ablkctx->enckey_len, 0,
-                      CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len);
+               memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
-               get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1),
-                                   ablkctx->key, ablkctx->enckey_len << 2);
+               memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
+                      ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
  }
  
  static inline void create_wreq(struct chcr_context *ctx,
-                              struct fw_crypto_lookaside_wr *wreq,
+                              struct chcr_wr *chcr_req,
                               void *req, struct sk_buff *skb,
                               int kctx_len, int hash_sz,
-                              unsigned int phys_dsgl)
+                              int is_iv,
+                              unsigned int sc_len)
  {
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
-       struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1);
-       struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1);
        int iv_loc = IV_DSGL;
        int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id];
        unsigned int immdatalen = 0, nr_frags = 0;
                nr_frags = skb_shinfo(skb)->nr_frags;
        }
  
-       wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
-                                                    (kctx_len >> 4));
-       wreq->pld_size_hash_size =
+       chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen,
+                               ((sizeof(chcr_req->key_ctx) + kctx_len) >> 4));
+       chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) |
                      FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
-       wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
+       chcr_req->wreq.len16_pkd =
+               htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(
                                    (calc_tx_flits_ofld(skb) * 8), 16)));
-       wreq->cookie = cpu_to_be64((uintptr_t)req);
-       wreq->rx_chid_to_rx_q_id =
+       chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+       chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid,
-                               (hash_sz) ? IV_NOP : iv_loc);
+                               is_iv ? iv_loc : IV_NOP);
  
-       ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
-       ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
-                                        16) - ((sizeof(*wreq)) >> 4)));
+       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id);
+       chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
+                                       16) - ((sizeof(chcr_req->wreq)) >> 4)));
  
-       sc_imm->cmd_more = FILL_CMD_MORE(immdatalen);
-       sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len +
-                                 ((hash_sz) ? DUMMY_BYTES :
-                                 (sizeof(struct cpl_rx_phys_dsgl) +
-                                  phys_dsgl)) + immdatalen);
+       chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(immdatalen);
+       chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+                                  sizeof(chcr_req->key_ctx) +
+                                  kctx_len + sc_len + immdatalen);
  }
  
  /**
   *    @op_type:       encryption or decryption
   */
  static struct sk_buff
- *create_cipher_wr(struct crypto_async_request *req_base,
-                 struct chcr_context *ctx, unsigned short qid,
+ *create_cipher_wr(struct ablkcipher_request *req,
+                 unsigned short qid,
                  unsigned short op_type)
  {
-       struct ablkcipher_request *req = (struct ablkcipher_request *)req_base;
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+       struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
        struct sk_buff *skb = NULL;
-       struct _key_ctx *key_ctx;
-       struct fw_crypto_lookaside_wr *wreq;
-       struct cpl_tx_sec_pdu *sec_cpl;
+       struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
-       struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+       struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct phys_sge_parm sg_param;
-       unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0;
+       unsigned int frags = 0, transhdr_len, phys_dsgl;
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+                       GFP_ATOMIC;
  
        if (!req->info)
                return ERR_PTR(-EINVAL);
-       ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize);
-       ablkctx->enc = op_type;
+       reqctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+       if (reqctx->dst_nents <= 0) {
+               pr_err("AES:Invalid Destination sg lists\n");
+               return ERR_PTR(-EINVAL);
+       }
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
-           (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE))
+           (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) {
+               pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
+                      ablkctx->enckey_len, req->nbytes, ivsize);
                return ERR_PTR(-EINVAL);
+       }
  
-       phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents);
+       phys_dsgl = get_space_for_phys_dsgl(reqctx->dst_nents);
  
-       kctx_len = sizeof(*key_ctx) +
-               (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
+       kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
-       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
-                       GFP_ATOMIC);
+       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
        if (!skb)
                return ERR_PTR(-ENOMEM);
        skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-       wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-       sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
-       sec_cpl->op_ivinsrtofst =
-               FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1);
-       sec_cpl->pldlen = htonl(ivsize + req->nbytes);
-       sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0,
-                                                               ivsize + 1, 0);
-       sec_cpl->cipherstop_lo_authinsert =  FILL_SEC_CPL_AUTHINSERT(0, 0,
-                                                                    0, 0);
-       sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
+       chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+       memset(chcr_req, 0, transhdr_len);
+       chcr_req->sec_cpl.op_ivinsrtofst =
+               FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1);
+       chcr_req->sec_cpl.pldlen = htonl(ivsize + req->nbytes);
+       chcr_req->sec_cpl.aadstart_cipherstop_hi =
+                       FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, ivsize + 1, 0);
+       chcr_req->sec_cpl.cipherstop_lo_authinsert =
+                       FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+       chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0,
                                                         ablkctx->ciph_mode,
-                                                        0, 0, ivsize >> 1, 1);
-       sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
+                                                        0, 0, ivsize >> 1);
+       chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 1, phys_dsgl);
  
-       key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
-       key_ctx->ctx_hdr = ablkctx->key_ctx_hdr;
+       chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if (op_type == CHCR_DECRYPT_OP) {
-               if (generate_copy_rrkey(ablkctx, key_ctx))
-                       goto map_fail1;
+               generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
-                       memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len);
+                       memcpy(chcr_req->key_ctx.key, ablkctx->key,
+                              ablkctx->enckey_len);
                } else {
-                       memcpy(key_ctx->key, ablkctx->key +
+                       memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
-                       memcpy(key_ctx->key +
+                       memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
-       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len);
-       memcpy(ablkctx->iv, req->info, ivsize);
-       sg_init_table(&ablkctx->iv_sg, 1);
-       sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize);
-       sg_param.nents = ablkctx->dst_nents;
-       sg_param.obsize = dst_bufsize;
+       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+       sg_param.nents = reqctx->dst_nents;
+       sg_param.obsize = req->nbytes;
        sg_param.qid = qid;
        sg_param.align = 1;
        if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst,
                goto map_fail1;
  
        skb_set_transport_header(skb, transhdr_len);
-       write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize);
-       write_sg_data_page_desc(skb, &frags, req->src, req->nbytes);
-       create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl);
-       req_ctx->skb = skb;
+       memcpy(reqctx->iv, req->info, ivsize);
+       write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+       write_sg_to_skb(skb, &frags, req->src, req->nbytes);
+       create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+                       sizeof(struct cpl_rx_phys_dsgl) + phys_dsgl);
+       reqctx->skb = skb;
        skb_get(skb);
        return skb;
  map_fail1:
@@@ -557,15 -665,9 +665,9 @@@ static int chcr_aes_cbc_setkey(struct c
  {
        struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-       struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm);
        unsigned int ck_size, context_size;
        u16 alignment = 0;
  
-       if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize))
-               goto badkey_err;
-       memcpy(ablkctx->key, key, keylen);
-       ablkctx->enckey_len = keylen;
        if (keylen == AES_KEYSIZE_128) {
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        } else if (keylen == AES_KEYSIZE_192) {
        } else {
                goto badkey_err;
        }
+       memcpy(ablkctx->key, key, keylen);
+       ablkctx->enckey_len = keylen;
+       get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;
  
@@@ -592,18 -696,16 +696,18 @@@ badkey_err
  
  static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
  {
 -      int ret = 0;
 -      struct sge_ofld_txq *q;
        struct adapter *adap = netdev2adap(dev);
 +      struct sge_uld_txq_info *txq_info =
 +              adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
 +      struct sge_uld_txq *txq;
 +      int ret = 0;
  
        local_bh_disable();
 -      q = &adap->sge.ofldtxq[idx];
 -      spin_lock(&q->sendq.lock);
 -      if (q->full)
 +      txq = &txq_info->uldtxq[idx];
 +      spin_lock(&txq->sendq.lock);
 +      if (txq->full)
                ret = -1;
 -      spin_unlock(&q->sendq.lock);
 +      spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
  }
@@@ -612,7 -714,6 +716,6 @@@ static int chcr_aes_encrypt(struct ablk
  {
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-       struct crypto_async_request *req_base = &req->base;
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct sk_buff *skb;
  
                        return -EBUSY;
        }
  
-       skb = create_cipher_wr(req_base, ctx,
-                              u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
+       skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id],
                               CHCR_ENCRYPT_OP);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@@ -639,7 -739,6 +741,6 @@@ static int chcr_aes_decrypt(struct ablk
  {
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
-       struct crypto_async_request *req_base = &req->base;
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        struct sk_buff *skb;
  
                        return -EBUSY;
        }
  
-       skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0],
+       skb = create_cipher_wr(req, u_ctx->lldi.rxq_ids[0],
                               CHCR_DECRYPT_OP);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
@@@ -676,11 -775,11 +777,11 @@@ static int chcr_device_init(struct chcr
                }
                u_ctx = ULD_CTX(ctx);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
 -              ctx->dev->tx_channel_id = 0;
                rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
                rxq_idx += id % rxq_perchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
                ctx->tx_channel_id = rxq_idx;
 +              ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                spin_unlock(&ctx->dev->lock_chcr_dev);
        }
  out:
@@@ -729,50 -828,33 +830,33 @@@ static int get_alg_config(struct algo_p
        return 0;
  }
  
- static inline int
- write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx,
-                           struct sk_buff *skb, unsigned int *frags, char *bfr,
-                           u8 bfr_len)
+ static inline void chcr_free_shash(struct crypto_shash *base_hash)
  {
-       void *page_ptr = NULL;
-       skb->len += bfr_len;
-       skb->data_len += bfr_len;
-       skb->truesize += bfr_len;
-       page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA);
-       if (!page_ptr)
-               return -ENOMEM;
-       get_page(virt_to_page(page_ptr));
-       req_ctx->dummy_payload_ptr = page_ptr;
-       memcpy(page_ptr, bfr, bfr_len);
-       skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr),
-                          offset_in_page(page_ptr), bfr_len);
-       (*frags)++;
-       return 0;
+       crypto_free_shash(base_hash);
  }
  
  /**
-  *    create_final_hash_wr - Create hash work request
+  *    create_hash_wr - Create hash work request
   *    @req - Hash request
   */
- static struct sk_buff *create_final_hash_wr(struct ahash_request *req,
-                                           struct hash_wr_param *param)
+ static struct sk_buff *create_hash_wr(struct ahash_request *req,
+                                     struct hash_wr_param *param)
  {
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
        struct sk_buff *skb = NULL;
-       struct _key_ctx *key_ctx;
-       struct fw_crypto_lookaside_wr *wreq;
-       struct cpl_tx_sec_pdu *sec_cpl;
+       struct chcr_wr *chcr_req;
        unsigned int frags = 0, transhdr_len, iopad_alignment = 0;
        unsigned int digestsize = crypto_ahash_digestsize(tfm);
-       unsigned int kctx_len = sizeof(*key_ctx);
+       unsigned int kctx_len = 0;
        u8 hash_size_in_response = 0;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+               GFP_ATOMIC;
  
        iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
-       kctx_len += param->alg_prm.result_size + iopad_alignment;
+       kctx_len = param->alg_prm.result_size + iopad_alignment;
        if (param->opad_needed)
                kctx_len += param->alg_prm.result_size + iopad_alignment;
  
        else
                hash_size_in_response = param->alg_prm.result_size;
        transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
-       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),
-                       GFP_ATOMIC);
+       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
        if (!skb)
                return skb;
  
        skb_reserve(skb, sizeof(struct sge_opaque_hdr));
-       wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len);
-       memset(wreq, 0, transhdr_len);
+       chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+       memset(chcr_req, 0, transhdr_len);
  
-       sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET);
-       sec_cpl->op_ivinsrtofst =
-               FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0);
-       sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len);
+       chcr_req->sec_cpl.op_ivinsrtofst =
+               FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0);
+       chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
  
-       sec_cpl->aadstart_cipherstop_hi =
+       chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
-       sec_cpl->cipherstop_lo_authinsert =
+       chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
-       sec_cpl->seqno_numivs =
+       chcr_req->sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
-                                        param->opad_needed, 0, 0);
+                                        param->opad_needed, 0);
  
-       sec_cpl->ivgen_hdrlen =
+       chcr_req->sec_cpl.ivgen_hdrlen =
                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
  
-       key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl));
-       memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size);
+       memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
+              param->alg_prm.result_size);
  
        if (param->opad_needed)
-               memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 32 :
-                                      CHCR_HASH_MAX_DIGEST_SIZE),
+               memcpy(chcr_req->key_ctx.key +
+                      ((param->alg_prm.result_size <= 32) ? 32 :
+                       CHCR_HASH_MAX_DIGEST_SIZE),
                       hmacctx->opad, param->alg_prm.result_size);
  
-       key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
+       chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
                                            param->alg_prm.mk_size, 0,
                                            param->opad_needed,
-                                           (kctx_len >> 4));
-       sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1);
+                                           ((kctx_len +
+                                            sizeof(chcr_req->key_ctx)) >> 4));
+       chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
  
        skb_set_transport_header(skb, transhdr_len);
        if (param->bfr_len != 0)
-               write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr,
-                                           param->bfr_len);
+               write_buffer_to_skb(skb, &frags, req_ctx->reqbfr,
+                                   param->bfr_len);
        if (param->sg_len != 0)
-               write_sg_data_page_desc(skb, &frags, req->src, param->sg_len);
+               write_sg_to_skb(skb, &frags, req->src, param->sg_len);
  
-       create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response,
-                   0);
+       create_wreq(ctx, chcr_req, req, skb, kctx_len, hash_size_in_response, 0,
+                       DUMMY_BYTES);
        req_ctx->skb = skb;
        skb_get(skb);
        return skb;
@@@ -854,34 -936,40 +938,40 @@@ static int chcr_ahash_update(struct aha
                        return -EBUSY;
        }
  
-       if (nbytes + req_ctx->bfr_len >= bs) {
-               remainder = (nbytes + req_ctx->bfr_len) % bs;
-               nbytes = nbytes + req_ctx->bfr_len - remainder;
+       if (nbytes + req_ctx->reqlen >= bs) {
+               remainder = (nbytes + req_ctx->reqlen) % bs;
+               nbytes = nbytes + req_ctx->reqlen - remainder;
        } else {
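+               /* not yet a full block in total: just buffer the data */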
-               sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr +
-                                  req_ctx->bfr_len, nbytes, 0);
-               req_ctx->bfr_len += nbytes;
+               sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
+                                  + req_ctx->reqlen, nbytes, 0);
+               req_ctx->reqlen += nbytes;
                return 0;
        }
  
        params.opad_needed = 0;
        params.more = 1;
        params.last = 0;
-       params.sg_len = nbytes - req_ctx->bfr_len;
-       params.bfr_len = req_ctx->bfr_len;
+       params.sg_len = nbytes - req_ctx->reqlen;
+       params.bfr_len = req_ctx->reqlen;
        params.scmd1 = 0;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        req_ctx->result = 0;
        req_ctx->data_len += params.sg_len + params.bfr_len;
-       skb = create_final_hash_wr(req, &params);
+       skb = create_hash_wr(req, &params);
        if (!skb)
                return -ENOMEM;
  
-       req_ctx->bfr_len = remainder;
-       if (remainder)
+       if (remainder) {
+               u8 *temp;
+               /* Swap buffers */
+               temp = req_ctx->reqbfr;
+               req_ctx->reqbfr = req_ctx->skbfr;
+               req_ctx->skbfr = temp;
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-                                  req_ctx->bfr, remainder, req->nbytes -
+                                  req_ctx->reqbfr, remainder, req->nbytes -
                                   remainder);
+       }
+       req_ctx->reqlen = remainder;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
        chcr_send_wr(skb);
@@@ -917,10 -1005,10 +1007,10 @@@ static int chcr_ahash_final(struct ahas
        params.sg_len = 0;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        req_ctx->result = 1;
-       params.bfr_len = req_ctx->bfr_len;
+       params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
-       if (req_ctx->bfr && (req_ctx->bfr_len == 0)) {
-               create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+       if (req_ctx->reqlen == 0) {
+               create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.last = 1;
                params.more = 0;
        }
-       skb = create_final_hash_wr(req, &params);
+       skb = create_hash_wr(req, &params);
+       if (!skb)
+               return -ENOMEM;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
        chcr_send_wr(skb);
@@@ -963,12 -1054,12 +1056,12 @@@ static int chcr_ahash_finup(struct ahas
                params.opad_needed = 0;
  
        params.sg_len = req->nbytes;
-       params.bfr_len = req_ctx->bfr_len;
+       params.bfr_len = req_ctx->reqlen;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->result = 1;
-       if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) {
-               create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len);
+       if ((req_ctx->reqlen + req->nbytes) == 0) {
+               create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.more = 0;
        }
  
-       skb = create_final_hash_wr(req, &params);
+       skb = create_hash_wr(req, &params);
        if (!skb)
                return -ENOMEM;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
        chcr_send_wr(skb);
@@@ -1023,13 -1115,13 +1117,13 @@@ static int chcr_ahash_digest(struct aha
        req_ctx->result = 1;
        req_ctx->data_len += params.bfr_len + params.sg_len;
  
-       if (req_ctx->bfr && req->nbytes == 0) {
-               create_last_hash_block(req_ctx->bfr, bs, 0);
+       if (req->nbytes == 0) {
+               create_last_hash_block(req_ctx->reqbfr, bs, 0);
                params.more = 1;
                params.bfr_len = bs;
        }
  
-       skb = create_final_hash_wr(req, &params);
+       skb = create_hash_wr(req, &params);
        if (!skb)
                return -ENOMEM;
  
@@@ -1044,12 -1136,12 +1138,12 @@@ static int chcr_ahash_export(struct aha
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = out;
  
-       state->bfr_len = req_ctx->bfr_len;
+       state->reqlen = req_ctx->reqlen;
        state->data_len = req_ctx->data_len;
-       memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+       memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
        memcpy(state->partial_hash, req_ctx->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
-       return 0;
+       return 0;
  }
  
  static int chcr_ahash_import(struct ahash_request *areq, const void *in)
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
  
-       req_ctx->bfr_len = state->bfr_len;
+       req_ctx->reqlen = state->reqlen;
        req_ctx->data_len = state->data_len;
-       req_ctx->dummy_payload_ptr = NULL;
-       memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128);
+       req_ctx->reqbfr = req_ctx->bfr1;
+       req_ctx->skbfr = req_ctx->bfr2;
+       memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
        memcpy(req_ctx->partial_hash, state->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        return 0;
@@@ -1075,15 -1168,16 +1170,16 @@@ static int chcr_ahash_setkey(struct cry
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int i, err = 0, updated_digestsize;
  
-       /*
-        * use the key to calculate the ipad and opad. ipad will sent with the
+       SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
+       /* use the key to calculate the ipad and opad. ipad will be sent with the
         * first request's data. opad will be sent with the final hash result
         * ipad in hmacctx->ipad and opad in hmacctx->opad location
         */
-       if (!hmacctx->desc)
-               return -EINVAL;
+       shash->tfm = hmacctx->base_hash;
+       shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
        if (keylen > bs) {
-               err = crypto_shash_digest(hmacctx->desc, key, keylen,
+               err = crypto_shash_digest(shash, key, keylen,
                                          hmacctx->ipad);
                if (err)
                        goto out;
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;
-       err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad,
+       err = chcr_compute_partial_hash(shash, hmacctx->ipad,
                                        hmacctx->ipad, digestsize);
        if (err)
                goto out;
        chcr_change_order(hmacctx->ipad, updated_digestsize);
  
-       err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad,
+       err = chcr_compute_partial_hash(shash, hmacctx->opad,
                                        hmacctx->opad, digestsize);
        if (err)
                goto out;
@@@ -1124,28 -1218,29 +1220,29 @@@ static int chcr_aes_xts_setkey(struct c
  {
        struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
-       int status = 0;
        unsigned short context_size = 0;
  
-       if ((key_len == (AES_KEYSIZE_128 << 1)) ||
-           (key_len == (AES_KEYSIZE_256 << 1))) {
-               memcpy(ablkctx->key, key, key_len);
-               ablkctx->enckey_len = key_len;
-               context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
-               ablkctx->key_ctx_hdr =
-                       FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
-                                        CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
-                                        CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
-                                        CHCR_KEYCTX_NO_KEY, 1,
-                                        0, context_size);
-               ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
-       } else {
+       if ((key_len != (AES_KEYSIZE_128 << 1)) &&
+           (key_len != (AES_KEYSIZE_256 << 1))) {
                crypto_tfm_set_flags((struct crypto_tfm *)tfm,
                                     CRYPTO_TFM_RES_BAD_KEY_LEN);
                ablkctx->enckey_len = 0;
-               status = -EINVAL;
+               return -EINVAL;
        }
-       return status;
+       memcpy(ablkctx->key, key, key_len);
+       ablkctx->enckey_len = key_len;
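+       /* derive the decrypt (reversed) round keys for the first half of the
+        * XTS key only; key_len << 2 is that half's length in bits
+        */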
+       get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
+       context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
+       ablkctx->key_ctx_hdr =
+               FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+                                CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
+                                CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
+                                CHCR_KEYCTX_NO_KEY, 1,
+                                0, context_size);
+       ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
+       return 0;
  }
  
  static int chcr_sha_init(struct ahash_request *areq)
        int digestsize =  crypto_ahash_digestsize(tfm);
  
        req_ctx->data_len = 0;
-       req_ctx->dummy_payload_ptr = NULL;
-       req_ctx->bfr_len = 0;
+       req_ctx->reqlen = 0;
+       req_ctx->reqbfr = req_ctx->bfr1;
+       req_ctx->skbfr = req_ctx->bfr2;
        req_ctx->skb = NULL;
        req_ctx->result = 0;
        copy_hash_init_values(req_ctx->partial_hash, digestsize);
@@@ -1204,29 -1300,1184 +1302,1184 @@@ static int chcr_hmac_cra_init(struct cr
  
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
-       hmacctx->desc = chcr_alloc_shash(digestsize);
-       if (IS_ERR(hmacctx->desc))
-               return PTR_ERR(hmacctx->desc);
+       hmacctx->base_hash = chcr_alloc_shash(digestsize);
+       if (IS_ERR(hmacctx->base_hash))
+               return PTR_ERR(hmacctx->base_hash);
        return chcr_device_init(crypto_tfm_ctx(tfm));
  }
  
- static void chcr_free_shash(struct shash_desc *desc)
- {
-       crypto_free_shash(desc->tfm);
-       kfree(desc);
- }
  static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
  {
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  
-       if (hmacctx->desc) {
-               chcr_free_shash(hmacctx->desc);
-               hmacctx->desc = NULL;
+       if (hmacctx->base_hash) {
+               chcr_free_shash(hmacctx->base_hash);
+               hmacctx->base_hash = NULL;
+       }
+ }
+ static int chcr_copy_assoc(struct aead_request *req,
+                               struct chcr_aead_ctx *ctx)
+ {
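+       /* ctx->null is presumably the kernel's null skcipher; "encrypting"
+        * with it just copies req->assoclen bytes of AAD from src to dst
+        */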
+       SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+       skcipher_request_set_tfm(skreq, ctx->null);
+       skcipher_request_set_callback(skreq, aead_request_flags(req),
+                       NULL, NULL);
+       skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+                       NULL);
+       return crypto_skcipher_encrypt(skreq);
+ }
+ static unsigned char get_hmac(unsigned int authsize)
+ {
+       switch (authsize) {
+       case ICV_8:
+               return CHCR_SCMD_HMAC_CTRL_PL1;
+       case ICV_10:
+               return CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+       case ICV_12:
+               return CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+       }
+       return CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+ }
+ static struct sk_buff *create_authenc_wr(struct aead_request *req,
+                                        unsigned short qid,
+                                        int size,
+                                        unsigned short op_type)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_context *ctx = crypto_aead_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       struct sk_buff *skb = NULL;
+       struct chcr_wr *chcr_req;
+       struct cpl_rx_phys_dsgl *phys_cpl;
+       struct phys_sge_parm sg_param;
+       struct scatterlist *src, *dst;
+       struct scatterlist src_sg[2], dst_sg[2];
+       unsigned int frags = 0, transhdr_len;
+       unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
+       unsigned int   kctx_len = 0;
+       unsigned short stop_offset = 0;
+       unsigned int  assoclen = req->assoclen;
+       unsigned int  authsize = crypto_aead_authsize(tfm);
+       int err = 0;
+       int null = 0;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+               GFP_ATOMIC;
+       if (aeadctx->enckey_len == 0 || (req->cryptlen == 0))
+               goto err;
+       if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+               goto err;
+       if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+               goto err;
+       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+       dst = src;
+       if (req->src != req->dst) {
+               err = chcr_copy_assoc(req, aeadctx);
+               if (err)
+                       return ERR_PTR(err);
+               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+       }
+       if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
+               null = 1;
+               assoclen = 0;
+       }
+       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+                                            (op_type ? -authsize : authsize));
+       if (reqctx->dst_nents <= 0) {
+               pr_err("AUTHENC:Invalid Destination sg entries\n");
+               goto err;
+       }
+       dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+       kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+               - sizeof(chcr_req->key_ctx);
+       transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+       if (!skb)
+               goto err;
+       /* LLD is going to write the sge hdr. */
+       skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+       /* Write WR */
+       chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+       memset(chcr_req, 0, transhdr_len);
+       stop_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+       /*
+        * Input order is AAD, IV and Payload, where the IV is included as
+        * part of the authenticated data. All other fields are filled
+        * according to the hardware spec.
+        */
+       chcr_req->sec_cpl.op_ivinsrtofst =
+               FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2,
+                                      (ivsize ? (assoclen + 1) : 0));
+       chcr_req->sec_cpl.pldlen = htonl(assoclen + ivsize + req->cryptlen);
+       chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+                                       assoclen ? 1 : 0, assoclen,
+                                       assoclen + ivsize + 1,
+                                       (stop_offset & 0x1F0) >> 4);
+       chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+                                       stop_offset & 0xF,
+                                       null ? 0 : assoclen + ivsize + 1,
+                                       stop_offset, stop_offset);
+       chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+                                       (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
+                                       CHCR_SCMD_CIPHER_MODE_AES_CBC,
+                                       actx->auth_mode, aeadctx->hmac_ctrl,
+                                       ivsize >> 1);
+       chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+                                        0, 1, dst_size);
+       chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+       if (op_type == CHCR_ENCRYPT_OP)
+               memcpy(chcr_req->key_ctx.key, aeadctx->key,
+                      aeadctx->enckey_len);
+       else
+               memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
+                      aeadctx->enckey_len);
+       memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
+                                       4), actx->h_iopad, kctx_len -
+                               (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
+       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+       sg_param.nents = reqctx->dst_nents;
+       sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+       sg_param.qid = qid;
+       sg_param.align = 0;
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+                                 &sg_param))
+               goto dstmap_fail;
+       skb_set_transport_header(skb, transhdr_len);
+       if (assoclen) {
+               /* AAD buffer in */
+               write_sg_to_skb(skb, &frags, req->src, assoclen);
+       }
+       write_buffer_to_skb(skb, &frags, req->iv, ivsize);
+       write_sg_to_skb(skb, &frags, src, req->cryptlen);
+       create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+                  sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+       reqctx->skb = skb;
+       skb_get(skb);
+       return skb;
+ dstmap_fail:
+       /* ivmap_fail: */
+       kfree_skb(skb);
+ err:
+       return ERR_PTR(-EINVAL);
+ }
+ static void aes_gcm_empty_pld_pad(struct scatterlist *sg,
+                                 unsigned short offset)
+ {
+       struct page *spage;
+       unsigned char *addr;
+       spage = sg_page(sg);
+       get_page(spage); /* so that it is not freed by NIC */
+ #ifdef KMAP_ATOMIC_ARGS
+       addr = kmap_atomic(spage, KM_SOFTIRQ0);
+ #else
+       addr = kmap_atomic(spage);
+ #endif
+       memset(addr + sg->offset, 0, offset + 1);
+       kunmap_atomic(addr);
+ }
+ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
+ {
+       __be32 data;
+       memset(block, 0, csize);
+       block += csize;
+       if (csize >= 4)
+               csize = 4;
+       else if (msglen > (unsigned int)(1 << (8 * csize)))
+               return -EOVERFLOW;
+       data = cpu_to_be32(msglen);
+       memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+       return 0;
+ }
+ static void generate_b0(struct aead_request *req,
+                       struct chcr_aead_ctx *aeadctx,
+                       unsigned short op_type)
+ {
+       unsigned int l, lp, m;
+       int rc;
+       struct crypto_aead *aead = crypto_aead_reqtfm(req);
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       u8 *b0 = reqctx->scratch_pad;
+       m = crypto_aead_authsize(aead);
+       memcpy(b0, reqctx->iv, 16);
+       lp = b0[0];
+       l = lp + 1;
+       /* set m, bits 3-5 */
+       *b0 |= (8 * ((m - 2) / 2));
+       /* set adata, bit 6, if associated data is used */
+       if (req->assoclen)
+               *b0 |= 64;
+       rc = set_msg_len(b0 + 16 - l,
+                        (op_type == CHCR_DECRYPT_OP) ?
+                        req->cryptlen - m : req->cryptlen, l);
+ }
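For reference, a sketch of the B0 flags octet that generate_b0() assembles per RFC 3610, assuming a 16-byte tag, AAD present and iv[0] == 3 (L' = 3), as on the RFC 4309 path:

	u8 flags = 3                    /* L' = L - 1, bits 0..2       */
		 | (8 * ((16 - 2) / 2)) /* M' = (M - 2) / 2, bits 3..5 */
		 | 64;                  /* Adata bit, set if assoclen  */
	/* flags == 0x7b; set_msg_len() then stores cryptlen big-endian
	 * in the last L = 4 bytes of B0.
	 */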
+ static inline int crypto_ccm_check_iv(const u8 *iv)
+ {
+       /* 2 <= L <= 8, so 1 <= L' <= 7. */
+       if (iv[0] < 1 || iv[0] > 7)
+               return -EINVAL;
+       return 0;
+ }
+ static int ccm_format_packet(struct aead_request *req,
+                            struct chcr_aead_ctx *aeadctx,
+                            unsigned int sub_type,
+                            unsigned short op_type)
+ {
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       int rc = 0;
+       if (req->assoclen > T5_MAX_AAD_SIZE) {
+               pr_err("CCM: Unsupported AAD data. It should be < %d\n",
+                      T5_MAX_AAD_SIZE);
+               return -EINVAL;
+       }
+       if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+               reqctx->iv[0] = 3;
+               memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+               memcpy(reqctx->iv + 4, req->iv, 8);
+               memset(reqctx->iv + 12, 0, 4);
+               *((unsigned short *)(reqctx->scratch_pad + 16)) =
+                       htons(req->assoclen - 8);
+       } else {
+               memcpy(reqctx->iv, req->iv, 16);
+               *((unsigned short *)(reqctx->scratch_pad + 16)) =
+                       htons(req->assoclen);
+       }
+       generate_b0(req, aeadctx, op_type);
+       /* zero the L-byte (iv[0] + 1) counter field so CTR starts at 0 */
+       memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+       return rc;
+ }
+ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
+                                 unsigned int dst_size,
+                                 struct aead_request *req,
+                                 unsigned short op_type,
+                                         struct chcr_context *chcrctx)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       unsigned int ivsize = AES_BLOCK_SIZE;
+       unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+       unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+       unsigned int c_id = chcrctx->dev->tx_channel_id;
+       unsigned int ccm_xtra;
+       unsigned char tag_offset = 0, auth_offset = 0;
+       unsigned char hmac_ctrl = get_hmac(crypto_aead_authsize(tfm));
+       unsigned int assoclen;
+       if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+               assoclen = req->assoclen - 8;
+       else
+               assoclen = req->assoclen;
+       ccm_xtra = CCM_B0_SIZE +
+               ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+       auth_offset = req->cryptlen ?
+               (assoclen + ivsize + 1 + ccm_xtra) : 0;
+       if (op_type == CHCR_DECRYPT_OP) {
+               if (crypto_aead_authsize(tfm) != req->cryptlen)
+                       tag_offset = crypto_aead_authsize(tfm);
+               else
+                       auth_offset = 0;
+       }
+       sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+                                        2, (ivsize ?  (assoclen + 1) :  0) +
+                                        ccm_xtra);
+       sec_cpl->pldlen =
+               htonl(assoclen + ivsize + req->cryptlen + ccm_xtra);
+       /* For CCM there will always be a B0 block, so AAD start is always 1 */
+       sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+                                       1, assoclen + ccm_xtra, assoclen
+                                       + ivsize + 1 + ccm_xtra, 0);
+       sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+                                       auth_offset, tag_offset,
+                                       (op_type == CHCR_ENCRYPT_OP) ? 0 :
+                                       crypto_aead_authsize(tfm));
+       sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+                                       (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
+                                       cipher_mode, mac_mode, hmac_ctrl,
+                                       ivsize >> 1);
+       sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
+                                       1, dst_size);
+ }
+ int aead_ccm_validate_input(unsigned short op_type,
+                           struct aead_request *req,
+                           struct chcr_aead_ctx *aeadctx,
+                           unsigned int sub_type)
+ {
+       if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+               if (crypto_ccm_check_iv(req->iv)) {
+                       pr_err("CCM: IV check fails\n");
+                       return -EINVAL;
+               }
+       } else {
+               if (req->assoclen != 16 && req->assoclen != 20) {
+                       pr_err("RFC4309: Invalid AAD length %d\n",
+                              req->assoclen);
+                       return -EINVAL;
+               }
+       }
+       if (aeadctx->enckey_len == 0) {
+               pr_err("CCM: Encryption key not set\n");
+               return -EINVAL;
+       }
+       return 0;
+ }
+ unsigned int fill_aead_req_fields(struct sk_buff *skb,
+                                 struct aead_request *req,
+                                 struct scatterlist *src,
+                                 unsigned int ivsize,
+                                 struct chcr_aead_ctx *aeadctx)
+ {
+       unsigned int frags = 0;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       /* b0 and aad length(if available) */
+       write_buffer_to_skb(skb, &frags, reqctx->scratch_pad, CCM_B0_SIZE +
+                               (req->assoclen ?  CCM_AAD_FIELD_SIZE : 0));
+       if (req->assoclen) {
+               if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
+                       write_sg_to_skb(skb, &frags, req->src,
+                                       req->assoclen - 8);
+               else
+                       write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+       }
+       write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+       if (req->cryptlen)
+               write_sg_to_skb(skb, &frags, src, req->cryptlen);
+       return frags;
+ }
+ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
+                                         unsigned short qid,
+                                         int size,
+                                         unsigned short op_type)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_context *ctx = crypto_aead_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       struct sk_buff *skb = NULL;
+       struct chcr_wr *chcr_req;
+       struct cpl_rx_phys_dsgl *phys_cpl;
+       struct phys_sge_parm sg_param;
+       struct scatterlist *src, *dst;
+       struct scatterlist src_sg[2], dst_sg[2];
+       unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
+       unsigned int dst_size = 0, kctx_len;
+       unsigned int sub_type;
+       unsigned int authsize = crypto_aead_authsize(tfm);
+       int err = 0;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+               GFP_ATOMIC;
+       if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+               goto err;
+       if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+               goto err;
+       sub_type = get_aead_subtype(tfm);
+       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+       dst = src;
+       if (req->src != req->dst) {
+               err = chcr_copy_assoc(req, aeadctx);
+               if (err) {
+                       pr_err("AAD copy to destination buffer fails\n");
+                       return ERR_PTR(err);
+               }
+               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+       }
+       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+                                            (op_type ? -authsize : authsize));
+       if (reqctx->dst_nents <= 0) {
+               pr_err("CCM:Invalid Destination sg entries\n");
+               goto err;
+       }
+       if (aead_ccm_validate_input(op_type, req, aeadctx, sub_type))
+               goto err;
+       dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+       kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
+       transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)),  flags);
+       if (!skb)
+               goto err;
+       skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+       chcr_req = (struct chcr_wr *) __skb_put(skb, transhdr_len);
+       memset(chcr_req, 0, transhdr_len);
+       fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type, ctx);
+       chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
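+       /* CCM uses the same AES key for CTR and CBC-MAC, so it is written
+        * twice: once at the start and once at the next 16-byte boundary
+        */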
+       memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+       memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+                                       16), aeadctx->key, aeadctx->enckey_len);
+       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+       if (ccm_format_packet(req, aeadctx, sub_type, op_type))
+               goto dstmap_fail;
+       sg_param.nents = reqctx->dst_nents;
+       sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+       sg_param.qid = qid;
+       sg_param.align = 0;
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+                                 &sg_param))
+               goto dstmap_fail;
+       skb_set_transport_header(skb, transhdr_len);
+       frags = fill_aead_req_fields(skb, req, src, ivsize, aeadctx);
+       create_wreq(ctx, chcr_req, req, skb, kctx_len, 0, 1,
+                   sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+       reqctx->skb = skb;
+       skb_get(skb);
+       return skb;
+ dstmap_fail:
+       kfree_skb(skb);
+       skb = NULL;
+ err:
+       return ERR_PTR(-EINVAL);
+ }
+ static struct sk_buff *create_gcm_wr(struct aead_request *req,
+                                    unsigned short qid,
+                                    int size,
+                                    unsigned short op_type)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_context *ctx = crypto_aead_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
+       struct sk_buff *skb = NULL;
+       struct chcr_wr *chcr_req;
+       struct cpl_rx_phys_dsgl *phys_cpl;
+       struct phys_sge_parm sg_param;
+       struct scatterlist *src, *dst;
+       struct scatterlist src_sg[2], dst_sg[2];
+       unsigned int frags = 0, transhdr_len;
+       unsigned int ivsize = AES_BLOCK_SIZE;
+       unsigned int dst_size = 0, kctx_len;
+       unsigned char tag_offset = 0;
+       unsigned int crypt_len = 0;
+       unsigned int authsize = crypto_aead_authsize(tfm);
+       unsigned char hmac_ctrl = get_hmac(authsize);
+       int err = 0;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+               GFP_ATOMIC;
+       /* validate key size */
+       if (aeadctx->enckey_len == 0)
+               goto err;
+       if (op_type && req->cryptlen < crypto_aead_authsize(tfm))
+               goto err;
+       if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
+               goto err;
+       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
+       dst = src;
+       if (req->src != req->dst) {
+               err = chcr_copy_assoc(req, aeadctx);
+               if (err)
+                       return  ERR_PTR(err);
+               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+       }
+       if (!req->cryptlen)
+               /* a null payload is not supported by the hardware,
+                * so software sends one block instead
+                */
+               crypt_len = AES_BLOCK_SIZE;
+       else
+               crypt_len = req->cryptlen;
+       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+                                            (op_type ? -authsize : authsize));
+       if (reqctx->dst_nents <= 0) {
+               pr_err("GCM: Invalid destination sg entries\n");
+               goto err;
+       }
+       dst_size = get_space_for_phys_dsgl(reqctx->dst_nents);
+       kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
+               AEAD_H_SIZE;
+       transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+       skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), flags);
+       if (!skb)
+               goto err;
+       /* NIC driver is going to write the sge hdr. */
+       skb_reserve(skb, sizeof(struct sge_opaque_hdr));
+       chcr_req = (struct chcr_wr *)__skb_put(skb, transhdr_len);
+       memset(chcr_req, 0, transhdr_len);
+       if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
+               req->assoclen -= 8;
+       tag_offset = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
+       chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+                                       ctx->dev->tx_channel_id, 2, (ivsize ?
+                                       (req->assoclen + 1) : 0));
+       chcr_req->sec_cpl.pldlen = htonl(req->assoclen + ivsize + crypt_len);
+       chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+                                       req->assoclen ? 1 : 0, req->assoclen,
+                                       req->assoclen + ivsize + 1, 0);
+       if (req->cryptlen) {
+               chcr_req->sec_cpl.cipherstop_lo_authinsert =
+                       FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + ivsize + 1,
+                                               tag_offset, tag_offset);
+               chcr_req->sec_cpl.seqno_numivs =
+                       FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
+                                       CHCR_ENCRYPT_OP) ? 1 : 0,
+                                       CHCR_SCMD_CIPHER_MODE_AES_GCM,
+                                       CHCR_SCMD_AUTH_MODE_GHASH, hmac_ctrl,
+                                       ivsize >> 1);
+       } else {
+               chcr_req->sec_cpl.cipherstop_lo_authinsert =
+                       FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
+               chcr_req->sec_cpl.seqno_numivs =
+                       FILL_SEC_CPL_SCMD0_SEQNO(op_type,
+                                       (op_type ==  CHCR_ENCRYPT_OP) ?
+                                       1 : 0, CHCR_SCMD_CIPHER_MODE_AES_CBC,
+                                       0, 0, ivsize >> 1);
+       }
+       chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+                                       0, 1, dst_size);
+       chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
+       memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
+       memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
+                               16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+       /* Prepare a 16-byte IV: */
+       /* SALT | IV | 0x00000001 */
+       if (get_aead_subtype(tfm) ==
+           CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+               memcpy(reqctx->iv, aeadctx->salt, 4);
+               memcpy(reqctx->iv + 4, req->iv, 8);
+       } else {
+               memcpy(reqctx->iv, req->iv, 12);
+       }
+       *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+       phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+       sg_param.nents = reqctx->dst_nents;
+       sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
+       sg_param.qid = qid;
+       sg_param.align = 0;
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+                                 &sg_param))
+               goto dstmap_fail;
+       skb_set_transport_header(skb, transhdr_len);
+       write_sg_to_skb(skb, &frags, req->src, req->assoclen);
+       write_buffer_to_skb(skb, &frags, reqctx->iv, ivsize);
+       if (req->cryptlen) {
+               write_sg_to_skb(skb, &frags, src, req->cryptlen);
+       } else {
+               aes_gcm_empty_pld_pad(req->dst, authsize - 1);
+               write_sg_to_skb(skb, &frags, dst, crypt_len);
+       }
+       create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
+                       sizeof(struct cpl_rx_phys_dsgl) + dst_size);
+       reqctx->skb = skb;
+       skb_get(skb);
+       return skb;
+ dstmap_fail:
+       /* ivmap_fail: */
+       kfree_skb(skb);
+       skb = NULL;
+ err:
+       return skb;
+ }
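A note on the IV handling above: the driver assembles the 16-byte initial counter block J0 for GCM. A minimal sketch of that layout (illustrative helper, not a driver symbol): for rfc4106(gcm(aes)) the 4-byte salt saved at setkey time is concatenated with the 8-byte per-request IV, while plain gcm(aes) uses the full 12-byte IV; the last four bytes are the big-endian block counter starting at 1.

static void gcm_build_j0(u8 j0[16], const u8 *salt, const u8 *iv, bool rfc4106)
{
	if (rfc4106) {
		memcpy(j0, salt, 4);	/* salt captured at setkey time */
		memcpy(j0 + 4, iv, 8);	/* per-request IV */
	} else {
		memcpy(j0, iv, 12);
	}
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1;			/* counter = 0x00000001 (big endian) */
}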
+ static int chcr_aead_cra_init(struct crypto_aead *tfm)
+ {
+       struct chcr_context *ctx = crypto_aead_ctx(tfm);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       crypto_aead_set_reqsize(tfm, sizeof(struct chcr_aead_reqctx));
+       aeadctx->null = crypto_get_default_null_skcipher();
+       if (IS_ERR(aeadctx->null))
+               return PTR_ERR(aeadctx->null);
+       return chcr_device_init(ctx);
+ }
+ static void chcr_aead_cra_exit(struct crypto_aead *tfm)
+ {
+       crypto_put_default_null_skcipher();
+ }
+ static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
+                                       unsigned int authsize)
+ {
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+       aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
+       aeadctx->mayverify = VERIFY_HW;
+       return 0;
+ }
+ static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
+                                   unsigned int authsize)
+ {
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+       u32 maxauth = crypto_aead_maxauthsize(tfm);
+       /* For SHA1 the IPsec authsize is 12, not 10, i.e. maxauthsize / 2 does
+        * not hold; the authsize == 12 case must be checked before
+        * authsize == (maxauth >> 1).
+        */
+       if (authsize == ICV_4) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+               aeadctx->mayverify = VERIFY_HW;
+       } else if (authsize == ICV_6) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+               aeadctx->mayverify = VERIFY_HW;
+       } else if (authsize == ICV_10) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+               aeadctx->mayverify = VERIFY_HW;
+       } else if (authsize == ICV_12) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+               aeadctx->mayverify = VERIFY_HW;
+       } else if (authsize == ICV_14) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+               aeadctx->mayverify = VERIFY_HW;
+       } else if (authsize == (maxauth >> 1)) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+               aeadctx->mayverify = VERIFY_HW;
+       } else if (authsize == maxauth) {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+               aeadctx->mayverify = VERIFY_HW;
+       } else {
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+               aeadctx->mayverify = VERIFY_SW;
+       }
+       return 0;
+ }
+ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+ {
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+       switch (authsize) {
+       case ICV_4:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_8:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_12:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_14:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_16:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_13:
+       case ICV_15:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+               aeadctx->mayverify = VERIFY_SW;
+               break;
+       default:
+               crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
+                                         unsigned int authsize)
+ {
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+       switch (authsize) {
+       case ICV_8:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_12:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_16:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       default:
+               crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
+                               unsigned int authsize)
+ {
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+       switch (authsize) {
+       case ICV_4:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_6:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_8:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_10:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_12:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_14:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       case ICV_16:
+               aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
+               aeadctx->mayverify = VERIFY_HW;
+               break;
+       default:
+               crypto_tfm_set_flags((struct crypto_tfm *)tfm,
+                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return -EINVAL;
+       }
+       return 0;
+ }
+ static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
+                               const u8 *key,
+                               unsigned int keylen)
+ {
+       struct chcr_context *ctx = crypto_aead_ctx(aead);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       unsigned char ck_size, mk_size;
+       int key_ctx_size = 0;
+       memcpy(aeadctx->key, key, keylen);
+       aeadctx->enckey_len = keylen;
+       key_ctx_size = sizeof(struct _key_ctx) +
+               ((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
+       if (keylen == AES_KEYSIZE_128) {
+               mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+       } else if (keylen == AES_KEYSIZE_192) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+               mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
+       } else if (keylen == AES_KEYSIZE_256) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+               mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+       } else {
+               crypto_tfm_set_flags((struct crypto_tfm *)aead,
+                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
+               aeadctx->enckey_len = 0;
+               return  -EINVAL;
+       }
+       aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
+                                               key_ctx_size >> 4);
+       return 0;
+ }
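The key-context sizing above rounds the AES key up to a whole number of 16-byte units before doubling it for the two key copies. A quick illustrative helper (not driver code) makes the rounding explicit:

static inline unsigned int chcr_key_slot_bytes(unsigned int keylen)
{
	/* 16 -> 16, 24 -> 32, 32 -> 32 */
	return DIV_ROUND_UP(keylen, 16) * 16;
}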
+ static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
+                                   unsigned int keylen)
+ {
+       struct chcr_context *ctx = crypto_aead_ctx(aead);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       if (keylen < 3) {
+               crypto_tfm_set_flags((struct crypto_tfm *)aead,
+                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
+               aeadctx->enckey_len = 0;
+               return  -EINVAL;
+       }
+       keylen -= 3;
+       memcpy(aeadctx->salt, key + keylen, 3);
+       return chcr_aead_ccm_setkey(aead, key, keylen);
+ }
+ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+                          unsigned int keylen)
+ {
+       struct chcr_context *ctx = crypto_aead_ctx(aead);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
+       struct blkcipher_desc h_desc;
+       struct scatterlist src[1];
+       unsigned int ck_size;
+       int ret = 0, key_ctx_size = 0;
+       if (get_aead_subtype(aead) ==
+           CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+               keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
+               memcpy(aeadctx->salt, key + keylen, 4);
+       }
+       if (keylen == AES_KEYSIZE_128) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+       } else if (keylen == AES_KEYSIZE_192) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+       } else if (keylen == AES_KEYSIZE_256) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+       } else {
+               crypto_tfm_set_flags((struct crypto_tfm *)aead,
+                                    CRYPTO_TFM_RES_BAD_KEY_LEN);
+               aeadctx->enckey_len = 0;
+               pr_err("GCM: Invalid key length %d\n", keylen);
+               ret = -EINVAL;
+               goto out;
+       }
+       memcpy(aeadctx->key, key, keylen);
+       aeadctx->enckey_len = keylen;
+       key_ctx_size = sizeof(struct _key_ctx) +
+               ((DIV_ROUND_UP(keylen, 16)) << 4) +
+               AEAD_H_SIZE;
+       aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
+                                               CHCR_KEYCTX_MAC_KEY_SIZE_128,
+                                               0, 0,
+                                               key_ctx_size >> 4);
+       /* Calculate H = CIPH(K, 0 repeated 16 times) using a synchronous
+        * AES blkcipher.  The result goes into the key context.
+        */
+       h_desc.tfm = crypto_alloc_blkcipher("cbc(aes-generic)", 0, 0);
+       if (IS_ERR(h_desc.tfm)) {
+               aeadctx->enckey_len = 0;
+               ret = -ENOMEM;
+               goto out;
+       }
+       h_desc.flags = 0;
+       ret = crypto_blkcipher_setkey(h_desc.tfm, key, keylen);
+       if (ret) {
+               aeadctx->enckey_len = 0;
+               goto out1;
+       }
+       memset(gctx->ghash_h, 0, AEAD_H_SIZE);
+       sg_init_one(&src[0], gctx->ghash_h, AEAD_H_SIZE);
+       ret = crypto_blkcipher_encrypt(&h_desc, &src[0], &src[0], AEAD_H_SIZE);
+ out1:
+       crypto_free_blkcipher(h_desc.tfm);
+ out:
+       return ret;
+ }
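The tail of chcr_gcm_setkey() derives the GHASH hash subkey H = AES_K(0^128) and stores it in the key context. Below is a minimal sketch of the same derivation using the kernel's single-block cipher API rather than the CBC blkcipher used above (an assumption of this example, not what the driver does; error handling trimmed):

#include <linux/crypto.h>

static int gcm_derive_hash_subkey(const u8 *key, unsigned int keylen,
				  u8 hash_h[AEAD_H_SIZE])
{
	struct crypto_cipher *aes;
	int ret;

	aes = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(aes))
		return PTR_ERR(aes);

	ret = crypto_cipher_setkey(aes, key, keylen);
	if (!ret) {
		/* H = E_K(0^128): encrypt one all-zero block in place */
		memset(hash_h, 0, AEAD_H_SIZE);
		crypto_cipher_encrypt_one(aes, hash_h, hash_h);
	}

	crypto_free_cipher(aes);
	return ret;
}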
+ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+                                  unsigned int keylen)
+ {
+       struct chcr_context *ctx = crypto_aead_ctx(authenc);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+       /* the supplied key contains both the auth and the cipher key */
+       struct crypto_authenc_keys keys;
+       unsigned int bs;
+       unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
+       int err = 0, i, key_ctx_len = 0;
+       unsigned char ck_size = 0;
+       unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
+       struct crypto_shash *base_hash = NULL;
+       struct algo_param param;
+       int align;
+       u8 *o_ptr = NULL;
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+               crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               goto out;
+       }
+       if (get_alg_config(&param, max_authsize)) {
+               pr_err("chcr : Unsupported digest size\n");
+               goto out;
+       }
+       if (keys.enckeylen == AES_KEYSIZE_128) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+       } else if (keys.enckeylen == AES_KEYSIZE_192) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+       } else if (keys.enckeylen == AES_KEYSIZE_256) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+       } else {
+               pr_err("chcr : Unsupported cipher key\n");
+               goto out;
+       }
+       /* Copy only the encryption key.  The auth key is used here to
+        * generate h(ipad) and h(opad) and is not needed afterwards;
+        * authkeylen carries the hash digest size.
+        */
+       memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+       aeadctx->enckey_len = keys.enckeylen;
+       get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+                           aeadctx->enckey_len << 3);
+       base_hash  = chcr_alloc_shash(max_authsize);
+       if (IS_ERR(base_hash)) {
+               pr_err("chcr : Base driver cannot be loaded\n");
+               goto out;
        }
+       {
+               SHASH_DESC_ON_STACK(shash, base_hash);
+               shash->tfm = base_hash;
+               shash->flags = crypto_shash_get_flags(base_hash);
+               bs = crypto_shash_blocksize(base_hash);
+               align = KEYCTX_ALIGN_PAD(max_authsize);
+               o_ptr =  actx->h_iopad + param.result_size + align;
+               if (keys.authkeylen > bs) {
+                       err = crypto_shash_digest(shash, keys.authkey,
+                                                 keys.authkeylen,
+                                                 o_ptr);
+                       if (err) {
+                               pr_err("chcr : Base driver cannot be loaded\n");
+                               goto out;
+                       }
+                       keys.authkeylen = max_authsize;
+               } else
+                       memcpy(o_ptr, keys.authkey, keys.authkeylen);
+               /* Compute the ipad-digest */
+               memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+               memcpy(pad, o_ptr, keys.authkeylen);
+               for (i = 0; i < bs >> 2; i++)
+                       *((unsigned int *)pad + i) ^= IPAD_DATA;
+               if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
+                                             max_authsize))
+                       goto out;
+               /* Compute the opad-digest */
+               memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
+               memcpy(pad, o_ptr, keys.authkeylen);
+               for (i = 0; i < bs >> 2; i++)
+                       *((unsigned int *)pad + i) ^= OPAD_DATA;
+               if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
+                       goto out;
+               /* convert the ipad and opad digest to network order */
+               chcr_change_order(actx->h_iopad, param.result_size);
+               chcr_change_order(o_ptr, param.result_size);
+               key_ctx_len = sizeof(struct _key_ctx) +
+                       ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
+                       (param.result_size + align) * 2;
+               aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
+                                               0, 1, key_ctx_len >> 4);
+               actx->auth_mode = param.auth_mode;
+               chcr_free_shash(base_hash);
+               return 0;
+       }
+ out:
+       aeadctx->enckey_len = 0;
+       if (base_hash)
+               chcr_free_shash(base_hash);
+       return -EINVAL;
  }
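The block above is the standard HMAC precomputation: the (possibly pre-hashed) auth key is padded to the hash block size, XORed with the ipad and opad constants, and each padded block is run through one compression round so that only the intermediate states need to be loaded into the key context. An illustrative standalone sketch of the pad construction (byte-wise rather than the 32-bit IPAD_DATA/OPAD_DATA words used above; kernel types assumed from the surrounding file):

#define HMAC_IPAD_BYTE	0x36
#define HMAC_OPAD_BYTE	0x5c

static void hmac_make_pads(const u8 *key, unsigned int keylen,
			   unsigned int blocksize, u8 *ipad, u8 *opad)
{
	unsigned int i;

	/* keys longer than a block are assumed to have been hashed already */
	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_BYTE;
		opad[i] ^= HMAC_OPAD_BYTE;
	}
	/* each pad is then hashed as a single block to get the partial digest */
}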
  
+ static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
+                                       const u8 *key, unsigned int keylen)
+ {
+       struct chcr_context *ctx = crypto_aead_ctx(authenc);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
+       struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
+       struct crypto_authenc_keys keys;
+       /* keys holds both the auth and the cipher key */
+       int key_ctx_len = 0;
+       unsigned char ck_size = 0;
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
+               crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               goto out;
+       }
+       if (keys.enckeylen == AES_KEYSIZE_128) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
+       } else if (keys.enckeylen == AES_KEYSIZE_192) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
+       } else if (keys.enckeylen == AES_KEYSIZE_256) {
+               ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
+       } else {
+               pr_err("chcr : Unsupported cipher key\n");
+               goto out;
+       }
+       memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
+       aeadctx->enckey_len = keys.enckeylen;
+       get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
+                                   aeadctx->enckey_len << 3);
+       key_ctx_len =  sizeof(struct _key_ctx)
+               + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
+       aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
+                                               0, key_ctx_len >> 4);
+       actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
+       return 0;
+ out:
+       aeadctx->enckey_len = 0;
+       return -EINVAL;
+ }
+ static int chcr_aead_encrypt(struct aead_request *req)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       reqctx->verify = VERIFY_HW;
+       switch (get_aead_subtype(tfm)) {
+       case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+       case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+               return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+                                   create_authenc_wr);
+       case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+       case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+               return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+                                   create_aead_ccm_wr);
+       default:
+               return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
+                                   create_gcm_wr);
+       }
+ }
+ static int chcr_aead_decrypt(struct aead_request *req)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_aead_ctx *aeadctx = AEAD_CTX(crypto_aead_ctx(tfm));
+       struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+       int size;
+       if (aeadctx->mayverify == VERIFY_SW) {
+               size = crypto_aead_maxauthsize(tfm);
+               reqctx->verify = VERIFY_SW;
+       } else {
+               size = 0;
+               reqctx->verify = VERIFY_HW;
+       }
+       switch (get_aead_subtype(tfm)) {
+       case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
+       case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
+               return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+                                   create_authenc_wr);
+       case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
+       case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
+               return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+                                   create_aead_ccm_wr);
+       default:
+               return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
+                                   create_gcm_wr);
+       }
+ }
+ static int chcr_aead_op(struct aead_request *req,
+                         unsigned short op_type,
+                         int size,
+                         create_wr_t create_wr_fn)
+ {
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct chcr_context *ctx = crypto_aead_ctx(tfm);
+       struct uld_ctx *u_ctx = ULD_CTX(ctx);
+       struct sk_buff *skb;
+       if (ctx && !ctx->dev) {
+               pr_err("chcr : %s : No crypto device.\n", __func__);
+               return -ENXIO;
+       }
+       if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+                                  ctx->tx_channel_id)) {
+               if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+                       return -EBUSY;
+       }
+       /* Form a WR from req */
+       skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[ctx->tx_channel_id], size,
+                          op_type);
+       if (IS_ERR(skb) || skb == NULL) {
+               pr_err("chcr : %s : failed to form WR. No memory\n", __func__);
+               return PTR_ERR(skb);
+       }
+       skb->dev = u_ctx->lldi.ports[0];
+       set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id);
+       chcr_send_wr(skb);
+       return -EINPROGRESS;
+ }
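For context, this is how a kernel caller would exercise one of the AEADs registered in driver_algs below; chcr_aead_op() returns -EINPROGRESS and the caller learns the final status through its completion callback. A minimal sketch under the usual assumptions (in-place operation, scatterlist sized for AAD + ciphertext + tag; helper names are illustrative, not driver symbols):

#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_result {
	struct completion completion;
	int err;
};

static void example_aead_done(struct crypto_async_request *req, int err)
{
	struct example_result *res = req->data;

	if (err == -EINPROGRESS)
		return;		/* request moved off the backlog, keep waiting */
	res->err = err;
	complete(&res->completion);
}

static int example_gcm_encrypt(struct scatterlist *sg, unsigned int assoclen,
			       unsigned int cryptlen, const u8 *key,
			       unsigned int keylen, u8 iv[12])
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct example_result res;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);
	if (ret)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&res.completion);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  example_aead_done, &res);
	/* sg holds assoclen bytes of AAD followed by cryptlen bytes of data */
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);

	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&res.completion);
		ret = res.err;
	}

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return ret;
}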
  static struct chcr_alg_template driver_algs[] = {
        /* AES-CBC */
        {
                .is_registered = 0,
                .alg.crypto = {
                        .cra_name               = "cbc(aes)",
-                       .cra_driver_name        = "cbc(aes-chcr)",
+                       .cra_driver_name        = "cbc-aes-chcr",
                        .cra_priority           = CHCR_CRA_PRIORITY,
                        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                CRYPTO_ALG_ASYNC,
                .is_registered = 0,
                .alg.crypto =   {
                        .cra_name               = "xts(aes)",
-                       .cra_driver_name        = "xts(aes-chcr)",
+                       .cra_driver_name        = "xts-aes-chcr",
                        .cra_priority           = CHCR_CRA_PRIORITY,
                        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                CRYPTO_ALG_ASYNC,
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha1)",
-                               .cra_driver_name = "hmac(sha1-chcr)",
+                               .cra_driver_name = "hmac-sha1-chcr",
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                        }
                }
                        .halg.digestsize = SHA224_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha224)",
-                               .cra_driver_name = "hmac(sha224-chcr)",
+                               .cra_driver_name = "hmac-sha224-chcr",
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                        }
                }
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha256)",
-                               .cra_driver_name = "hmac(sha256-chcr)",
+                               .cra_driver_name = "hmac-sha256-chcr",
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                        }
                }
                        .halg.digestsize = SHA384_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha384)",
-                               .cra_driver_name = "hmac(sha384-chcr)",
+                               .cra_driver_name = "hmac-sha384-chcr",
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                        }
                }
                        .halg.digestsize = SHA512_DIGEST_SIZE,
                        .halg.base = {
                                .cra_name = "hmac(sha512)",
-                               .cra_driver_name = "hmac(sha512-chcr)",
+                               .cra_driver_name = "hmac-sha512-chcr",
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                        }
                }
        },
+       /* Add AEAD Algorithms */
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "gcm(aes)",
+                               .cra_driver_name = "gcm-aes-chcr",
+                               .cra_blocksize  = 1,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_gcm_ctx),
+                       },
+                       .ivsize = 12,
+                       .maxauthsize = GHASH_DIGEST_SIZE,
+                       .setkey = chcr_gcm_setkey,
+                       .setauthsize = chcr_gcm_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "rfc4106(gcm(aes))",
+                               .cra_driver_name = "rfc4106-gcm-aes-chcr",
+                               .cra_blocksize   = 1,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_gcm_ctx),
+                       },
+                       .ivsize = 8,
+                       .maxauthsize    = GHASH_DIGEST_SIZE,
+                       .setkey = chcr_gcm_setkey,
+                       .setauthsize    = chcr_4106_4309_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "ccm(aes)",
+                               .cra_driver_name = "ccm-aes-chcr",
+                               .cra_blocksize   = 1,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx),
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize    = GHASH_DIGEST_SIZE,
+                       .setkey = chcr_aead_ccm_setkey,
+                       .setauthsize    = chcr_ccm_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "rfc4309(ccm(aes))",
+                               .cra_driver_name = "rfc4309-ccm-aes-chcr",
+                               .cra_blocksize   = 1,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx),
+                       },
+                       .ivsize = 8,
+                       .maxauthsize    = GHASH_DIGEST_SIZE,
+                       .setkey = chcr_aead_rfc4309_setkey,
+                       .setauthsize = chcr_4106_4309_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha1),cbc(aes))",
+                               .cra_driver_name =
+                                       "authenc-hmac-sha1-cbc-aes-chcr",
+                               .cra_blocksize   = AES_BLOCK_SIZE,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_authenc_ctx),
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+                       .setkey = chcr_authenc_setkey,
+                       .setauthsize = chcr_authenc_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha256),cbc(aes))",
+                               .cra_driver_name =
+                                       "authenc-hmac-sha256-cbc-aes-chcr",
+                               .cra_blocksize   = AES_BLOCK_SIZE,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_authenc_ctx),
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize    = SHA256_DIGEST_SIZE,
+                       .setkey = chcr_authenc_setkey,
+                       .setauthsize = chcr_authenc_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha224),cbc(aes))",
+                               .cra_driver_name =
+                                       "authenc-hmac-sha224-cbc-aes-chcr",
+                               .cra_blocksize   = AES_BLOCK_SIZE,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_authenc_ctx),
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA224_DIGEST_SIZE,
+                       .setkey = chcr_authenc_setkey,
+                       .setauthsize = chcr_authenc_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha384),cbc(aes))",
+                               .cra_driver_name =
+                                       "authenc-hmac-sha384-cbc-aes-chcr",
+                               .cra_blocksize   = AES_BLOCK_SIZE,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_authenc_ctx),
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA384_DIGEST_SIZE,
+                       .setkey = chcr_authenc_setkey,
+                       .setauthsize = chcr_authenc_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(hmac(sha512),cbc(aes))",
+                               .cra_driver_name =
+                                       "authenc-hmac-sha512-cbc-aes-chcr",
+                               .cra_blocksize   = AES_BLOCK_SIZE,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_authenc_ctx),
+                       },
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+                       .setkey = chcr_authenc_setkey,
+                       .setauthsize = chcr_authenc_setauthsize,
+               }
+       },
+       {
+               .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
+               .is_registered = 0,
+               .alg.aead = {
+                       .base = {
+                               .cra_name = "authenc(digest_null,cbc(aes))",
+                               .cra_driver_name =
+                                       "authenc-digest_null-cbc-aes-chcr",
+                               .cra_blocksize   = AES_BLOCK_SIZE,
+                               .cra_ctxsize =  sizeof(struct chcr_context) +
+                                               sizeof(struct chcr_aead_ctx) +
+                                               sizeof(struct chcr_authenc_ctx),
+                       },
+                       .ivsize  = AES_BLOCK_SIZE,
+                       .maxauthsize = 0,
+                       .setkey  = chcr_aead_digest_null_setkey,
+                       .setauthsize = chcr_authenc_null_setauthsize,
+               }
+       },
  };
  
  /*
@@@ -1424,6 -2869,11 +2871,11 @@@ static int chcr_unregister_alg(void
                                crypto_unregister_alg(
                                                &driver_algs[i].alg.crypto);
                        break;
+               case CRYPTO_ALG_TYPE_AEAD:
+                       if (driver_algs[i].is_registered)
+                               crypto_unregister_aead(
+                                               &driver_algs[i].alg.aead);
+                       break;
                case CRYPTO_ALG_TYPE_AHASH:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_ahash(
@@@ -1458,6 -2908,19 +2910,19 @@@ static int chcr_register_alg(void
                        err = crypto_register_alg(&driver_algs[i].alg.crypto);
                        name = driver_algs[i].alg.crypto.cra_driver_name;
                        break;
+               case CRYPTO_ALG_TYPE_AEAD:
+                       driver_algs[i].alg.aead.base.cra_priority =
+                               CHCR_CRA_PRIORITY;
+                       driver_algs[i].alg.aead.base.cra_flags =
+                               CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+                       driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
+                       driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
+                       driver_algs[i].alg.aead.init = chcr_aead_cra_init;
+                       driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
+                       driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
+                       err = crypto_register_aead(&driver_algs[i].alg.aead);
+                       name = driver_algs[i].alg.aead.base.cra_driver_name;
+                       break;
                case CRYPTO_ALG_TYPE_AHASH:
                        a_hash = &driver_algs[i].alg.hash;
                        a_hash->update = chcr_ahash_update;
@@@ -42,7 -42,6 +42,7 @@@ static chcr_handler_func work_handlers[
  static struct cxgb4_uld_info chcr_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
 +      .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chcr_uld_add,
        .state_change = chcr_uld_state_change,
@@@ -110,14 -109,12 +110,12 @@@ static int cpl_fw6_pld_handler(struct c
        if (ack_err_status) {
                if (CHK_MAC_ERR_BIT(ack_err_status) ||
                    CHK_PAD_ERR_BIT(ack_err_status))
-                       error_status = -EINVAL;
+                       error_status = -EBADMSG;
        }
        /* call completion callback with failure status */
        if (req) {
-               if (!chcr_handle_resp(req, input, error_status))
-                       req->complete(req, error_status);
-               else
-                       return -EINVAL;
+               error_status = chcr_handle_resp(req, input, error_status);
+               req->complete(req, error_status);
        } else {
                pr_err("Incorrect request address from the firmware\n");
                return -EFAULT;
  
  int chcr_send_wr(struct sk_buff *skb)
  {
 -      return cxgb4_ofld_send(skb->dev, skb);
 +      return cxgb4_crypto_send(skb->dev, skb);
  }
  
  static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
@@@ -168,11 -168,12 +168,11 @@@ static void mv_cesa_ahash_std_step(stru
        mv_cesa_adjust_op(engine, &creq->op_tmpl);
        memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
  
 -      digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 -      for (i = 0; i < digsize / 4; i++)
 -              writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
 -
 -      mv_cesa_adjust_op(engine, &creq->op_tmpl);
 -      memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));
 +      if (!sreq->offset) {
 +              digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 +              for (i = 0; i < digsize / 4; i++)
 +                      writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
 +      }
  
        if (creq->cache_ptr)
                memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
@@@ -311,24 -312,40 +311,40 @@@ static void mv_cesa_ahash_complete(stru
        int i;
  
        digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
-       for (i = 0; i < digsize / 4; i++)
-               creq->state[i] = readl_relaxed(engine->regs + CESA_IVDIG(i));
  
-       if (creq->last_req) {
+       if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
+           (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+               __le32 *data = NULL;
                /*
-                * Hardware's MD5 digest is in little endian format, but
-                * SHA in big endian format
+                * Result is already in the correct endianness when the SA is
+                * used
                 */
-               if (creq->algo_le) {
-                       __le32 *result = (void *)ahashreq->result;
+               data = creq->base.chain.last->op->ctx.hash.hash;
+               for (i = 0; i < digsize / 4; i++)
+                       creq->state[i] = cpu_to_le32(data[i]);
  
-                       for (i = 0; i < digsize / 4; i++)
-                               result[i] = cpu_to_le32(creq->state[i]);
-               } else {
-                       __be32 *result = (void *)ahashreq->result;
+               memcpy(ahashreq->result, data, digsize);
+       } else {
+               for (i = 0; i < digsize / 4; i++)
+                       creq->state[i] = readl_relaxed(engine->regs +
+                                                      CESA_IVDIG(i));
+               if (creq->last_req) {
+                       /*
+                        * Hardware's MD5 digest is in little endian format, but
+                        * SHA in big endian format
+                        */
+                       if (creq->algo_le) {
+                               __le32 *result = (void *)ahashreq->result;
+                               for (i = 0; i < digsize / 4; i++)
+                                       result[i] = cpu_to_le32(creq->state[i]);
+                       } else {
+                               __be32 *result = (void *)ahashreq->result;
  
-                       for (i = 0; i < digsize / 4; i++)
-                               result[i] = cpu_to_be32(creq->state[i]);
+                               for (i = 0; i < digsize / 4; i++)
+                                       result[i] = cpu_to_be32(creq->state[i]);
+                       }
                }
        }
  
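The branch above preserves the rule spelled out in the comment: the CESA engine's state words are read back CPU-endian, and the final digest must be presented little endian for MD5 and big endian for the SHA family. An illustrative helper (not a driver symbol) capturing that conversion:

static void cesa_state_to_digest(const u32 *state, void *result,
				 unsigned int digsize, bool algo_le)
{
	unsigned int i;

	for (i = 0; i < digsize / 4; i++) {
		if (algo_le)
			((__le32 *)result)[i] = cpu_to_le32(state[i]);
		else
			((__be32 *)result)[i] = cpu_to_be32(state[i]);
	}
}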
@@@ -503,6 -520,12 +519,12 @@@ mv_cesa_ahash_dma_last_req(struct mv_ce
                                                CESA_SA_DESC_CFG_LAST_FRAG,
                                      CESA_SA_DESC_CFG_FRAG_MSK);
  
+               ret = mv_cesa_dma_add_result_op(chain,
+                                               CESA_SA_CFG_SRAM_OFFSET,
+                                               CESA_SA_DATA_SRAM_OFFSET,
+                                               CESA_TDMA_SRC_IN_SRAM, flags);
+               if (ret)
+                       return ERR_PTR(-ENOMEM);
                return op;
        }
  
@@@ -563,6 -586,7 +585,7 @@@ static int mv_cesa_ahash_dma_req_init(s
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
        int ret;
+       u32 type;
  
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;
                goto err_free_tdma;
        }
  
-       if (op) {
+       /*
+        * If results are copied via DMA, this means that this
+        * request can be directly processed by the engine,
+        * without partial updates. So we can chain it at the
+        * DMA level with other requests.
+        */
+       type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;
+       if (op && type != CESA_TDMA_RESULT) {
                /* Add dummy desc to wait for crypto operation end */
                ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
                if (ret)
        else
                creq->cache_ptr = 0;
  
-       basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
-                                      CESA_TDMA_BREAK_CHAIN);
+       basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;
+       if (type != CESA_TDMA_RESULT)
+               basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
  
        return 0;