Merge tag 'v6.6-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Aug 2023 18:23:29 +0000 (11:23 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 29 Aug 2023 18:23:29 +0000 (11:23 -0700)
Pull crypto updates from Herbert Xu:
 "API:
   - Move crypto engine callback from tfm ctx into algorithm object
   - Fix atomic sleep bug in crypto_destroy_instance
   - Move lib/mpi into lib/crypto

  Algorithms:
   - Add chacha20 and poly1305 implementation for powerpc p10

  Drivers:
   - Add AES skcipher and aead support to starfive
   - Add Dynamic Boost Control support to ccp
   - Add support for STM32P13 platform to stm32"

* tag 'v6.6-p1' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (149 commits)
  Revert "dt-bindings: crypto: qcom,prng: Add SM8450"
  crypto: chelsio - Remove unused declarations
  X.509: if signature is unsupported skip validation
  crypto: qat - fix crypto capability detection for 4xxx
  crypto: drivers - Explicitly include correct DT includes
  crypto: engine - Remove crypto_engine_ctx
  crypto: zynqmp - Use new crypto_engine_op interface
  crypto: virtio - Use new crypto_engine_op interface
  crypto: stm32 - Use new crypto_engine_op interface
  crypto: jh7110 - Use new crypto_engine_op interface
  crypto: rk3288 - Use new crypto_engine_op interface
  crypto: omap - Use new crypto_engine_op interface
  crypto: keembay - Use new crypto_engine_op interface
  crypto: sl3516 - Use new crypto_engine_op interface
  crypto: caam - Use new crypto_engine_op interface
  crypto: aspeed - Remove non-standard sha512 algorithms
  crypto: aspeed - Use new crypto_engine_op interface
  crypto: amlogic - Use new crypto_engine_op interface
  crypto: sun8i-ss - Use new crypto_engine_op interface
  crypto: sun8i-ce - Use new crypto_engine_op interface
  ...

205 files changed:
Documentation/ABI/testing/debugfs-driver-qat [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-ccp
Documentation/devicetree/bindings/crypto/st,stm32-hash.yaml
MAINTAINERS
arch/arm64/crypto/Makefile
arch/arm64/crypto/aes-glue-ce.c [new file with mode: 0644]
arch/arm64/crypto/aes-glue-neon.c [new file with mode: 0644]
arch/powerpc/crypto/Kconfig
arch/powerpc/crypto/Makefile
arch/powerpc/crypto/chacha-p10-glue.c [new file with mode: 0644]
arch/powerpc/crypto/chacha-p10le-8x.S [new file with mode: 0644]
arch/powerpc/crypto/poly1305-p10-glue.c [new file with mode: 0644]
arch/powerpc/crypto/poly1305-p10le_64.S [new file with mode: 0644]
arch/x86/crypto/aesni-intel_glue.c
crypto/af_alg.c
crypto/algapi.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/verify_pefile.c
crypto/asymmetric_keys/x509_public_key.c
crypto/crypto_engine.c
crypto/jitterentropy.c
crypto/lrw.c
crypto/sig.c
crypto/xts.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/amd-rng.c
drivers/char/hw_random/arm_smccc_trng.c
drivers/char/hw_random/atmel-rng.c
drivers/char/hw_random/ba431-rng.c
drivers/char/hw_random/bcm2835-rng.c
drivers/char/hw_random/cctrng.c
drivers/char/hw_random/cn10k-rng.c
drivers/char/hw_random/core.c
drivers/char/hw_random/exynos-trng.c
drivers/char/hw_random/imx-rngc.c
drivers/char/hw_random/ingenic-rng.c
drivers/char/hw_random/ingenic-trng.c
drivers/char/hw_random/iproc-rng200.c
drivers/char/hw_random/nomadik-rng.c
drivers/char/hw_random/npcm-rng.c
drivers/char/hw_random/omap-rng.c
drivers/char/hw_random/omap3-rom-rng.c
drivers/char/hw_random/pasemi-rng.c
drivers/char/hw_random/pic32-rng.c
drivers/char/hw_random/stm32-rng.c
drivers/char/hw_random/timeriomem-rng.c
drivers/char/hw_random/xgene-rng.c
drivers/char/hw_random/xiphera-trng.c
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
drivers/crypto/amlogic/amlogic-gxl-cipher.c
drivers/crypto/amlogic/amlogic-gxl-core.c
drivers/crypto/amlogic/amlogic-gxl.h
drivers/crypto/aspeed/aspeed-acry.c
drivers/crypto/aspeed/aspeed-hace-crypto.c
drivers/crypto/aspeed/aspeed-hace-hash.c
drivers/crypto/aspeed/aspeed-hace.c
drivers/crypto/aspeed/aspeed-hace.h
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-ecc.c
drivers/crypto/atmel-sha.c
drivers/crypto/atmel-tdes.c
drivers/crypto/bcm/cipher.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caampkc.c
drivers/crypto/caam/caampkc.h
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/intern.h
drivers/crypto/caam/jr.c
drivers/crypto/caam/qi.c
drivers/crypto/caam/regs.h
drivers/crypto/ccp/Makefile
drivers/crypto/ccp/dbc.c [new file with mode: 0644]
drivers/crypto/ccp/dbc.h [new file with mode: 0644]
drivers/crypto/ccp/psp-dev.c
drivers/crypto/ccp/psp-dev.h
drivers/crypto/ccp/sp-dev.h
drivers/crypto/ccp/sp-pci.c
drivers/crypto/ccree/cc_driver.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_core.h
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/exynos-rng.c
drivers/crypto/gemini/sl3516-ce-cipher.c
drivers/crypto/gemini/sl3516-ce-core.c
drivers/crypto/gemini/sl3516-ce.h
drivers/crypto/hisilicon/hpre/hpre_crypto.c
drivers/crypto/hisilicon/hpre/hpre_main.c
drivers/crypto/hisilicon/qm.c
drivers/crypto/hisilicon/sec/sec_drv.c
drivers/crypto/img-hash.c
drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
drivers/crypto/intel/keembay/keembay-ocs-ecc.c
drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.h
drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.h
drivers/crypto/intel/qat/qat_common/Makefile
drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
drivers/crypto/intel/qat/qat_common/adf_admin.c
drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h
drivers/crypto/intel/qat/qat_common/adf_clock.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_clock.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_common_drv.h
drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
drivers/crypto/intel/qat/qat_common/adf_fw_counters.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_fw_counters.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen2_config.c
drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.h
drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_heartbeat.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_heartbeat.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h [new file with mode: 0644]
drivers/crypto/intel/qat/qat_common/adf_init.c
drivers/crypto/intel/qat/qat_common/adf_isr.c
drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
drivers/crypto/intel/qat/qat_common/qat_compression.c
drivers/crypto/intel/qat/qat_common/qat_uclo.c
drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
drivers/crypto/n2_core.c
drivers/crypto/omap-aes-gcm.c
drivers/crypto/omap-aes.c
drivers/crypto/omap-aes.h
drivers/crypto/omap-des.c
drivers/crypto/omap-sham.c
drivers/crypto/qcom-rng.c
drivers/crypto/rockchip/rk3288_crypto.c
drivers/crypto/rockchip/rk3288_crypto.h
drivers/crypto/rockchip/rk3288_crypto_ahash.c
drivers/crypto/rockchip/rk3288_crypto_skcipher.c
drivers/crypto/s5p-sss.c
drivers/crypto/sa2ul.c
drivers/crypto/sahara.c
drivers/crypto/starfive/Kconfig
drivers/crypto/starfive/Makefile
drivers/crypto/starfive/jh7110-aes.c [new file with mode: 0644]
drivers/crypto/starfive/jh7110-cryp.c
drivers/crypto/starfive/jh7110-cryp.h
drivers/crypto/starfive/jh7110-hash.c
drivers/crypto/stm32/Kconfig
drivers/crypto/stm32/stm32-cryp.c
drivers/crypto/stm32/stm32-hash.c
drivers/crypto/talitos.c
drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
drivers/crypto/xilinx/zynqmp-aes-gcm.c
drivers/crypto/xilinx/zynqmp-sha.c
include/crypto/algapi.h
include/crypto/engine.h
include/crypto/internal/engine.h [new file with mode: 0644]
include/linux/hisi_acc_qm.h
include/linux/psp-platform-access.h
include/uapi/linux/psp-dbc.h [new file with mode: 0644]
lib/Makefile
lib/crypto/Makefile
lib/crypto/mpi/Makefile [moved from lib/mpi/Makefile with 100% similarity]
lib/crypto/mpi/ec.c [moved from lib/mpi/ec.c with 100% similarity]
lib/crypto/mpi/generic_mpih-add1.c [moved from lib/mpi/generic_mpih-add1.c with 100% similarity]
lib/crypto/mpi/generic_mpih-lshift.c [moved from lib/mpi/generic_mpih-lshift.c with 100% similarity]
lib/crypto/mpi/generic_mpih-mul1.c [moved from lib/mpi/generic_mpih-mul1.c with 100% similarity]
lib/crypto/mpi/generic_mpih-mul2.c [moved from lib/mpi/generic_mpih-mul2.c with 100% similarity]
lib/crypto/mpi/generic_mpih-mul3.c [moved from lib/mpi/generic_mpih-mul3.c with 100% similarity]
lib/crypto/mpi/generic_mpih-rshift.c [moved from lib/mpi/generic_mpih-rshift.c with 100% similarity]
lib/crypto/mpi/generic_mpih-sub1.c [moved from lib/mpi/generic_mpih-sub1.c with 100% similarity]
lib/crypto/mpi/longlong.h [moved from lib/mpi/longlong.h with 100% similarity]
lib/crypto/mpi/mpi-add.c [moved from lib/mpi/mpi-add.c with 100% similarity]
lib/crypto/mpi/mpi-bit.c [moved from lib/mpi/mpi-bit.c with 100% similarity]
lib/crypto/mpi/mpi-cmp.c [moved from lib/mpi/mpi-cmp.c with 96% similarity]
lib/crypto/mpi/mpi-div.c [moved from lib/mpi/mpi-div.c with 100% similarity]
lib/crypto/mpi/mpi-inline.h [moved from lib/mpi/mpi-inline.h with 100% similarity]
lib/crypto/mpi/mpi-internal.h [moved from lib/mpi/mpi-internal.h with 100% similarity]
lib/crypto/mpi/mpi-inv.c [moved from lib/mpi/mpi-inv.c with 100% similarity]
lib/crypto/mpi/mpi-mod.c [moved from lib/mpi/mpi-mod.c with 100% similarity]
lib/crypto/mpi/mpi-mul.c [moved from lib/mpi/mpi-mul.c with 100% similarity]
lib/crypto/mpi/mpi-pow.c [moved from lib/mpi/mpi-pow.c with 100% similarity]
lib/crypto/mpi/mpi-sub-ui.c [moved from lib/mpi/mpi-sub-ui.c with 100% similarity]
lib/crypto/mpi/mpicoder.c [moved from lib/mpi/mpicoder.c with 100% similarity]
lib/crypto/mpi/mpih-cmp.c [moved from lib/mpi/mpih-cmp.c with 100% similarity]
lib/crypto/mpi/mpih-div.c [moved from lib/mpi/mpih-div.c with 100% similarity]
lib/crypto/mpi/mpih-mul.c [moved from lib/mpi/mpih-mul.c with 100% similarity]
lib/crypto/mpi/mpiutil.c [moved from lib/mpi/mpiutil.c with 100% similarity]
tools/crypto/ccp/.gitignore [new file with mode: 0644]
tools/crypto/ccp/Makefile [new file with mode: 0644]
tools/crypto/ccp/dbc.c [new file with mode: 0644]
tools/crypto/ccp/dbc.py [new file with mode: 0644]
tools/crypto/ccp/dbc_cli.py [new file with mode: 0755]
tools/crypto/ccp/test_dbc.py [new file with mode: 0755]

diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat
new file mode 100644 (file)
index 0000000..6731ffa
--- /dev/null
@@ -0,0 +1,61 @@
+What:          /sys/kernel/debug/qat_<device>_<BDF>/qat/fw_counters
+Date:          November 2023
+KernelVersion: 6.6
+Contact:       qat-linux@intel.com
+Description:   (RO) Read returns the number of requests sent to the FW and the number of responses
+               received from the FW for each Acceleration Engine
+               Reported firmware counters::
+
+                       <N>: Number of requests sent from Acceleration Engine N to FW and responses
+                            Acceleration Engine N received from FW
+
+What:          /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/config
+Date:          November 2023
+KernelVersion: 6.6
+Contact:       qat-linux@intel.com
+Description:   (RW) Read returns value of the Heartbeat update period.
+               Write to the file changes this period value.
+
+               This period should reflect planned polling interval of device
+               health status. High frequency Heartbeat monitoring wastes CPU cycles
+               but minimizes the customer's system downtime. Also, if there are
+               large service requests that take some time to complete, high frequency
+               Heartbeat monitoring could result in false reports of unresponsiveness
+               and in those cases, period needs to be increased.
+
+               This parameter is effective only for c3xxx, c62x, dh895xcc devices.
+               4xxx has this value internally fixed to 200ms.
+
+               Default value is set to 500. Minimal allowed value is 200.
+               All values are expressed in milliseconds.
+
+What:          /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/queries_failed
+Date:          November 2023
+KernelVersion: 6.6
+Contact:       qat-linux@intel.com
+Description:   (RO) Read returns the number of times the device became unresponsive.
+
+               Attribute returns value of the counter which is incremented when
+               status query results negative.
+
+What:          /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/queries_sent
+Date:          November 2023
+KernelVersion: 6.6
+Contact:       qat-linux@intel.com
+Description:   (RO) Read returns the number of times the control process checked
+               if the device is responsive.
+
+               Attribute returns value of the counter which is incremented on
+               every status query.
+
+What:          /sys/kernel/debug/qat_<device>_<BDF>/heartbeat/status
+Date:          November 2023
+KernelVersion: 6.6
+Contact:       qat-linux@intel.com
+Description:   (RO) Read returns the device health status.
+
+               Returns 0 when device is healthy or -1 when is unresponsive
+               or the query failed to send.
+
+               The driver does not monitor for Heartbeat. It is left for a user
+               to poll the status periodically.
index 7aded9b..ee6b787 100644 (file)
@@ -85,3 +85,21 @@ Description:
                Possible values:
                0: Not enforced
                1: Enforced
+
+What:          /sys/bus/pci/devices/<BDF>/bootloader_version
+Date:          June 2023
+KernelVersion: 6.4
+Contact:       mario.limonciello@amd.com
+Description:
+               The /sys/bus/pci/devices/<BDF>/bootloader_version
+               file reports the firmware version of the AMD AGESA
+               bootloader.
+
+What:          /sys/bus/pci/devices/<BDF>/tee_version
+Date:          June 2023
+KernelVersion: 6.4
+Contact:       mario.limonciello@amd.com
+Description:
+               The /sys/bus/pci/devices/<BDF>/tee_version
+               file reports the firmware version of the AMD Trusted
+               Execution Environment (TEE).
index b767ec7..ac48076 100644 (file)
@@ -20,6 +20,7 @@ properties:
       - stericsson,ux500-hash
       - st,stm32f456-hash
       - st,stm32f756-hash
+      - st,stm32mp13-hash
 
   reg:
     maxItems: 1
index a9f7c7e..0a24607 100644 (file)
@@ -915,6 +915,18 @@ S: Supported
 F:     drivers/crypto/ccp/sev*
 F:     include/uapi/linux/psp-sev.h
 
+AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER - DBC SUPPORT
+M:     Mario Limonciello <mario.limonciello@amd.com>
+L:     linux-crypto@vger.kernel.org
+S:     Supported
+F:     drivers/crypto/ccp/dbc.c
+F:     drivers/crypto/ccp/dbc.h
+F:     drivers/crypto/ccp/platform-access.c
+F:     drivers/crypto/ccp/platform-access.h
+F:     include/uapi/linux/psp-dbc.h
+F:     tools/crypto/ccp/*.c
+F:     tools/crypto/ccp/*.py
+
 AMD DISPLAY CORE
 M:     Harry Wentland <harry.wentland@amd.com>
 M:     Leo Li <sunpeng.li@amd.com>
index 4818e20..fbe64dc 100644 (file)
@@ -81,11 +81,6 @@ aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
 obj-$(CONFIG_CRYPTO_AES_ARM64_BS) += aes-neon-bs.o
 aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
 
-CFLAGS_aes-glue-ce.o   := -DUSE_V8_CRYPTO_EXTENSIONS
-
-$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
-       $(call if_changed_rule,cc_o_c)
-
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $(<) void $(@)
 
diff --git a/arch/arm64/crypto/aes-glue-ce.c b/arch/arm64/crypto/aes-glue-ce.c
new file mode 100644 (file)
index 0000000..7d309ce
--- /dev/null
@@ -0,0 +1,2 @@
+#define USE_V8_CRYPTO_EXTENSIONS
+#include "aes-glue.c"
diff --git a/arch/arm64/crypto/aes-glue-neon.c b/arch/arm64/crypto/aes-glue-neon.c
new file mode 100644 (file)
index 0000000..8ba0463
--- /dev/null
@@ -0,0 +1 @@
+#include "aes-glue.c"
index ad18725..f25024a 100644 (file)
@@ -111,4 +111,30 @@ config CRYPTO_AES_GCM_P10
          Support for cryptographic acceleration instructions on Power10 or
          later CPU. This module supports stitched acceleration for AES/GCM.
 
+config CRYPTO_CHACHA20_P10
+       tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)"
+       depends on PPC64 && CPU_LITTLE_ENDIAN
+       select CRYPTO_SKCIPHER
+       select CRYPTO_LIB_CHACHA_GENERIC
+       select CRYPTO_ARCH_HAVE_LIB_CHACHA
+       help
+         Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12
+         stream cipher algorithms
+
+         Architecture: PowerPC64
+         - Power10 or later
+         - Little-endian
+
+config CRYPTO_POLY1305_P10
+       tristate "Hash functions: Poly1305 (P10 or later)"
+       depends on PPC64 && CPU_LITTLE_ENDIAN
+       select CRYPTO_HASH
+       select CRYPTO_LIB_POLY1305_GENERIC
+       help
+         Poly1305 authenticator algorithm (RFC7539)
+
+         Architecture: PowerPC64
+         - Power10 or later
+         - Little-endian
+
 endmenu
index 7b4f516..ebdac1b 100644 (file)
@@ -14,6 +14,8 @@ obj-$(CONFIG_CRYPTO_CRC32C_VPMSUM) += crc32c-vpmsum.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF_VPMSUM) += crct10dif-vpmsum.o
 obj-$(CONFIG_CRYPTO_VPMSUM_TESTER) += crc-vpmsum_test.o
 obj-$(CONFIG_CRYPTO_AES_GCM_P10) += aes-gcm-p10-crypto.o
+obj-$(CONFIG_CRYPTO_CHACHA20_P10) += chacha-p10-crypto.o
+obj-$(CONFIG_CRYPTO_POLY1305_P10) += poly1305-p10-crypto.o
 
 aes-ppc-spe-y := aes-spe-core.o aes-spe-keys.o aes-tab-4k.o aes-spe-modes.o aes-spe-glue.o
 md5-ppc-y := md5-asm.o md5-glue.o
@@ -23,6 +25,8 @@ sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
 crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
 aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
+chacha-p10-crypto-y := chacha-p10-glue.o chacha-p10le-8x.o
+poly1305-p10-crypto-y := poly1305-p10-glue.o poly1305-p10le_64.o
 
 quiet_cmd_perl = PERL    $@
       cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c
new file mode 100644 (file)
index 0000000..74fb86b
--- /dev/null
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PowerPC P10 (ppc64le) accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright 2023- IBM Corp. All rights reserved.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/chacha.h>
+#include <crypto/internal/simd.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <linux/sizes.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+
+asmlinkage void chacha_p10le_8x(u32 *state, u8 *dst, const u8 *src,
+                               unsigned int len, int nrounds);
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_p10);
+
+static void vsx_begin(void)
+{
+       preempt_disable();
+       enable_kernel_vsx();
+}
+
+static void vsx_end(void)
+{
+       disable_kernel_vsx();
+       preempt_enable();
+}
+
+static void chacha_p10_do_8x(u32 *state, u8 *dst, const u8 *src,
+                            unsigned int bytes, int nrounds)
+{
+       unsigned int l = bytes & ~0x0FF;
+
+       if (l > 0) {
+               chacha_p10le_8x(state, dst, src, l, nrounds);
+               bytes -= l;
+               src += l;
+               dst += l;
+               state[12] += l / CHACHA_BLOCK_SIZE;
+       }
+
+       if (bytes > 0)
+               chacha_crypt_generic(state, dst, src, bytes, nrounds);
+}
+
+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds)
+{
+       hchacha_block_generic(state, stream, nrounds);
+}
+EXPORT_SYMBOL(hchacha_block_arch);
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv)
+{
+       chacha_init_generic(state, key, iv);
+}
+EXPORT_SYMBOL(chacha_init_arch);
+
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
+                      int nrounds)
+{
+       if (!static_branch_likely(&have_p10) || bytes <= CHACHA_BLOCK_SIZE ||
+           !crypto_simd_usable())
+               return chacha_crypt_generic(state, dst, src, bytes, nrounds);
+
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               vsx_begin();
+               chacha_p10_do_8x(state, dst, src, todo, nrounds);
+               vsx_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
+}
+EXPORT_SYMBOL(chacha_crypt_arch);
+
+static int chacha_p10_stream_xor(struct skcipher_request *req,
+                                const struct chacha_ctx *ctx, const u8 *iv)
+{
+       struct skcipher_walk walk;
+       u32 state[16];
+       int err;
+
+       err = skcipher_walk_virt(&walk, req, false);
+       if (err)
+               return err;
+
+       chacha_init_generic(state, ctx->key, iv);
+
+       while (walk.nbytes > 0) {
+               unsigned int nbytes = walk.nbytes;
+
+               if (nbytes < walk.total)
+                       nbytes = rounddown(nbytes, walk.stride);
+
+               if (!crypto_simd_usable()) {
+                       chacha_crypt_generic(state, walk.dst.virt.addr,
+                                            walk.src.virt.addr, nbytes,
+                                            ctx->nrounds);
+               } else {
+                       vsx_begin();
+                       chacha_p10_do_8x(state, walk.dst.virt.addr,
+                                     walk.src.virt.addr, nbytes, ctx->nrounds);
+                       vsx_end();
+               }
+               err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+               if (err)
+                       break;
+       }
+
+       return err;
+}
+
+static int chacha_p10(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       return chacha_p10_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_p10(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct chacha_ctx subctx;
+       u32 state[16];
+       u8 real_iv[16];
+
+       chacha_init_generic(state, ctx->key, req->iv);
+       hchacha_block_arch(state, subctx.key, ctx->nrounds);
+       subctx.nrounds = ctx->nrounds;
+
+       memcpy(&real_iv[0], req->iv + 24, 8);
+       memcpy(&real_iv[8], req->iv + 16, 8);
+       return chacha_p10_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+       {
+               .base.cra_name          = "chacha20",
+               .base.cra_driver_name   = "chacha20-p10",
+               .base.cra_priority      = 300,
+               .base.cra_blocksize     = 1,
+               .base.cra_ctxsize       = sizeof(struct chacha_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = CHACHA_KEY_SIZE,
+               .max_keysize            = CHACHA_KEY_SIZE,
+               .ivsize                 = CHACHA_IV_SIZE,
+               .chunksize              = CHACHA_BLOCK_SIZE,
+               .setkey                 = chacha20_setkey,
+               .encrypt                = chacha_p10,
+               .decrypt                = chacha_p10,
+       }, {
+               .base.cra_name          = "xchacha20",
+               .base.cra_driver_name   = "xchacha20-p10",
+               .base.cra_priority      = 300,
+               .base.cra_blocksize     = 1,
+               .base.cra_ctxsize       = sizeof(struct chacha_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = CHACHA_KEY_SIZE,
+               .max_keysize            = CHACHA_KEY_SIZE,
+               .ivsize                 = XCHACHA_IV_SIZE,
+               .chunksize              = CHACHA_BLOCK_SIZE,
+               .setkey                 = chacha20_setkey,
+               .encrypt                = xchacha_p10,
+               .decrypt                = xchacha_p10,
+       }, {
+               .base.cra_name          = "xchacha12",
+               .base.cra_driver_name   = "xchacha12-p10",
+               .base.cra_priority      = 300,
+               .base.cra_blocksize     = 1,
+               .base.cra_ctxsize       = sizeof(struct chacha_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = CHACHA_KEY_SIZE,
+               .max_keysize            = CHACHA_KEY_SIZE,
+               .ivsize                 = XCHACHA_IV_SIZE,
+               .chunksize              = CHACHA_BLOCK_SIZE,
+               .setkey                 = chacha12_setkey,
+               .encrypt                = xchacha_p10,
+               .decrypt                = xchacha_p10,
+       }
+};
+
+static int __init chacha_p10_init(void)
+{
+       static_branch_enable(&have_p10);
+
+       return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_p10_exit(void)
+{
+       crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init);
+module_exit(chacha_p10_exit);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-p10");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-p10");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-p10");
diff --git a/arch/powerpc/crypto/chacha-p10le-8x.S b/arch/powerpc/crypto/chacha-p10le-8x.S
new file mode 100644 (file)
index 0000000..17bedb6
--- /dev/null
@@ -0,0 +1,842 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#
+# Accelerated chacha20 implementation for ppc64le.
+#
+# Copyright 2023- IBM Corp. All rights reserved
+#
+#===================================================================================
+# Written by Danny Tsen <dtsen@us.ibm.com>
+#
+# chacha_p10le_8x(u32 *state, byte *dst, const byte *src,
+#                               size_t len, int nrounds);
+#
+# do rounds,  8 quarter rounds
+# 1.  a += b; d ^= a; d <<<= 16;
+# 2.  c += d; b ^= c; b <<<= 12;
+# 3.  a += b; d ^= a; d <<<= 8;
+# 4.  c += d; b ^= c; b <<<= 7
+#
+# row1 = (row1 + row2),  row4 = row1 xor row4,  row4 rotate each word by 16
+# row3 = (row3 + row4),  row2 = row3 xor row2,  row2 rotate each word by 12
+# row1 = (row1 + row2), row4 = row1 xor row4,  row4 rotate each word by 8
+# row3 = (row3 + row4), row2 = row3 xor row2,  row2 rotate each word by 7
+#
+# 4 blocks (a b c d)
+#
+# a0 b0 c0 d0
+# a1 b1 c1 d1
+# ...
+# a4 b4 c4 d4
+# ...
+# a8 b8 c8 d8
+# ...
+# a12 b12 c12 d12
+# a13 ...
+# a14 ...
+# a15 b15 c15 d15
+#
+# Column round (v0, v4,  v8, v12, v1, v5,  v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
+# Diagonal round (v0, v5, v10, v15, v1, v6, v11, v12, v2, v7,  v8, v13, v3, v4,  v9, v14)
+#
+
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
+#include <linux/linkage.h>
+
+.machine       "any"
+.text
+
+.macro SAVE_GPR GPR OFFSET FRAME
+       std     \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro SAVE_VRS VRS OFFSET FRAME
+       li      16, \OFFSET
+       stvx    \VRS, 16, \FRAME
+.endm
+
+.macro SAVE_VSX VSX OFFSET FRAME
+       li      16, \OFFSET
+       stxvx   \VSX, 16, \FRAME
+.endm
+
+.macro RESTORE_GPR GPR OFFSET FRAME
+       ld      \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro RESTORE_VRS VRS OFFSET FRAME
+       li      16, \OFFSET
+       lvx     \VRS, 16, \FRAME
+.endm
+
+.macro RESTORE_VSX VSX OFFSET FRAME
+       li      16, \OFFSET
+       lxvx    \VSX, 16, \FRAME
+.endm
+
+.macro SAVE_REGS
+       mflr 0
+       std 0, 16(1)
+       stdu 1,-752(1)
+
+       SAVE_GPR 14, 112, 1
+       SAVE_GPR 15, 120, 1
+       SAVE_GPR 16, 128, 1
+       SAVE_GPR 17, 136, 1
+       SAVE_GPR 18, 144, 1
+       SAVE_GPR 19, 152, 1
+       SAVE_GPR 20, 160, 1
+       SAVE_GPR 21, 168, 1
+       SAVE_GPR 22, 176, 1
+       SAVE_GPR 23, 184, 1
+       SAVE_GPR 24, 192, 1
+       SAVE_GPR 25, 200, 1
+       SAVE_GPR 26, 208, 1
+       SAVE_GPR 27, 216, 1
+       SAVE_GPR 28, 224, 1
+       SAVE_GPR 29, 232, 1
+       SAVE_GPR 30, 240, 1
+       SAVE_GPR 31, 248, 1
+
+       addi    9, 1, 256
+       SAVE_VRS 20, 0, 9
+       SAVE_VRS 21, 16, 9
+       SAVE_VRS 22, 32, 9
+       SAVE_VRS 23, 48, 9
+       SAVE_VRS 24, 64, 9
+       SAVE_VRS 25, 80, 9
+       SAVE_VRS 26, 96, 9
+       SAVE_VRS 27, 112, 9
+       SAVE_VRS 28, 128, 9
+       SAVE_VRS 29, 144, 9
+       SAVE_VRS 30, 160, 9
+       SAVE_VRS 31, 176, 9
+
+       SAVE_VSX 14, 192, 9
+       SAVE_VSX 15, 208, 9
+       SAVE_VSX 16, 224, 9
+       SAVE_VSX 17, 240, 9
+       SAVE_VSX 18, 256, 9
+       SAVE_VSX 19, 272, 9
+       SAVE_VSX 20, 288, 9
+       SAVE_VSX 21, 304, 9
+       SAVE_VSX 22, 320, 9
+       SAVE_VSX 23, 336, 9
+       SAVE_VSX 24, 352, 9
+       SAVE_VSX 25, 368, 9
+       SAVE_VSX 26, 384, 9
+       SAVE_VSX 27, 400, 9
+       SAVE_VSX 28, 416, 9
+       SAVE_VSX 29, 432, 9
+       SAVE_VSX 30, 448, 9
+       SAVE_VSX 31, 464, 9
+.endm # SAVE_REGS
+
+# Undo SAVE_REGS: reload vr20-vr31 and vsr14-vsr31 from the vector save
+# area at r1+256, then r14-r31 from their GPR slots, pop the 752-byte
+# frame and restore the link register.  Offsets must stay in lockstep
+# with SAVE_REGS.
+.macro RESTORE_REGS
+       addi    9, 1, 256
+       RESTORE_VRS 20, 0, 9
+       RESTORE_VRS 21, 16, 9
+       RESTORE_VRS 22, 32, 9
+       RESTORE_VRS 23, 48, 9
+       RESTORE_VRS 24, 64, 9
+       RESTORE_VRS 25, 80, 9
+       RESTORE_VRS 26, 96, 9
+       RESTORE_VRS 27, 112, 9
+       RESTORE_VRS 28, 128, 9
+       RESTORE_VRS 29, 144, 9
+       RESTORE_VRS 30, 160, 9
+       RESTORE_VRS 31, 176, 9
+
+       RESTORE_VSX 14, 192, 9
+       RESTORE_VSX 15, 208, 9
+       RESTORE_VSX 16, 224, 9
+       RESTORE_VSX 17, 240, 9
+       RESTORE_VSX 18, 256, 9
+       RESTORE_VSX 19, 272, 9
+       RESTORE_VSX 20, 288, 9
+       RESTORE_VSX 21, 304, 9
+       RESTORE_VSX 22, 320, 9
+       RESTORE_VSX 23, 336, 9
+       RESTORE_VSX 24, 352, 9
+       RESTORE_VSX 25, 368, 9
+       RESTORE_VSX 26, 384, 9
+       RESTORE_VSX 27, 400, 9
+       RESTORE_VSX 28, 416, 9
+       RESTORE_VSX 29, 432, 9
+       RESTORE_VSX 30, 448, 9
+       RESTORE_VSX 31, 464, 9
+
+       RESTORE_GPR 14, 112, 1
+       RESTORE_GPR 15, 120, 1
+       RESTORE_GPR 16, 128, 1
+       RESTORE_GPR 17, 136, 1
+       RESTORE_GPR 18, 144, 1
+       RESTORE_GPR 19, 152, 1
+       RESTORE_GPR 20, 160, 1
+       RESTORE_GPR 21, 168, 1
+       RESTORE_GPR 22, 176, 1
+       RESTORE_GPR 23, 184, 1
+       RESTORE_GPR 24, 192, 1
+       RESTORE_GPR 25, 200, 1
+       RESTORE_GPR 26, 208, 1
+       RESTORE_GPR 27, 216, 1
+       RESTORE_GPR 28, 224, 1
+       RESTORE_GPR 29, 232, 1
+       RESTORE_GPR 30, 240, 1
+       RESTORE_GPR 31, 248, 1
+
+       # pop the frame allocated by SAVE_REGS (stdu 1,-752(1)) and
+       # reload the saved LR from the caller's frame
+       addi    1, 1, 752
+       ld 0, 16(1)
+       mtlr 0
+.endm # RESTORE_REGS
+
+# One double-round (column round + diagonal round) over EIGHT ChaCha
+# blocks at once: vr0-vr15 hold one 4-block lane, vr16-vr31 the second
+# lane (the deeper-indented instructions).  Because every vector
+# register is live, vsr0 is used as scratch to temporarily swap
+# vr25/vr28 with the rotate/permute constants kept in vsr20-vsr23
+# (see the xxlor spills bracketing each step).
+.macro QT_loop_8x
+       # QR(v0, v4,  v8, v12, v1, v5,  v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
+       xxlor   0, 32+25, 32+25
+       xxlor   32+25, 20, 20
+       vadduwm 0, 0, 4
+       vadduwm 1, 1, 5
+       vadduwm 2, 2, 6
+       vadduwm 3, 3, 7
+         vadduwm 16, 16, 20
+         vadduwm 17, 17, 21
+         vadduwm 18, 18, 22
+         vadduwm 19, 19, 23
+
+         vpermxor 12, 12, 0, 25
+         vpermxor 13, 13, 1, 25
+         vpermxor 14, 14, 2, 25
+         vpermxor 15, 15, 3, 25
+         vpermxor 28, 28, 16, 25
+         vpermxor 29, 29, 17, 25
+         vpermxor 30, 30, 18, 25
+         vpermxor 31, 31, 19, 25
+       xxlor   32+25, 0, 0
+       vadduwm 8, 8, 12
+       vadduwm 9, 9, 13
+       vadduwm 10, 10, 14
+       vadduwm 11, 11, 15
+         vadduwm 24, 24, 28
+         vadduwm 25, 25, 29
+         vadduwm 26, 26, 30
+         vadduwm 27, 27, 31
+       vxor 4, 4, 8
+       vxor 5, 5, 9
+       vxor 6, 6, 10
+       vxor 7, 7, 11
+         vxor 20, 20, 24
+         vxor 21, 21, 25
+         vxor 22, 22, 26
+         vxor 23, 23, 27
+
+       xxlor   0, 32+25, 32+25
+       xxlor   32+25, 21, 21
+       vrlw 4, 4, 25  #
+       vrlw 5, 5, 25
+       vrlw 6, 6, 25
+       vrlw 7, 7, 25
+         vrlw 20, 20, 25  #
+         vrlw 21, 21, 25
+         vrlw 22, 22, 25
+         vrlw 23, 23, 25
+       xxlor   32+25, 0, 0
+       vadduwm 0, 0, 4
+       vadduwm 1, 1, 5
+       vadduwm 2, 2, 6
+       vadduwm 3, 3, 7
+         vadduwm 16, 16, 20
+         vadduwm 17, 17, 21
+         vadduwm 18, 18, 22
+         vadduwm 19, 19, 23
+
+       xxlor   0, 32+25, 32+25
+       xxlor   32+25, 22, 22
+         vpermxor 12, 12, 0, 25
+         vpermxor 13, 13, 1, 25
+         vpermxor 14, 14, 2, 25
+         vpermxor 15, 15, 3, 25
+         vpermxor 28, 28, 16, 25
+         vpermxor 29, 29, 17, 25
+         vpermxor 30, 30, 18, 25
+         vpermxor 31, 31, 19, 25
+       xxlor   32+25, 0, 0
+       vadduwm 8, 8, 12
+       vadduwm 9, 9, 13
+       vadduwm 10, 10, 14
+       vadduwm 11, 11, 15
+         vadduwm 24, 24, 28
+         vadduwm 25, 25, 29
+         vadduwm 26, 26, 30
+         vadduwm 27, 27, 31
+       xxlor   0, 32+28, 32+28
+       xxlor   32+28, 23, 23
+       vxor 4, 4, 8
+       vxor 5, 5, 9
+       vxor 6, 6, 10
+       vxor 7, 7, 11
+         vxor 20, 20, 24
+         vxor 21, 21, 25
+         vxor 22, 22, 26
+         vxor 23, 23, 27
+       vrlw 4, 4, 28  #
+       vrlw 5, 5, 28
+       vrlw 6, 6, 28
+       vrlw 7, 7, 28
+         vrlw 20, 20, 28  #
+         vrlw 21, 21, 28
+         vrlw 22, 22, 28
+         vrlw 23, 23, 28
+       xxlor   32+28, 0, 0
+
+       # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7,  v8, v13, v3, v4,  v9, v14)
+       xxlor   0, 32+25, 32+25
+       xxlor   32+25, 20, 20
+       vadduwm 0, 0, 5
+       vadduwm 1, 1, 6
+       vadduwm 2, 2, 7
+       vadduwm 3, 3, 4
+         vadduwm 16, 16, 21
+         vadduwm 17, 17, 22
+         vadduwm 18, 18, 23
+         vadduwm 19, 19, 20
+
+         vpermxor 15, 15, 0, 25
+         vpermxor 12, 12, 1, 25
+         vpermxor 13, 13, 2, 25
+         vpermxor 14, 14, 3, 25
+         vpermxor 31, 31, 16, 25
+         vpermxor 28, 28, 17, 25
+         vpermxor 29, 29, 18, 25
+         vpermxor 30, 30, 19, 25
+
+       xxlor   32+25, 0, 0
+       vadduwm 10, 10, 15
+       vadduwm 11, 11, 12
+       vadduwm 8, 8, 13
+       vadduwm 9, 9, 14
+         vadduwm 26, 26, 31
+         vadduwm 27, 27, 28
+         vadduwm 24, 24, 29
+         vadduwm 25, 25, 30
+       vxor 5, 5, 10
+       vxor 6, 6, 11
+       vxor 7, 7, 8
+       vxor 4, 4, 9
+         vxor 21, 21, 26
+         vxor 22, 22, 27
+         vxor 23, 23, 24
+         vxor 20, 20, 25
+
+       xxlor   0, 32+25, 32+25
+       xxlor   32+25, 21, 21
+       vrlw 5, 5, 25
+       vrlw 6, 6, 25
+       vrlw 7, 7, 25
+       vrlw 4, 4, 25
+         vrlw 21, 21, 25
+         vrlw 22, 22, 25
+         vrlw 23, 23, 25
+         vrlw 20, 20, 25
+       xxlor   32+25, 0, 0
+
+       vadduwm 0, 0, 5
+       vadduwm 1, 1, 6
+       vadduwm 2, 2, 7
+       vadduwm 3, 3, 4
+         vadduwm 16, 16, 21
+         vadduwm 17, 17, 22
+         vadduwm 18, 18, 23
+         vadduwm 19, 19, 20
+
+       xxlor   0, 32+25, 32+25
+       xxlor   32+25, 22, 22
+         vpermxor 15, 15, 0, 25
+         vpermxor 12, 12, 1, 25
+         vpermxor 13, 13, 2, 25
+         vpermxor 14, 14, 3, 25
+         vpermxor 31, 31, 16, 25
+         vpermxor 28, 28, 17, 25
+         vpermxor 29, 29, 18, 25
+         vpermxor 30, 30, 19, 25
+       xxlor   32+25, 0, 0
+
+       vadduwm 10, 10, 15
+       vadduwm 11, 11, 12
+       vadduwm 8, 8, 13
+       vadduwm 9, 9, 14
+         vadduwm 26, 26, 31
+         vadduwm 27, 27, 28
+         vadduwm 24, 24, 29
+         vadduwm 25, 25, 30
+
+       xxlor   0, 32+28, 32+28
+       xxlor   32+28, 23, 23
+       vxor 5, 5, 10
+       vxor 6, 6, 11
+       vxor 7, 7, 8
+       vxor 4, 4, 9
+         vxor 21, 21, 26
+         vxor 22, 22, 27
+         vxor 23, 23, 24
+         vxor 20, 20, 25
+       vrlw 5, 5, 28
+       vrlw 6, 6, 28
+       vrlw 7, 7, 28
+       vrlw 4, 4, 28
+         vrlw 21, 21, 28
+         vrlw 22, 22, 28
+         vrlw 23, 23, 28
+         vrlw 20, 20, 28
+       xxlor   32+28, 0, 0
+.endm
+
+# One double-round (column + diagonal) over FOUR ChaCha blocks held in
+# vr0-vr15.  Rotations use the constants loaded in the caller:
+# vr20/vr22 are vpermxor permute tables and vr21/vr23 hold the rotate
+# amounts 12 and 7 (vspltisw 21,12 / vspltisw 23,7 in the caller).
+.macro QT_loop_4x
+       # QR(v0, v4,  v8, v12, v1, v5,  v9, v13, v2, v6, v10, v14, v3, v7, v11, v15)
+       vadduwm 0, 0, 4
+       vadduwm 1, 1, 5
+       vadduwm 2, 2, 6
+       vadduwm 3, 3, 7
+         vpermxor 12, 12, 0, 20
+         vpermxor 13, 13, 1, 20
+         vpermxor 14, 14, 2, 20
+         vpermxor 15, 15, 3, 20
+       vadduwm 8, 8, 12
+       vadduwm 9, 9, 13
+       vadduwm 10, 10, 14
+       vadduwm 11, 11, 15
+       vxor 4, 4, 8
+       vxor 5, 5, 9
+       vxor 6, 6, 10
+       vxor 7, 7, 11
+       vrlw 4, 4, 21
+       vrlw 5, 5, 21
+       vrlw 6, 6, 21
+       vrlw 7, 7, 21
+       vadduwm 0, 0, 4
+       vadduwm 1, 1, 5
+       vadduwm 2, 2, 6
+       vadduwm 3, 3, 7
+         vpermxor 12, 12, 0, 22
+         vpermxor 13, 13, 1, 22
+         vpermxor 14, 14, 2, 22
+         vpermxor 15, 15, 3, 22
+       vadduwm 8, 8, 12
+       vadduwm 9, 9, 13
+       vadduwm 10, 10, 14
+       vadduwm 11, 11, 15
+       vxor 4, 4, 8
+       vxor 5, 5, 9
+       vxor 6, 6, 10
+       vxor 7, 7, 11
+       vrlw 4, 4, 23
+       vrlw 5, 5, 23
+       vrlw 6, 6, 23
+       vrlw 7, 7, 23
+
+       # QR(v0, v5, v10, v15, v1, v6, v11, v12, v2, v7,  v8, v13, v3, v4,  v9, v14)
+       vadduwm 0, 0, 5
+       vadduwm 1, 1, 6
+       vadduwm 2, 2, 7
+       vadduwm 3, 3, 4
+         vpermxor 15, 15, 0, 20
+         vpermxor 12, 12, 1, 20
+         vpermxor 13, 13, 2, 20
+         vpermxor 14, 14, 3, 20
+       vadduwm 10, 10, 15
+       vadduwm 11, 11, 12
+       vadduwm 8, 8, 13
+       vadduwm 9, 9, 14
+       vxor 5, 5, 10
+       vxor 6, 6, 11
+       vxor 7, 7, 8
+       vxor 4, 4, 9
+       vrlw 5, 5, 21
+       vrlw 6, 6, 21
+       vrlw 7, 7, 21
+       vrlw 4, 4, 21
+       vadduwm 0, 0, 5
+       vadduwm 1, 1, 6
+       vadduwm 2, 2, 7
+       vadduwm 3, 3, 4
+         vpermxor 15, 15, 0, 22
+         vpermxor 12, 12, 1, 22
+         vpermxor 13, 13, 2, 22
+         vpermxor 14, 14, 3, 22
+       vadduwm 10, 10, 15
+       vadduwm 11, 11, 12
+       vadduwm 8, 8, 13
+       vadduwm 9, 9, 14
+       vxor 5, 5, 10
+       vxor 6, 6, 11
+       vxor 7, 7, 8
+       vxor 4, 4, 9
+       vrlw 5, 5, 23
+       vrlw 6, 6, 23
+       vrlw 7, 7, 23
+       vrlw 4, 4, 23
+.endm
+
+# Transpose
+# 4x4 word transpose of the four vectors \a0..\a3 (state words spread
+# across blocks -> contiguous per-block words), using vsr10-vsr13 as
+# scratch.
+.macro TP_4x a0 a1 a2 a3
+       xxmrghw  10, 32+\a0, 32+\a1     # a0, a1, b0, b1
+       xxmrghw  11, 32+\a2, 32+\a3     # a2, a3, b2, b3
+       xxmrglw  12, 32+\a0, 32+\a1     # c0, c1, d0, d1
+       xxmrglw  13, 32+\a2, 32+\a3     # c2, c3, d2, d3
+       xxpermdi        32+\a0, 10, 11, 0       # a0, a1, a2, a3
+       xxpermdi        32+\a1, 10, 11, 3       # b0, b1, b2, b3
+       xxpermdi        32+\a2, 12, 13, 0       # c0, c1, c2, c3
+       xxpermdi        32+\a3, 12, 13, 3       # d0, d1, d2, d3
+.endm
+
+# key stream = working state + state
+# key stream = working state + state
+# Add the saved input state (constants/key/counter-nonce, kept in
+# vr(16-\S)..vr(19-\S)) back onto the 16 working-state vectors
+# vr\S+0..vr\S+15.  \S is 0 for the first 4-block lane and 16 for the
+# second, which is why the state registers are addressed as 16-\S etc.
+.macro Add_state S
+       vadduwm \S+0, \S+0, 16-\S
+       vadduwm \S+4, \S+4, 17-\S
+       vadduwm \S+8, \S+8, 18-\S
+       vadduwm \S+12, \S+12, 19-\S
+
+       vadduwm \S+1, \S+1, 16-\S
+       vadduwm \S+5, \S+5, 17-\S
+       vadduwm \S+9, \S+9, 18-\S
+       vadduwm \S+13, \S+13, 19-\S
+
+       vadduwm \S+2, \S+2, 16-\S
+       vadduwm \S+6, \S+6, 18-\S
+       vadduwm \S+10, \S+10, 18-\S
+       vadduwm \S+14, \S+14, 19-\S
+
+       vadduwm \S+3, \S+3, 16-\S
+       vadduwm \S+7, \S+7, 17-\S
+       vadduwm \S+11, \S+11, 18-\S
+       vadduwm \S+15, \S+15, 19-\S
+.endm
+
+#
+# write 256 bytes
+#
+# XOR 256 bytes of key stream (vsr\S+32..\S+47) with the input at
+# r5+r14 and store the result to the output at r4+r14.  r17-r31 hold
+# the byte offsets 16,32,...,240 set up in the function prologue; r14
+# is the running in/out offset.
+.macro Write_256 S
+       add 9, 14, 5
+       add 16, 14, 4
+       lxvw4x 0, 0, 9
+       lxvw4x 1, 17, 9
+       lxvw4x 2, 18, 9
+       lxvw4x 3, 19, 9
+       lxvw4x 4, 20, 9
+       lxvw4x 5, 21, 9
+       lxvw4x 6, 22, 9
+       lxvw4x 7, 23, 9
+       lxvw4x 8, 24, 9
+       lxvw4x 9, 25, 9
+       lxvw4x 10, 26, 9
+       lxvw4x 11, 27, 9
+       lxvw4x 12, 28, 9
+       lxvw4x 13, 29, 9
+       lxvw4x 14, 30, 9
+       lxvw4x 15, 31, 9
+
+       xxlxor \S+32, \S+32, 0
+       xxlxor \S+36, \S+36, 1
+       xxlxor \S+40, \S+40, 2
+       xxlxor \S+44, \S+44, 3
+       xxlxor \S+33, \S+33, 4
+       xxlxor \S+37, \S+37, 5
+       xxlxor \S+41, \S+41, 6
+       xxlxor \S+45, \S+45, 7
+       xxlxor \S+34, \S+34, 8
+       xxlxor \S+38, \S+38, 9
+       xxlxor \S+42, \S+42, 10
+       xxlxor \S+46, \S+46, 11
+       xxlxor \S+35, \S+35, 12
+       xxlxor \S+39, \S+39, 13
+       xxlxor \S+43, \S+43, 14
+       xxlxor \S+47, \S+47, 15
+
+       stxvw4x \S+32, 0, 16
+       stxvw4x \S+36, 17, 16
+       stxvw4x \S+40, 18, 16
+       stxvw4x \S+44, 19, 16
+
+       stxvw4x \S+33, 20, 16
+       stxvw4x \S+37, 21, 16
+       stxvw4x \S+41, 22, 16
+       stxvw4x \S+45, 23, 16
+
+       stxvw4x \S+34, 24, 16
+       stxvw4x \S+38, 25, 16
+       stxvw4x \S+42, 26, 16
+       stxvw4x \S+46, 27, 16
+
+       stxvw4x \S+35, 28, 16
+       stxvw4x \S+39, 29, 16
+       stxvw4x \S+43, 30, 16
+       stxvw4x \S+47, 31, 16
+
+.endm
+
+#
+# chacha20_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds);
+#
+# chacha_p10le_8x(u32 *state, byte *dst, const byte *src, size_t len, int nrounds)
+#   r3 = state, r4 = dst, r5 = src, r6 = len, r7 = nrounds
+# Loop_8x consumes 512 bytes per iteration (two interleaved 4-block
+# lanes); Loop_4x handles the remaining 256-byte chunks.  Returns 0 in
+# r3 when len == 0 (Out_no_chacha).
+SYM_FUNC_START(chacha_p10le_8x)
+.align 5
+       cmpdi   6, 0
+       ble     Out_no_chacha
+
+       SAVE_REGS
+
+       # r17 - r31 mainly for Write_256 macro.
+       li      17, 16
+       li      18, 32
+       li      19, 48
+       li      20, 64
+       li      21, 80
+       li      22, 96
+       li      23, 112
+       li      24, 128
+       li      25, 144
+       li      26, 160
+       li      27, 176
+       li      28, 192
+       li      29, 208
+       li      30, 224
+       li      31, 240
+
+       mr 15, 6                        # len
+       li 14, 0                        # offset to inp and outp
+
+        lxvw4x 48, 0, 3                #  vr16, constants
+       lxvw4x  49, 17, 3               #  vr17, key 1
+       lxvw4x  50, 18, 3               #  vr18, key 2
+       lxvw4x  51, 19, 3               #  vr19, counter, nonce
+
+       # create (0, 1, 2, 3) counters
+       vspltisw 0, 0
+       vspltisw 1, 1
+       vspltisw 2, 2
+       vspltisw 3, 3
+       vmrghw  4, 0, 1
+       vmrglw  5, 2, 3
+       vsldoi  30, 4, 5, 8             # vr30 counter, 4 (0, 1, 2, 3)
+
+       vspltisw 21, 12
+       vspltisw 23, 7
+
+       # vpermxor permute tables for the two fixed rotations
+       addis   11, 2, permx@toc@ha
+       addi    11, 11, permx@toc@l
+       lxvw4x  32+20, 0, 11
+       lxvw4x  32+22, 17, 11
+
+       sradi   8, 7, 1                 # loop count = nrounds / 2
+
+       mtctr 8
+
+       # save constants to vsx
+       xxlor   16, 48, 48
+       xxlor   17, 49, 49
+       xxlor   18, 50, 50
+       xxlor   19, 51, 51
+
+       vspltisw 25, 4
+       vspltisw 26, 8
+
+       xxlor   25, 32+26, 32+26
+       xxlor   24, 32+25, 32+25
+
+       vadduwm 31, 30, 25              # counter = (0, 1, 2, 3) + (4, 4, 4, 4)
+       xxlor   30, 32+30, 32+30
+       xxlor   31, 32+31, 32+31
+
+       xxlor   20, 32+20, 32+20
+       xxlor   21, 32+21, 32+21
+       xxlor   22, 32+22, 32+22
+       xxlor   23, 32+23, 32+23
+
+       # less than 512 bytes: skip straight to the 4x path
+       cmpdi   6, 512
+       blt     Loop_last
+
+Loop_8x:
+       # broadcast the state words into both 4-block lanes
+       xxspltw  32+0, 16, 0
+       xxspltw  32+1, 16, 1
+       xxspltw  32+2, 16, 2
+       xxspltw  32+3, 16, 3
+
+       xxspltw  32+4, 17, 0
+       xxspltw  32+5, 17, 1
+       xxspltw  32+6, 17, 2
+       xxspltw  32+7, 17, 3
+       xxspltw  32+8, 18, 0
+       xxspltw  32+9, 18, 1
+       xxspltw  32+10, 18, 2
+       xxspltw  32+11, 18, 3
+       xxspltw  32+12, 19, 0
+       xxspltw  32+13, 19, 1
+       xxspltw  32+14, 19, 2
+       xxspltw  32+15, 19, 3
+       vadduwm 12, 12, 30      # increase counter
+
+       xxspltw  32+16, 16, 0
+       xxspltw  32+17, 16, 1
+       xxspltw  32+18, 16, 2
+       xxspltw  32+19, 16, 3
+
+       xxspltw  32+20, 17, 0
+       xxspltw  32+21, 17, 1
+       xxspltw  32+22, 17, 2
+       xxspltw  32+23, 17, 3
+       xxspltw  32+24, 18, 0
+       xxspltw  32+25, 18, 1
+       xxspltw  32+26, 18, 2
+       xxspltw  32+27, 18, 3
+       xxspltw  32+28, 19, 0
+       xxspltw  32+29, 19, 1
+       vadduwm 28, 28, 31      # increase counter
+       xxspltw  32+30, 19, 2
+       xxspltw  32+31, 19, 3
+
+.align 5
+quarter_loop_8x:
+       QT_loop_8x
+
+       bdnz    quarter_loop_8x
+
+       # add the per-block counters back into word 12 of lane 1
+       xxlor   0, 32+30, 32+30
+       xxlor   32+30, 30, 30
+       vadduwm 12, 12, 30
+       xxlor   32+30, 0, 0
+       TP_4x 0, 1, 2, 3
+       TP_4x 4, 5, 6, 7
+       TP_4x 8, 9, 10, 11
+       TP_4x 12, 13, 14, 15
+
+       # stash vr16-19 (lane 2 state) so Add_state/Write_256 for lane 1
+       # can use them for the saved input state
+       xxlor   0, 48, 48
+       xxlor   1, 49, 49
+       xxlor   2, 50, 50
+       xxlor   3, 51, 51
+       xxlor   48, 16, 16
+       xxlor   49, 17, 17
+       xxlor   50, 18, 18
+       xxlor   51, 19, 19
+       Add_state 0
+       xxlor   48, 0, 0
+       xxlor   49, 1, 1
+       xxlor   50, 2, 2
+       xxlor   51, 3, 3
+       Write_256 0
+       addi    14, 14, 256     # offset +=256
+       addi    15, 15, -256    # len -=256
+
+       # same for the second lane, with its own counter (vsr31)
+       xxlor   5, 32+31, 32+31
+       xxlor   32+31, 31, 31
+       vadduwm 28, 28, 31
+       xxlor   32+31, 5, 5
+       TP_4x 16+0, 16+1, 16+2, 16+3
+       TP_4x 16+4, 16+5, 16+6, 16+7
+       TP_4x 16+8, 16+9, 16+10, 16+11
+       TP_4x 16+12, 16+13, 16+14, 16+15
+
+       xxlor   32, 16, 16
+       xxlor   33, 17, 17
+       xxlor   34, 18, 18
+       xxlor   35, 19, 19
+       Add_state 16
+       Write_256 16
+       addi    14, 14, 256     # offset +=256
+       addi    15, 15, -256    # len -=256
+
+       # advance both counter vectors by 8 blocks for the next pass
+       xxlor   32+24, 24, 24
+       xxlor   32+25, 25, 25
+       xxlor   32+30, 30, 30
+       vadduwm 30, 30, 25
+       vadduwm 31, 30, 24
+       xxlor   30, 32+30, 32+30
+       xxlor   31, 32+31, 32+31
+
+       cmpdi   15, 0
+       beq     Out_loop
+
+       cmpdi   15, 512
+       blt     Loop_last
+
+       mtctr 8
+       b Loop_8x
+
+Loop_last:
+        lxvw4x 48, 0, 3                #  vr16, constants
+       lxvw4x  49, 17, 3               #  vr17, key 1
+       lxvw4x  50, 18, 3               #  vr18, key 2
+       lxvw4x  51, 19, 3               #  vr19, counter, nonce
+
+       vspltisw 21, 12
+       vspltisw 23, 7
+       addis   11, 2, permx@toc@ha
+       addi    11, 11, permx@toc@l
+       lxvw4x  32+20, 0, 11
+       lxvw4x  32+22, 17, 11
+
+       sradi   8, 7, 1
+       mtctr 8
+
+Loop_4x:
+       vspltw  0, 16, 0
+       vspltw  1, 16, 1
+       vspltw  2, 16, 2
+       vspltw  3, 16, 3
+
+       vspltw  4, 17, 0
+       vspltw  5, 17, 1
+       vspltw  6, 17, 2
+       vspltw  7, 17, 3
+       vspltw  8, 18, 0
+       vspltw  9, 18, 1
+       vspltw  10, 18, 2
+       vspltw  11, 18, 3
+       vspltw  12, 19, 0
+       vadduwm 12, 12, 30      # increase counter
+       vspltw  13, 19, 1
+       vspltw  14, 19, 2
+       vspltw  15, 19, 3
+
+.align 5
+quarter_loop:
+       QT_loop_4x
+
+       bdnz    quarter_loop
+
+       vadduwm 12, 12, 30
+       TP_4x 0, 1, 2, 3
+       TP_4x 4, 5, 6, 7
+       TP_4x 8, 9, 10, 11
+       TP_4x 12, 13, 14, 15
+
+       Add_state 0
+       Write_256 0
+       addi    14, 14, 256     # offset += 256
+       addi    15, 15, -256    # len -= 256
+
+       # Update state counter
+       vspltisw 25, 4
+       vadduwm 30, 30, 25
+
+       cmpdi   15, 0
+       beq     Out_loop
+       cmpdi   15, 256
+       blt     Out_loop
+
+       mtctr 8
+       b Loop_4x
+
+Out_loop:
+       RESTORE_REGS
+       blr
+
+Out_no_chacha:
+       li      3, 0
+       blr
+SYM_FUNC_END(chacha_p10le_8x)
+
+# Byte-permute patterns loaded into vr20/vr22 and used with vpermxor in
+# the quarter-round macros; they appear to implement the ChaCha 16-bit
+# and 8-bit word rotations respectively (NOTE(review): confirm against
+# the vpermxor nibble-index semantics).
+SYM_DATA_START_LOCAL(PERMX)
+.align 5
+permx:
+.long 0x22330011, 0x66774455, 0xaabb8899, 0xeeffccdd
+.long 0x11223300, 0x55667744, 0x99aabb88, 0xddeeffcc
+SYM_DATA_END(PERMX)
diff --git a/arch/powerpc/crypto/poly1305-p10-glue.c b/arch/powerpc/crypto/poly1305-p10-glue.c
new file mode 100644 (file)
index 0000000..95dd708
--- /dev/null
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Poly1305 authenticator algorithm, RFC7539.
+ *
+ * Copyright 2023- IBM Corp. All rights reserved.
+ */
+
+#include <crypto/algapi.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jump_label.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/poly1305.h>
+#include <crypto/internal/simd.h>
+#include <linux/cpufeature.h>
+#include <asm/unaligned.h>
+#include <asm/simd.h>
+#include <asm/switch_to.h>
+
+asmlinkage void poly1305_p10le_4blocks(void *h, const u8 *m, u32 mlen);
+asmlinkage void poly1305_64s(void *h, const u8 *m, u32 mlen, int highbit);
+asmlinkage void poly1305_emit_64(void *h, void *s, u8 *dst);
+
+/*
+ * Bracket a region of kernel-mode VSX use: preemption is disabled for
+ * the whole time the vector unit is enabled so the vector state cannot
+ * be lost to a context switch.
+ */
+static void vsx_begin(void)
+{
+       preempt_disable();
+       enable_kernel_vsx();
+}
+
+/* Counterpart of vsx_begin(): drop VSX, then re-enable preemption. */
+static void vsx_end(void)
+{
+       disable_kernel_vsx();
+       preempt_enable();
+}
+
+/*
+ * .init for the shash: reset the accumulator and mark both halves of
+ * the one-time key (r and s) as not yet consumed; the key material
+ * arrives in-band as the first two blocks of update() data.
+ */
+static int crypto_poly1305_p10_init(struct shash_desc *desc)
+{
+       struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       poly1305_core_init(&dctx->h);
+       dctx->buflen = 0;
+       dctx->rset = 0;
+       dctx->sset = false;
+
+       return 0;
+}
+
+/*
+ * Consume up to two leading 16-byte blocks of @inp as key material
+ * while the key is not fully set: the first block becomes r
+ * (dctx->core_r), the second becomes s (dctx->s).  Returns the number
+ * of input bytes consumed (0, 16 or 32) so the caller can skip them.
+ */
+static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx,
+                                              const u8 *inp, unsigned int len)
+{
+       unsigned int acc = 0;
+
+       if (unlikely(!dctx->sset)) {
+               if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) {
+                       struct poly1305_core_key *key = &dctx->core_r;
+
+                       key->key.r64[0] = get_unaligned_le64(&inp[0]);
+                       key->key.r64[1] = get_unaligned_le64(&inp[8]);
+                       inp += POLY1305_BLOCK_SIZE;
+                       len -= POLY1305_BLOCK_SIZE;
+                       acc += POLY1305_BLOCK_SIZE;
+                       dctx->rset = 1;
+               }
+               if (len >= POLY1305_BLOCK_SIZE) {
+                       dctx->s[0] = get_unaligned_le32(&inp[0]);
+                       dctx->s[1] = get_unaligned_le32(&inp[4]);
+                       dctx->s[2] = get_unaligned_le32(&inp[8]);
+                       dctx->s[3] = get_unaligned_le32(&inp[12]);
+                       acc += POLY1305_BLOCK_SIZE;
+                       dctx->sset = true;
+               }
+       }
+       return acc;
+}
+
+/*
+ * .update for the shash.  Drains any buffered partial block first,
+ * then consumes whole blocks: the P10 4-block VSX routine when SIMD is
+ * usable and at least 4 blocks remain, falling back to the scalar
+ * 64-bit routine one block at a time.  A trailing partial block is
+ * buffered for the next update()/final().
+ */
+static int crypto_poly1305_p10_update(struct shash_desc *desc,
+                                     const u8 *src, unsigned int srclen)
+{
+       struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+       unsigned int bytes, used;
+
+       if (unlikely(dctx->buflen)) {
+               bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
+               memcpy(dctx->buf + dctx->buflen, src, bytes);
+               src += bytes;
+               srclen -= bytes;
+               dctx->buflen += bytes;
+
+               if (dctx->buflen == POLY1305_BLOCK_SIZE) {
+                       /* a full buffered block may still be key material */
+                       if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf,
+                                                              POLY1305_BLOCK_SIZE))) {
+                               vsx_begin();
+                               poly1305_64s(&dctx->h, dctx->buf,
+                                                 POLY1305_BLOCK_SIZE, 1);
+                               vsx_end();
+                       }
+                       dctx->buflen = 0;
+               }
+       }
+
+       if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+               bytes = round_down(srclen, POLY1305_BLOCK_SIZE);
+               used = crypto_poly1305_setdctxkey(dctx, src, bytes);
+               if (likely(used)) {
+                       srclen -= used;
+                       src += used;
+               }
+               if (crypto_simd_usable() && (srclen >= POLY1305_BLOCK_SIZE*4)) {
+                       vsx_begin();
+                       poly1305_p10le_4blocks(&dctx->h, src, srclen);
+                       vsx_end();
+                       /* the 4-block routine consumed the whole-multiple-of-64
+                        * prefix; step past it and keep the remainder */
+                       src += srclen - (srclen % (POLY1305_BLOCK_SIZE * 4));
+                       srclen %= POLY1305_BLOCK_SIZE * 4;
+               }
+               while (srclen >= POLY1305_BLOCK_SIZE) {
+                       vsx_begin();
+                       poly1305_64s(&dctx->h, src, POLY1305_BLOCK_SIZE, 1);
+                       vsx_end();
+                       srclen -= POLY1305_BLOCK_SIZE;
+                       src += POLY1305_BLOCK_SIZE;
+               }
+       }
+
+       if (unlikely(srclen)) {
+               /* buffer the sub-block tail for the next call */
+               dctx->buflen = srclen;
+               memcpy(dctx->buf, src, srclen);
+       }
+
+       return 0;
+}
+
+/*
+ * .final for the shash: fails with -ENOKEY if the in-band key was
+ * never fully provided.  A buffered tail is padded with 0x01 followed
+ * by zeros and processed with highbit=0 (the pad byte supplies the
+ * high bit), then the tag is emitted into @dst.
+ */
+static int crypto_poly1305_p10_final(struct shash_desc *desc, u8 *dst)
+{
+       struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       if (unlikely(!dctx->sset))
+               return -ENOKEY;
+
+       if ((dctx->buflen)) {
+               dctx->buf[dctx->buflen++] = 1;
+               memset(dctx->buf + dctx->buflen, 0,
+                      POLY1305_BLOCK_SIZE - dctx->buflen);
+               vsx_begin();
+               poly1305_64s(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+               vsx_end();
+               dctx->buflen = 0;
+       }
+
+       poly1305_emit_64(&dctx->h, &dctx->s, dst);
+       return 0;
+}
+
+/* shash registration: "poly1305" with in-band keying (no .setkey). */
+static struct shash_alg poly1305_alg = {
+       .digestsize     = POLY1305_DIGEST_SIZE,
+       .init           = crypto_poly1305_p10_init,
+       .update         = crypto_poly1305_p10_update,
+       .final          = crypto_poly1305_p10_final,
+       .descsize       = sizeof(struct poly1305_desc_ctx),
+       .base           = {
+               .cra_name               = "poly1305",
+               .cra_driver_name        = "poly1305-p10",
+               .cra_priority           = 300,
+               .cra_blocksize          = POLY1305_BLOCK_SIZE,
+               .cra_module             = THIS_MODULE,
+       },
+};
+
+/* Module entry: register the shash.  Only loaded on CPUs advertising
+ * the P10 feature (see module_cpu_feature_match below). */
+static int __init poly1305_p10_init(void)
+{
+       return crypto_register_shash(&poly1305_alg);
+}
+
+/* Module exit: unregister the shash. */
+static void __exit poly1305_p10_exit(void)
+{
+       crypto_unregister_shash(&poly1305_alg);
+}
+
+module_cpu_feature_match(PPC_MODULE_FEATURE_P10, poly1305_p10_init);
+module_exit(poly1305_p10_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
+MODULE_DESCRIPTION("Optimized Poly1305 for P10");
+MODULE_ALIAS_CRYPTO("poly1305");
+MODULE_ALIAS_CRYPTO("poly1305-p10");
diff --git a/arch/powerpc/crypto/poly1305-p10le_64.S b/arch/powerpc/crypto/poly1305-p10le_64.S
new file mode 100644 (file)
index 0000000..a3c1987
--- /dev/null
@@ -0,0 +1,1075 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#
+# Accelerated poly1305 implementation for ppc64le.
+#
+# Copyright 2023- IBM Corp. All rights reserved
+#
+#===================================================================================
+# Written by Danny Tsen <dtsen@us.ibm.com>
+#
+# Poly1305 - this version mainly using vector/VSX/Scalar
+#  - 26 bits limbs
+#  - Handle multiple 64 byte blocks.
+#
+# Block size 16 bytes
+# key = (r, s)
+# clamp r &= 0x0FFFFFFC0FFFFFFC 0x0FFFFFFC0FFFFFFF
+# p = 2^130 - 5
+# a += m
+# a = (r + a) % p
+# a += s
+#
+# Improve performance by breaking down polynomial to the sum of products with
+#     h4 = m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r
+#
+#  07/22/21 - this revision based on the above sum of products.  Setup r^4, r^3, r^2, r and s3, s2, s1, s0
+#             to 9 vectors for multiplications.
+#
+# setup r^4, r^3, r^2, r vectors
+#    vs    [r^1, r^3, r^2, r^4]
+#    vs0 = [r0,.....]
+#    vs1 = [r1,.....]
+#    vs2 = [r2,.....]
+#    vs3 = [r3,.....]
+#    vs4 = [r4,.....]
+#    vs5 = [r1*5,...]
+#    vs6 = [r2*5,...]
+#    vs7 = [r3*5,...]
+#    vs8 = [r4*5,...]
+#
+#  Each word in a vector consists of a member of a "r/s" in [a * r/s].
+#
+# r0, r4*5, r3*5, r2*5, r1*5;
+# r1, r0,   r4*5, r3*5, r2*5;
+# r2, r1,   r0,   r4*5, r3*5;
+# r3, r2,   r1,   r0,   r4*5;
+# r4, r3,   r2,   r1,   r0  ;
+#
+#
+# poly1305_p10le_4blocks( uint8_t *k, uint32_t mlen, uint8_t *m)
+#  k = 32 bytes key
+#  r3 = k (r, s)
+#  r4 = mlen
+#  r5 = m
+#
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
+#include <linux/linkage.h>
+
+.machine "any"
+
+.text
+
+# Single-register save/restore helpers used by SAVE_REGS/RESTORE_REGS.
+# The VRS/VSX variants clobber r16 as the offset register, so r16 must
+# not hold live data while they run.
+.macro SAVE_GPR GPR OFFSET FRAME
+       std     \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro SAVE_VRS VRS OFFSET FRAME
+       li      16, \OFFSET
+       stvx    \VRS, 16, \FRAME
+.endm
+
+.macro SAVE_VSX VSX OFFSET FRAME
+       li      16, \OFFSET
+       stxvx   \VSX, 16, \FRAME
+.endm
+
+.macro RESTORE_GPR GPR OFFSET FRAME
+       ld      \GPR,\OFFSET(\FRAME)
+.endm
+
+.macro RESTORE_VRS VRS OFFSET FRAME
+       li      16, \OFFSET
+       lvx     \VRS, 16, \FRAME
+.endm
+
+.macro RESTORE_VSX VSX OFFSET FRAME
+       li      16, \OFFSET
+       lxvx    \VSX, 16, \FRAME
+.endm
+
+# Prologue: save LR, allocate a 752-byte frame, and spill the
+# non-volatile registers used by this file: r14-r31 at r1+112, then
+# vr20-vr31 and vsr14-vsr31 in the vector area starting at r1+256.
+# RESTORE_REGS below must mirror this layout exactly.
+.macro SAVE_REGS
+       mflr 0
+       std 0, 16(1)
+       stdu 1,-752(1)
+
+       SAVE_GPR 14, 112, 1
+       SAVE_GPR 15, 120, 1
+       SAVE_GPR 16, 128, 1
+       SAVE_GPR 17, 136, 1
+       SAVE_GPR 18, 144, 1
+       SAVE_GPR 19, 152, 1
+       SAVE_GPR 20, 160, 1
+       SAVE_GPR 21, 168, 1
+       SAVE_GPR 22, 176, 1
+       SAVE_GPR 23, 184, 1
+       SAVE_GPR 24, 192, 1
+       SAVE_GPR 25, 200, 1
+       SAVE_GPR 26, 208, 1
+       SAVE_GPR 27, 216, 1
+       SAVE_GPR 28, 224, 1
+       SAVE_GPR 29, 232, 1
+       SAVE_GPR 30, 240, 1
+       SAVE_GPR 31, 248, 1
+
+       addi    9, 1, 256
+       SAVE_VRS 20, 0, 9
+       SAVE_VRS 21, 16, 9
+       SAVE_VRS 22, 32, 9
+       SAVE_VRS 23, 48, 9
+       SAVE_VRS 24, 64, 9
+       SAVE_VRS 25, 80, 9
+       SAVE_VRS 26, 96, 9
+       SAVE_VRS 27, 112, 9
+       SAVE_VRS 28, 128, 9
+       SAVE_VRS 29, 144, 9
+       SAVE_VRS 30, 160, 9
+       SAVE_VRS 31, 176, 9
+
+       SAVE_VSX 14, 192, 9
+       SAVE_VSX 15, 208, 9
+       SAVE_VSX 16, 224, 9
+       SAVE_VSX 17, 240, 9
+       SAVE_VSX 18, 256, 9
+       SAVE_VSX 19, 272, 9
+       SAVE_VSX 20, 288, 9
+       SAVE_VSX 21, 304, 9
+       SAVE_VSX 22, 320, 9
+       SAVE_VSX 23, 336, 9
+       SAVE_VSX 24, 352, 9
+       SAVE_VSX 25, 368, 9
+       SAVE_VSX 26, 384, 9
+       SAVE_VSX 27, 400, 9
+       SAVE_VSX 28, 416, 9
+       SAVE_VSX 29, 432, 9
+       SAVE_VSX 30, 448, 9
+       SAVE_VSX 31, 464, 9
+.endm # SAVE_REGS
+
+# Epilogue: reload vr20-vr31, vsr14-vsr31 and r14-r31 from the frame
+# laid out by SAVE_REGS, pop the 752-byte frame and restore LR.
+.macro RESTORE_REGS
+       addi    9, 1, 256
+       RESTORE_VRS 20, 0, 9
+       RESTORE_VRS 21, 16, 9
+       RESTORE_VRS 22, 32, 9
+       RESTORE_VRS 23, 48, 9
+       RESTORE_VRS 24, 64, 9
+       RESTORE_VRS 25, 80, 9
+       RESTORE_VRS 26, 96, 9
+       RESTORE_VRS 27, 112, 9
+       RESTORE_VRS 28, 128, 9
+       RESTORE_VRS 29, 144, 9
+       RESTORE_VRS 30, 160, 9
+       RESTORE_VRS 31, 176, 9
+
+       RESTORE_VSX 14, 192, 9
+       RESTORE_VSX 15, 208, 9
+       RESTORE_VSX 16, 224, 9
+       RESTORE_VSX 17, 240, 9
+       RESTORE_VSX 18, 256, 9
+       RESTORE_VSX 19, 272, 9
+       RESTORE_VSX 20, 288, 9
+       RESTORE_VSX 21, 304, 9
+       RESTORE_VSX 22, 320, 9
+       RESTORE_VSX 23, 336, 9
+       RESTORE_VSX 24, 352, 9
+       RESTORE_VSX 25, 368, 9
+       RESTORE_VSX 26, 384, 9
+       RESTORE_VSX 27, 400, 9
+       RESTORE_VSX 28, 416, 9
+       RESTORE_VSX 29, 432, 9
+       RESTORE_VSX 30, 448, 9
+       RESTORE_VSX 31, 464, 9
+
+       RESTORE_GPR 14, 112, 1
+       RESTORE_GPR 15, 120, 1
+       RESTORE_GPR 16, 128, 1
+       RESTORE_GPR 17, 136, 1
+       RESTORE_GPR 18, 144, 1
+       RESTORE_GPR 19, 152, 1
+       RESTORE_GPR 20, 160, 1
+       RESTORE_GPR 21, 168, 1
+       RESTORE_GPR 22, 176, 1
+       RESTORE_GPR 23, 184, 1
+       RESTORE_GPR 24, 192, 1
+       RESTORE_GPR 25, 200, 1
+       RESTORE_GPR 26, 208, 1
+       RESTORE_GPR 27, 216, 1
+       RESTORE_GPR 28, 224, 1
+       RESTORE_GPR 29, 232, 1
+       RESTORE_GPR 30, 240, 1
+       RESTORE_GPR 31, 248, 1
+
+       addi    1, 1, 752
+       ld 0, 16(1)
+       mtlr 0
+.endm # RESTORE_REGS
+
+#
+# p[0] = a0*r0 + a1*r4*5 + a2*r3*5 + a3*r2*5 + a4*r1*5;
+# p[1] = a0*r1 + a1*r0   + a2*r4*5 + a3*r3*5 + a4*r2*5;
+# p[2] = a0*r2 + a1*r1   + a2*r0   + a3*r4*5 + a4*r3*5;
+# p[3] = a0*r3 + a1*r2   + a2*r1   + a3*r0   + a4*r4*5;
+# p[4] = a0*r4 + a1*r3   + a2*r2   + a3*r1   + a4*r0  ;
+#
+#    [r^2, r^3, r^1, r^4]
+#    [m3,  m2,  m4,  m1]
+#
+# multiply odd and even words
+# Multiply using the odd 32-bit halves of each 64-bit lane (vmulouw):
+# computes the five product columns p[0]..p[4] from the table above and
+# leaves them in v14-v18 (commented x0..x4).  v4-v8 hold the message
+# limbs; v26-v30 and v0-v3 hold the r limbs and r*5 values prepared by
+# poly1305_setup_r.  Clobbers v10-v13 as scratch.
+.macro mul_odd
+       vmulouw 14, 4, 26
+       vmulouw 10, 5, 3
+       vmulouw 11, 6, 2
+       vmulouw 12, 7, 1
+       vmulouw 13, 8, 0
+       vmulouw 15, 4, 27
+       vaddudm 14, 14, 10
+       vaddudm 14, 14, 11
+       vmulouw 10, 5, 26
+       vmulouw 11, 6, 3
+       vaddudm 14, 14, 12
+       vaddudm 14, 14, 13      # x0
+       vaddudm 15, 15, 10
+       vaddudm 15, 15, 11
+       vmulouw 12, 7, 2
+       vmulouw 13, 8, 1
+       vaddudm 15, 15, 12
+       vaddudm 15, 15, 13      # x1
+       vmulouw 16, 4, 28
+       vmulouw 10, 5, 27
+       vmulouw 11, 6, 26
+       vaddudm 16, 16, 10
+       vaddudm 16, 16, 11
+       vmulouw 12, 7, 3
+       vmulouw 13, 8, 2
+       vaddudm 16, 16, 12
+       vaddudm 16, 16, 13      # x2
+       vmulouw 17, 4, 29
+       vmulouw 10, 5, 28
+       vmulouw 11, 6, 27
+       vaddudm 17, 17, 10
+       vaddudm 17, 17, 11
+       vmulouw 12, 7, 26
+       vmulouw 13, 8, 3
+       vaddudm 17, 17, 12
+       vaddudm 17, 17, 13      # x3
+       vmulouw 18, 4, 30
+       vmulouw 10, 5, 29
+       vmulouw 11, 6, 28
+       vaddudm 18, 18, 10
+       vaddudm 18, 18, 11
+       vmulouw 12, 7, 27
+       vmulouw 13, 8, 26
+       vaddudm 18, 18, 12
+       vaddudm 18, 18, 13      # x4
+.endm
+
+# Same five product columns as mul_odd, but using the even 32-bit
+# halves of each lane (vmuleuw), ACCUMULATED onto the x0..x4 totals
+# already in v14-v18 — so mul_odd must run first.  Clobbers v9-v13.
+.macro mul_even
+       vmuleuw 9, 4, 26
+       vmuleuw 10, 5, 3
+       vmuleuw 11, 6, 2
+       vmuleuw 12, 7, 1
+       vmuleuw 13, 8, 0
+       vaddudm 14, 14, 9
+       vaddudm 14, 14, 10
+       vaddudm 14, 14, 11
+       vaddudm 14, 14, 12
+       vaddudm 14, 14, 13      # x0
+
+       vmuleuw 9, 4, 27
+       vmuleuw 10, 5, 26
+       vmuleuw 11, 6, 3
+       vmuleuw 12, 7, 2
+       vmuleuw 13, 8, 1
+       vaddudm 15, 15, 9
+       vaddudm 15, 15, 10
+       vaddudm 15, 15, 11
+       vaddudm 15, 15, 12
+       vaddudm 15, 15, 13      # x1
+
+       vmuleuw 9, 4, 28
+       vmuleuw 10, 5, 27
+       vmuleuw 11, 6, 26
+       vmuleuw 12, 7, 3
+       vmuleuw 13, 8, 2
+       vaddudm 16, 16, 9
+       vaddudm 16, 16, 10
+       vaddudm 16, 16, 11
+       vaddudm 16, 16, 12
+       vaddudm 16, 16, 13      # x2
+
+       vmuleuw 9, 4, 29
+       vmuleuw 10, 5, 28
+       vmuleuw 11, 6, 27
+       vmuleuw 12, 7, 26
+       vmuleuw 13, 8, 3
+       vaddudm 17, 17, 9
+       vaddudm 17, 17, 10
+       vaddudm 17, 17, 11
+       vaddudm 17, 17, 12
+       vaddudm 17, 17, 13      # x3
+
+       vmuleuw 9, 4, 30
+       vmuleuw 10, 5, 29
+       vmuleuw 11, 6, 28
+       vmuleuw 12, 7, 27
+       vmuleuw 13, 8, 26
+       vaddudm 18, 18, 9
+       vaddudm 18, 18, 10
+       vaddudm 18, 18, 11
+       vaddudm 18, 18, 12
+       vaddudm 18, 18, 13      # x4
+.endm
+
+#
+# poly1305_setup_r
+#
+# setup r^4, r^3, r^2, r vectors
+#    [r, r^3, r^2, r^4]
+#    vs0 = [r0,...]
+#    vs1 = [r1,...]
+#    vs2 = [r2,...]
+#    vs3 = [r3,...]
+#    vs4 = [r4,...]
+#    vs5 = [r4*5,...]
+#    vs6 = [r3*5,...]
+#    vs7 = [r2*5,...]
+#    vs8 = [r1*5,...]
+#
+# r0, r4*5, r3*5, r2*5, r1*5;
+# r1, r0,   r4*5, r3*5, r2*5;
+# r2, r1,   r0,   r4*5, r3*5;
+# r3, r2,   r1,   r0,   r4*5;
+# r4, r3,   r2,   r1,   r0  ;
+#
+.macro poly1305_setup_r
+
+       # save r
+       xxlor   26, 58, 58
+       xxlor   27, 59, 59
+       xxlor   28, 60, 60
+       xxlor   29, 61, 61
+       xxlor   30, 62, 62
+
+       xxlxor  31, 31, 31
+
+#    [r, r^3, r^2, r^4]
+       # compute r^2
+       vmr     4, 26
+       vmr     5, 27
+       vmr     6, 28
+       vmr     7, 29
+       vmr     8, 30
+       bl      do_mul          # r^2 r^1
+       xxpermdi 58, 58, 36, 0x3                # r0
+       xxpermdi 59, 59, 37, 0x3                # r1
+       xxpermdi 60, 60, 38, 0x3                # r2
+       xxpermdi 61, 61, 39, 0x3                # r3
+       xxpermdi 62, 62, 40, 0x3                # r4
+       xxpermdi 36, 36, 36, 0x3
+       xxpermdi 37, 37, 37, 0x3
+       xxpermdi 38, 38, 38, 0x3
+       xxpermdi 39, 39, 39, 0x3
+       xxpermdi 40, 40, 40, 0x3
+       vspltisb 13, 2
+       vsld    9, 27, 13
+       vsld    10, 28, 13
+       vsld    11, 29, 13
+       vsld    12, 30, 13
+       vaddudm 0, 9, 27
+       vaddudm 1, 10, 28
+       vaddudm 2, 11, 29
+       vaddudm 3, 12, 30
+
+       bl      do_mul          # r^4 r^3
+       vmrgow  26, 26, 4
+       vmrgow  27, 27, 5
+       vmrgow  28, 28, 6
+       vmrgow  29, 29, 7
+       vmrgow  30, 30, 8
+       vspltisb 13, 2
+       vsld    9, 27, 13
+       vsld    10, 28, 13
+       vsld    11, 29, 13
+       vsld    12, 30, 13
+       vaddudm 0, 9, 27
+       vaddudm 1, 10, 28
+       vaddudm 2, 11, 29
+       vaddudm 3, 12, 30
+
+       # r^2 r^4
+       xxlor   0, 58, 58
+       xxlor   1, 59, 59
+       xxlor   2, 60, 60
+       xxlor   3, 61, 61
+       xxlor   4, 62, 62
+       xxlor   5, 32, 32
+       xxlor   6, 33, 33
+       xxlor   7, 34, 34
+       xxlor   8, 35, 35
+
+       vspltw  9, 26, 3
+       vspltw  10, 26, 2
+       vmrgow  26, 10, 9
+       vspltw  9, 27, 3
+       vspltw  10, 27, 2
+       vmrgow  27, 10, 9
+       vspltw  9, 28, 3
+       vspltw  10, 28, 2
+       vmrgow  28, 10, 9
+       vspltw  9, 29, 3
+       vspltw  10, 29, 2
+       vmrgow  29, 10, 9
+       vspltw  9, 30, 3
+       vspltw  10, 30, 2
+       vmrgow  30, 10, 9
+
+       vsld    9, 27, 13
+       vsld    10, 28, 13
+       vsld    11, 29, 13
+       vsld    12, 30, 13
+       vaddudm 0, 9, 27
+       vaddudm 1, 10, 28
+       vaddudm 2, 11, 29
+       vaddudm 3, 12, 30
+.endm
+
+SYM_FUNC_START_LOCAL(do_mul)
+       mul_odd
+
+       # do reduction ( h %= p )
+       # carry reduction: x0..x4 (v14-v18) are 64-bit limb sums; propagate
+       # the bits above 26 up the chain, folding the h4 overflow back into
+       # h0 as *5 (2^130 = 5 mod p).  Reduced limbs land in v4-v8.
+       vspltisb 9, 2           # shift count 2 (carry(h4)*4, for the *5 fold)
+       vsrd    10, 14, 31      # carry out of x0 (>> 26; v31 holds 26)
+       vsrd    11, 17, 31      # carry out of x3
+       vand    7, 17, 25       # h3 = x3 & 0x3ffffff (v25 = limb mask)
+       vand    4, 14, 25       # h0 = x0 & 0x3ffffff
+       vaddudm 18, 18, 11      # x4 += carry(x3)
+       vsrd    12, 18, 31      # carry out of x4
+       vaddudm 15, 15, 10      # x1 += carry(x0)
+
+       vsrd    11, 15, 31      # carry out of x1
+       vand    8, 18, 25       # h4 = x4 & 0x3ffffff
+       vand    5, 15, 25       # h1 = x1 & 0x3ffffff
+       vaddudm 4, 4, 12        # h0 += carry(h4)        (the *1 part of *5)
+       vsld    10, 12, 9       # carry(h4) * 4
+       vaddudm 6, 16, 11       # h2 = x2 + carry(x1)
+
+       vsrd    13, 6, 31       # carry out of h2
+       vand    6, 6, 25        # h2 &= 0x3ffffff
+       vaddudm 4, 4, 10        # h0 += carry(h4) * 4    (total *5 mod p)
+       vsrd    10, 4, 31       # carry out of h0
+       vaddudm 7, 7, 13        # h3 += carry(h2)
+
+       vsrd    11, 7, 31       # carry out of h3
+       vand    7, 7, 25        # h3 &= 0x3ffffff
+       vand    4, 4, 25        # h0 &= 0x3ffffff
+       vaddudm 5, 5, 10        # h1 += carry(h0)
+       vaddudm 8, 8, 11        # h4 += carry(h3)
+       blr
+SYM_FUNC_END(do_mul)
+
+#
+# init key
+#
+.macro do_poly1305_init
+       addis   10, 2, rmask@toc@ha
+       addi    10, 10, rmask@toc@l
+
+       ld      11, 0(10)
+       ld      12, 8(10)
+
+       li      14, 16
+       li      15, 32
+       addis   10, 2, cnum@toc@ha
+       addi    10, 10, cnum@toc@l
+       lvx     25, 0, 10       # v25 - mask
+       lvx     31, 14, 10      # v31 = 1a
+       lvx     19, 15, 10      # v19 = 1 << 24
+       lxv     24, 48(10)      # vs24
+       lxv     25, 64(10)      # vs25
+
+       # initialize
+       # load key from r3 to vectors
+       ld      9, 24(3)
+       ld      10, 32(3)
+       and.    9, 9, 11
+       and.    10, 10, 12
+
+       # break 26 bits
+       extrdi  14, 9, 26, 38
+       extrdi  15, 9, 26, 12
+       extrdi  16, 9, 12, 0
+       mtvsrdd 58, 0, 14
+       insrdi  16, 10, 14, 38
+       mtvsrdd 59, 0, 15
+       extrdi  17, 10, 26, 24
+       mtvsrdd 60, 0, 16
+       extrdi  18, 10, 24, 0
+       mtvsrdd 61, 0, 17
+       mtvsrdd 62, 0, 18
+
+       # r1 = r1 * 5, r2 = r2 * 5, r3 = r3 * 5, r4 = r4 * 5
+       li      9, 5
+       mtvsrdd 36, 0, 9
+       vmulouw 0, 27, 4                # v0 = rr0
+       vmulouw 1, 28, 4                # v1 = rr1
+       vmulouw 2, 29, 4                # v2 = rr2
+       vmulouw 3, 30, 4                # v3 = rr3
+.endm
+
+#
+# poly1305_p10le_4blocks( uint8_t *k, uint32_t mlen, uint8_t *m)
+#  k = 32 bytes key
+#  r3 = k (r, s)
+#  r4 = m
+#  r5 = mlen
+#
+SYM_FUNC_START(poly1305_p10le_4blocks)
+.align 5
+       cmpdi   5, 64
+       blt     Out_no_poly1305
+
+       SAVE_REGS
+
+       do_poly1305_init
+
+       li      21, 0   # counter to message
+
+       poly1305_setup_r
+
+       # load previous H state
+       # break/convert r6 to 26 bits
+       ld      9, 0(3)
+       ld      10, 8(3)
+       ld      19, 16(3)
+       sldi    19, 19, 24
+       mtvsrdd 41, 0, 19
+       extrdi  14, 9, 26, 38
+       extrdi  15, 9, 26, 12
+       extrdi  16, 9, 12, 0
+       mtvsrdd 36, 0, 14
+       insrdi  16, 10, 14, 38
+       mtvsrdd 37, 0, 15
+       extrdi  17, 10, 26, 24
+       mtvsrdd 38, 0, 16
+       extrdi  18, 10, 24, 0
+       mtvsrdd 39, 0, 17
+       mtvsrdd 40, 0, 18
+       vor     8, 8, 9
+
+       # input m1 m2
+       add     20, 4, 21
+       xxlor   49, 24, 24
+       xxlor   50, 25, 25
+       lxvw4x  43, 0, 20
+       addi    17, 20, 16
+       lxvw4x  44, 0, 17
+       vperm   14, 11, 12, 17
+       vperm   15, 11, 12, 18
+       vand    9, 14, 25       # a0
+       vsrd    10, 14, 31      # >> 26
+       vsrd    11, 10, 31      # 12 bits left
+       vand    10, 10, 25      # a1
+       vspltisb 13, 12
+       vand    16, 15, 25
+       vsld    12, 16, 13
+       vor     11, 11, 12
+       vand    11, 11, 25      # a2
+       vspltisb 13, 14
+       vsrd    12, 15, 13      # >> 14
+       vsrd    13, 12, 31      # >> 26, a4
+       vand    12, 12, 25      # a3
+
+       vaddudm 20, 4, 9
+       vaddudm 21, 5, 10
+       vaddudm 22, 6, 11
+       vaddudm 23, 7, 12
+       vaddudm 24, 8, 13
+
+       # m3 m4
+       addi    17, 17, 16
+       lxvw4x  43, 0, 17
+       addi    17, 17, 16
+       lxvw4x  44, 0, 17
+       vperm   14, 11, 12, 17
+       vperm   15, 11, 12, 18
+       vand    9, 14, 25       # a0
+       vsrd    10, 14, 31      # >> 26
+       vsrd    11, 10, 31      # 12 bits left
+       vand    10, 10, 25      # a1
+       vspltisb 13, 12
+       vand    16, 15, 25
+       vsld    12, 16, 13
+       vspltisb 13, 14
+       vor     11, 11, 12
+       vand    11, 11, 25      # a2
+       vsrd    12, 15, 13      # >> 14
+       vsrd    13, 12, 31      # >> 26, a4
+       vand    12, 12, 25      # a3
+
+       # Smash 4 message blocks into 5 vectors of [m4,  m2,  m3,  m1]
+       vmrgow  4, 9, 20
+       vmrgow  5, 10, 21
+       vmrgow  6, 11, 22
+       vmrgow  7, 12, 23
+       vmrgow  8, 13, 24
+       vaddudm 8, 8, 19
+
+       addi    5, 5, -64       # len -= 64
+       addi    21, 21, 64      # offset += 64
+
+       li      9, 64
+       divdu   31, 5, 9
+
+       cmpdi   31, 0
+       ble     Skip_block_loop
+
+       mtctr   31
+
+# h4 =   m1 * r⁴ + m2 * r³ + m3 * r² + m4 * r
+# Rewrite the polynomial sum of products as follows,
+# h1 = (h0 + m1) * r^2,        h2 = (h0 + m2) * r^2
+# h3 = (h1 + m3) * r^2,        h4 = (h2 + m4) * r^2  --> (h0 + m1) r*4 + (h3 + m3) r^2, (h0 + m2) r^4 + (h0 + m4) r^2
+#  .... Repeat
+# h5 = (h3 + m5) * r^2,        h6 = (h4 + m6) * r^2  -->
+# h7 = (h5 + m7) * r^2,        h8 = (h6 + m8) * r^1  --> m5 * r^4 + m6 * r^3 + m7 * r^2 + m8 * r
+#
+loop_4blocks:
+
+       # Multiply odd words and even words
+       mul_odd
+       mul_even
+       # carry reduction
+       vspltisb 9, 2
+       vsrd    10, 14, 31
+       vsrd    11, 17, 31
+       vand    7, 17, 25
+       vand    4, 14, 25
+       vaddudm 18, 18, 11
+       vsrd    12, 18, 31
+       vaddudm 15, 15, 10
+
+       vsrd    11, 15, 31
+       vand    8, 18, 25
+       vand    5, 15, 25
+       vaddudm 4, 4, 12
+       vsld    10, 12, 9
+       vaddudm 6, 16, 11
+
+       vsrd    13, 6, 31
+       vand    6, 6, 25
+       vaddudm 4, 4, 10
+       vsrd    10, 4, 31
+       vaddudm 7, 7, 13
+
+       vsrd    11, 7, 31
+       vand    7, 7, 25
+       vand    4, 4, 25
+       vaddudm 5, 5, 10
+       vaddudm 8, 8, 11
+
+       # input m1  m2  m3  m4
+       add     20, 4, 21
+       xxlor   49, 24, 24
+       xxlor   50, 25, 25
+       lxvw4x  43, 0, 20
+       addi    17, 20, 16
+       lxvw4x  44, 0, 17
+       vperm   14, 11, 12, 17
+       vperm   15, 11, 12, 18
+       addi    17, 17, 16
+       lxvw4x  43, 0, 17
+       addi    17, 17, 16
+       lxvw4x  44, 0, 17
+       vperm   17, 11, 12, 17
+       vperm   18, 11, 12, 18
+
+       vand    20, 14, 25      # a0
+       vand    9, 17, 25       # a0
+       vsrd    21, 14, 31      # >> 26
+       vsrd    22, 21, 31      # 12 bits left
+       vsrd    10, 17, 31      # >> 26
+       vsrd    11, 10, 31      # 12 bits left
+
+       vand    21, 21, 25      # a1
+       vand    10, 10, 25      # a1
+
+       vspltisb 13, 12
+       vand    16, 15, 25
+       vsld    23, 16, 13
+       vor     22, 22, 23
+       vand    22, 22, 25      # a2
+       vand    16, 18, 25
+       vsld    12, 16, 13
+       vor     11, 11, 12
+       vand    11, 11, 25      # a2
+       vspltisb 13, 14
+       vsrd    23, 15, 13      # >> 14
+       vsrd    24, 23, 31      # >> 26, a4
+       vand    23, 23, 25      # a3
+       vsrd    12, 18, 13      # >> 14
+       vsrd    13, 12, 31      # >> 26, a4
+       vand    12, 12, 25      # a3
+
+       vaddudm 4, 4, 20
+       vaddudm 5, 5, 21
+       vaddudm 6, 6, 22
+       vaddudm 7, 7, 23
+       vaddudm 8, 8, 24
+
+       # Smash 4 message blocks into 5 vectors of [m4,  m2,  m3,  m1]
+       vmrgow  4, 9, 4
+       vmrgow  5, 10, 5
+       vmrgow  6, 11, 6
+       vmrgow  7, 12, 7
+       vmrgow  8, 13, 8
+       vaddudm 8, 8, 19
+
+       addi    5, 5, -64       # len -= 64
+       addi    21, 21, 64      # offset += 64
+
+       bdnz    loop_4blocks
+
+Skip_block_loop:
+       xxlor   58, 0, 0
+       xxlor   59, 1, 1
+       xxlor   60, 2, 2
+       xxlor   61, 3, 3
+       xxlor   62, 4, 4
+       xxlor   32, 5, 5
+       xxlor   33, 6, 6
+       xxlor   34, 7, 7
+       xxlor   35, 8, 8
+
+       # Multiply odd words and even words
+       mul_odd
+       mul_even
+
+       # Sum the products.
+       xxpermdi 41, 31, 46, 0
+       xxpermdi 42, 31, 47, 0
+       vaddudm 4, 14, 9
+       xxpermdi 36, 31, 36, 3
+       vaddudm 5, 15, 10
+       xxpermdi 37, 31, 37, 3
+       xxpermdi 43, 31, 48, 0
+       vaddudm 6, 16, 11
+       xxpermdi 38, 31, 38, 3
+       xxpermdi 44, 31, 49, 0
+       vaddudm 7, 17, 12
+       xxpermdi 39, 31, 39, 3
+       xxpermdi 45, 31, 50, 0
+       vaddudm 8, 18, 13
+       xxpermdi 40, 31, 40, 3
+
+       # carry reduction
+       vspltisb 9, 2
+       vsrd    10, 4, 31
+       vsrd    11, 7, 31
+       vand    7, 7, 25
+       vand    4, 4, 25
+       vaddudm 8, 8, 11
+       vsrd    12, 8, 31
+       vaddudm 5, 5, 10
+
+       vsrd    11, 5, 31
+       vand    8, 8, 25
+       vand    5, 5, 25
+       vaddudm 4, 4, 12
+       vsld    10, 12, 9
+       vaddudm 6, 6, 11
+
+       vsrd    13, 6, 31
+       vand    6, 6, 25
+       vaddudm 4, 4, 10
+       vsrd    10, 4, 31
+       vaddudm 7, 7, 13
+
+       vsrd    11, 7, 31
+       vand    7, 7, 25
+       vand    4, 4, 25
+       vaddudm 5, 5, 10
+       vsrd    10, 5, 31
+       vand    5, 5, 25
+       vaddudm 6, 6, 10
+       vaddudm 8, 8, 11
+
+       b       do_final_update
+
+do_final_update:
+       # combine 26 bit limbs
+       # v4, v5, v6, v7 and v8 are 26 bit vectors
+       vsld    5, 5, 31
+       vor     20, 4, 5
+       vspltisb 11, 12
+       vsrd    12, 6, 11
+       vsld    6, 6, 31
+       vsld    6, 6, 31
+       vor     20, 20, 6
+       vspltisb 11, 14
+       vsld    7, 7, 11
+       vor     21, 7, 12
+       mfvsrld 16, 40          # save last 2 bytes
+       vsld    8, 8, 11
+       vsld    8, 8, 31
+       vor     21, 21, 8
+       mfvsrld 17, 52
+       mfvsrld 19, 53
+       srdi    16, 16, 24
+
+       std     17, 0(3)
+       std     19, 8(3)
+       stw     16, 16(3)
+
+Out_loop:
+       li      3, 0
+
+       RESTORE_REGS
+
+       blr
+
+Out_no_poly1305:
+       li      3, 0
+       blr
+SYM_FUNC_END(poly1305_p10le_4blocks)
+
+#
+# =======================================================================
+# The following functions implement 64 x 64 bits multiplication poly1305.
+#
+SYM_FUNC_START_LOCAL(Poly1305_init_64)
+       #  clamp mask 0x0FFFFFFC0FFFFFFC
+       #  clamp mask 0x0FFFFFFC0FFFFFFF
+       addis   10, 2, rmask@toc@ha
+       addi    10, 10, rmask@toc@l
+       ld      11, 0(10)
+       ld      12, 8(10)
+
+       # initialize
+       # load key (r part, at offsets 24/32) from the state at r3
+       ld      9, 24(3)
+       ld      10, 32(3)
+       and.    9, 9, 11        # clamp mask r0
+       and.    10, 10, 12      # clamp mask r1
+
+        srdi    21, 10, 2
+        add     19, 21, 10      # s1: r19 = r1 + (r1 >> 2) = (r1 >> 2) * 5
+
+        # setup r and s
+        li      25, 0
+       mtvsrdd 32+0, 9, 19     # r0, s1
+       mtvsrdd 32+1, 10, 9     # r1, r0
+       mtvsrdd 32+2, 19, 25    # s1
+       mtvsrdd 32+3, 9, 25     # r0
+
+       blr
+SYM_FUNC_END(Poly1305_init_64)
+
+# Poly1305_mult
+# v6 = (h0, h1), v8 = h2
+# v0 = (r0, s1), v1 = (r1, r0), v2 = s1, v3 = r0
+#
+# Output: v7, v10, v11
+#
+SYM_FUNC_START_LOCAL(Poly1305_mult)
+       #
+       #       d0 = h0 * r0 + h1 * s1
+       vmsumudm        7, 6, 0, 9              # h0 * r0, h1 * s1
+
+       #       d1 = h0 * r1 + h1 * r0 + h2 * s1
+       vmsumudm        11, 6, 1, 9             # h0 * r1, h1 * r0
+       vmsumudm        10, 8, 2, 11            # d1 += h2 * s1
+
+       #       d2 = h2 * r0
+       vmsumudm        11, 8, 3, 9             # d2 = h2 * r0
+       blr
+SYM_FUNC_END(Poly1305_mult)
+
+#
+# carry reduction
+# h %= p
+#
+# Input: v7, v10, v11
+# Output: r27, r28, r29
+#
+SYM_FUNC_START_LOCAL(Carry_reduction)
+       mfvsrld 27, 32+7        # h0 = d0 low dword
+       mfvsrld 28, 32+10       # h1 = d1 low dword
+       mfvsrld 29, 32+11       # h2 = d2 low dword
+       mfvsrd  20, 32+7        # h0.h
+       mfvsrd  21, 32+10       # h1.h
+
+       addc    28, 28, 20      # fold h0.h into h1
+       adde    29, 29, 21      # fold h1.h (+ carry) into h2
+       srdi    22, 29, 0x2     # h2 >> 2
+       sldi    23, 22, 0x2     # (h2 >> 2) << 2
+       add     23, 23, 22      # (h2 >> 2) * 5
+       addc    27, 27, 23      # h0
+       addze   28, 28          # h1
+       andi.   29, 29, 0x3     # h2
+       blr
+SYM_FUNC_END(Carry_reduction)
+
+#
+# poly1305 multiplication
+# h *= r, h %= p
+#      d0 = h0 * r0 + h1 * s1
+#      d1 = h0 * r1 + h1 * r0 + h2 * s1
+#       d2 = h2 * r0
+#
+#
+# unsigned int poly1305_64s(unsigned char *state, const byte *src, size_t len, highbit)
+#   - no highbit if final leftover block (highbit = 0)
+#
+SYM_FUNC_START(poly1305_64s)
+       cmpdi   5, 0
+       ble     Out_no_poly1305_64
+
+       mflr 0
+       std 0, 16(1)
+       stdu 1,-400(1)
+
+       SAVE_GPR 14, 112, 1
+       SAVE_GPR 15, 120, 1
+       SAVE_GPR 16, 128, 1
+       SAVE_GPR 17, 136, 1
+       SAVE_GPR 18, 144, 1
+       SAVE_GPR 19, 152, 1
+       SAVE_GPR 20, 160, 1
+       SAVE_GPR 21, 168, 1
+       SAVE_GPR 22, 176, 1
+       SAVE_GPR 23, 184, 1
+       SAVE_GPR 24, 192, 1
+       SAVE_GPR 25, 200, 1
+       SAVE_GPR 26, 208, 1
+       SAVE_GPR 27, 216, 1
+       SAVE_GPR 28, 224, 1
+       SAVE_GPR 29, 232, 1
+       SAVE_GPR 30, 240, 1
+       SAVE_GPR 31, 248, 1
+
+       # Init poly1305
+       bl Poly1305_init_64
+
+       li 25, 0                        # offset to inp and outp
+
+       add 11, 25, 4
+
+       # load h
+       # h0, h1, h2?
+        ld     27, 0(3)
+        ld     28, 8(3)
+        lwz    29, 16(3)
+
+        li      30, 16
+        divdu   31, 5, 30
+
+        mtctr   31
+
+        mr      24, 6          # highbit
+
+Loop_block_64:
+       vxor    9, 9, 9
+
+       ld      20, 0(11)
+       ld      21, 8(11)
+       addi    11, 11, 16
+
+       addc    27, 27, 20
+       adde    28, 28, 21
+       adde    29, 29, 24
+
+       li      22, 0
+       mtvsrdd 32+6, 27, 28    # h0, h1
+       mtvsrdd 32+8, 29, 22    # h2
+
+       bl      Poly1305_mult
+
+       bl      Carry_reduction
+
+       bdnz    Loop_block_64
+
+       std     27, 0(3)
+       std     28, 8(3)
+       stw     29, 16(3)
+
+       li      3, 0
+
+       RESTORE_GPR 14, 112, 1
+       RESTORE_GPR 15, 120, 1
+       RESTORE_GPR 16, 128, 1
+       RESTORE_GPR 17, 136, 1
+       RESTORE_GPR 18, 144, 1
+       RESTORE_GPR 19, 152, 1
+       RESTORE_GPR 20, 160, 1
+       RESTORE_GPR 21, 168, 1
+       RESTORE_GPR 22, 176, 1
+       RESTORE_GPR 23, 184, 1
+       RESTORE_GPR 24, 192, 1
+       RESTORE_GPR 25, 200, 1
+       RESTORE_GPR 26, 208, 1
+       RESTORE_GPR 27, 216, 1
+       RESTORE_GPR 28, 224, 1
+       RESTORE_GPR 29, 232, 1
+       RESTORE_GPR 30, 240, 1
+       RESTORE_GPR 31, 248, 1
+
+       addi    1, 1, 400
+       ld 0, 16(1)
+       mtlr 0
+
+       blr
+
+Out_no_poly1305_64:
+       li      3, 0
+       blr
+SYM_FUNC_END(poly1305_64s)
+
+#
+# Input: r3 = h, r4 = s, r5 = mac
+# mac = h + s
+#
+SYM_FUNC_START(poly1305_emit_64)
+       ld      10, 0(3)        # h0
+       ld      11, 8(3)        # h1
+       ld      12, 16(3)       # h2
+
+       # compare modulus: compute h + 5 + (-p);
+       # if bit 2^130 sets, h >= p, so use the reduced value
+       mr      6, 10
+       mr      7, 11
+       mr      8, 12
+       addic.  6, 6, 5
+       addze   7, 7
+       addze   8, 8
+       srdi    9, 8, 2         # overflow?
+       cmpdi   9, 0
+       beq     Skip_h64
+       mr      10, 6           # h >= p: keep h + 5 - p
+       mr      11, 7
+       mr      12, 8
+
+Skip_h64:
+       ld      6, 0(4)         # s low dword
+       ld      7, 8(4)         # s high dword
+       addc    10, 10, 6       # mac = h + s
+       adde    11, 11, 7
+       addze   12, 12
+
+       std     10, 0(5)        # store 16-byte tag to mac (r5)
+       std     11, 8(5)
+       blr
+SYM_FUNC_END(poly1305_emit_64)
+
+SYM_DATA_START_LOCAL(RMASK)
+.align 5
+rmask:
+.byte  0xff, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f, 0xfc, 0xff, 0xff, 0x0f
+cnum:
+.long  0x03ffffff, 0x00000000, 0x03ffffff, 0x00000000
+.long  0x1a, 0x00, 0x1a, 0x00
+.long  0x01000000, 0x01000000, 0x01000000, 0x01000000
+.long  0x00010203, 0x04050607, 0x10111213, 0x14151617
+.long  0x08090a0b, 0x0c0d0e0f, 0x18191a1b, 0x1c1d1e1f
+SYM_DATA_END(RMASK)
index a5b0cb3..39d6a62 100644 (file)
@@ -229,10 +229,9 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
        return (struct crypto_aes_ctx *)ALIGN(addr, align);
 }
 
-static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
+static int aes_set_key_common(struct crypto_aes_ctx *ctx,
                              const u8 *in_key, unsigned int key_len)
 {
-       struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
        int err;
 
        if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
@@ -253,7 +252,8 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
 {
-       return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
+       return aes_set_key_common(aes_ctx(crypto_tfm_ctx(tfm)), in_key,
+                                 key_len);
 }
 
 static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
@@ -285,8 +285,7 @@ static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
 {
-       return aes_set_key_common(crypto_skcipher_tfm(tfm),
-                                 crypto_skcipher_ctx(tfm), key, len);
+       return aes_set_key_common(aes_ctx(crypto_skcipher_ctx(tfm)), key, len);
 }
 
 static int ecb_encrypt(struct skcipher_request *req)
@@ -627,8 +626,7 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
 
        memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
 
-       return aes_set_key_common(crypto_aead_tfm(aead),
-                                 &ctx->aes_key_expanded, key, key_len) ?:
+       return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 }
 
@@ -893,14 +891,13 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
        keylen /= 2;
 
        /* first half of xts-key is for crypt */
-       err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
-                                key, keylen);
+       err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen);
        if (err)
                return err;
 
        /* second half of xts-key is for tweak */
-       return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
-                                 key + keylen, keylen);
+       return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen,
+                                 keylen);
 }
 
 static int xts_crypt(struct skcipher_request *req, bool encrypt)
@@ -1150,8 +1147,7 @@ static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
 {
        struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
 
-       return aes_set_key_common(crypto_aead_tfm(aead),
-                                 &ctx->aes_key_expanded, key, key_len) ?:
+       return aes_set_key_common(&ctx->aes_key_expanded, key, key_len) ?:
               rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
 }
 
index 10efb56..ea6fb8e 100644 (file)
@@ -320,18 +320,21 @@ static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval,
 
        if (IS_ERR(ret)) {
                up_read(&key->sem);
+               key_put(key);
                return PTR_ERR(ret);
        }
 
        key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL);
        if (!key_data) {
                up_read(&key->sem);
+               key_put(key);
                return -ENOMEM;
        }
 
        memcpy(key_data, ret, key_datalen);
 
        up_read(&key->sem);
+       key_put(key);
 
        err = type->setkey(ask->private, key_data, key_datalen);
 
@@ -1192,6 +1195,7 @@ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
 
        areq->areqlen = areqlen;
        areq->sk = sk;
+       areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl;
        areq->last_rsgl = NULL;
        INIT_LIST_HEAD(&areq->rsgl_list);
        areq->tsgl = NULL;
index 5e7cd60..4fe95c4 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/workqueue.h>
 
 #include "internal.h"
 
@@ -74,15 +75,26 @@ static void crypto_free_instance(struct crypto_instance *inst)
        inst->alg.cra_type->free(inst);
 }
 
-static void crypto_destroy_instance(struct crypto_alg *alg)
+static void crypto_destroy_instance_workfn(struct work_struct *w)
 {
-       struct crypto_instance *inst = (void *)alg;
+       struct crypto_instance *inst = container_of(w, struct crypto_instance,
+                                                   free_work);
        struct crypto_template *tmpl = inst->tmpl;
 
        crypto_free_instance(inst);
        crypto_tmpl_put(tmpl);
 }
 
+static void crypto_destroy_instance(struct crypto_alg *alg)
+{
+       struct crypto_instance *inst = container_of(alg,
+                                                   struct crypto_instance,
+                                                   alg);
+
+       INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
+       schedule_work(&inst->free_work);
+}
+
 /*
  * This function adds a spawn to the list secondary_spawns which
  * will be used at the end of crypto_remove_spawns to unregister
index 773e159..abeecb8 100644 (file)
@@ -42,7 +42,7 @@ static void public_key_describe(const struct key *asymmetric_key,
 void public_key_free(struct public_key *key)
 {
        if (key) {
-               kfree(key->key);
+               kfree_sensitive(key->key);
                kfree(key->params);
                kfree(key);
        }
@@ -263,7 +263,7 @@ error_free_tfm:
        else
                crypto_free_akcipher(tfm);
 error_free_key:
-       kfree(key);
+       kfree_sensitive(key);
        pr_devel("<==%s() = %d\n", __func__, ret);
        return ret;
 }
@@ -369,7 +369,7 @@ error_free_tfm:
        else
                crypto_free_akcipher(tfm);
 error_free_key:
-       kfree(key);
+       kfree_sensitive(key);
        pr_devel("<==%s() = %d\n", __func__, ret);
        return ret;
 }
@@ -441,7 +441,7 @@ int public_key_verify_signature(const struct public_key *pkey,
                                sig->digest, sig->digest_size);
 
 error_free_key:
-       kfree(key);
+       kfree_sensitive(key);
 error_free_tfm:
        crypto_free_sig(tfm);
        pr_devel("<==%s() = %d\n", __func__, ret);
index 22beaf2..f440767 100644 (file)
@@ -391,7 +391,7 @@ error_no_desc:
  * verify_pefile_signature - Verify the signature on a PE binary image
  * @pebuf: Buffer containing the PE binary image
  * @pelen: Length of the binary image
- * @trust_keys: Signing certificate(s) to use as starting points
+ * @trusted_keys: Signing certificate(s) to use as starting points
  * @usage: The use to which the key is being put.
  *
  * Validate that the certificate chain inside the PKCS#7 message inside the PE
index 6fdfc82..7c71db3 100644 (file)
@@ -130,6 +130,11 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
                        goto out;
        }
 
+       if (cert->unsupported_sig) {
+               ret = 0;
+               goto out;
+       }
+
        ret = public_key_verify_signature(cert->pub, cert->sig);
        if (ret < 0) {
                if (ret == -ENOPKG) {
index 74fcc08..108d9d5 100644 (file)
@@ -7,15 +7,30 @@
  * Author: Baolin Wang <baolin.wang@linaro.org>
  */
 
+#include <crypto/internal/aead.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/device.h>
-#include <crypto/engine.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <uapi/linux/sched/types.h>
 #include "internal.h"
 
 #define CRYPTO_ENGINE_MAX_QLEN 10
 
+/* Temporary algorithm flag used to indicate an updated driver. */
+#define CRYPTO_ALG_ENGINE 0x200
+
+struct crypto_engine_alg {
+       struct crypto_alg base;
+       struct crypto_engine_op op;
+};
+
 /**
  * crypto_finalize_request - finalize one request if the request is done
  * @engine: the hardware engine
@@ -26,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
 {
        unsigned long flags;
-       bool finalize_req = false;
-       int ret;
-       struct crypto_engine_ctx *enginectx;
 
        /*
         * If hardware cannot enqueue more requests
@@ -38,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
        if (!engine->retry_support) {
                spin_lock_irqsave(&engine->queue_lock, flags);
                if (engine->cur_req == req) {
-                       finalize_req = true;
                        engine->cur_req = NULL;
                }
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }
 
-       if (finalize_req || engine->retry_support) {
-               enginectx = crypto_tfm_ctx(req->tfm);
-               if (enginectx->op.prepare_request &&
-                   enginectx->op.unprepare_request) {
-                       ret = enginectx->op.unprepare_request(engine, req);
-                       if (ret)
-                               dev_err(engine->dev, "failed to unprepare request\n");
-               }
-       }
        lockdep_assert_in_softirq();
        crypto_request_complete(req, err);
 
@@ -72,10 +74,11 @@ static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
 {
        struct crypto_async_request *async_req, *backlog;
+       struct crypto_engine_alg *alg;
+       struct crypto_engine_op *op;
        unsigned long flags;
        bool was_busy = false;
        int ret;
-       struct crypto_engine_ctx *enginectx;
 
        spin_lock_irqsave(&engine->queue_lock, flags);
 
@@ -141,27 +144,21 @@ start_request:
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
-                       goto req_err_2;
+                       goto req_err_1;
                }
        }
 
-       enginectx = crypto_tfm_ctx(async_req->tfm);
-
-       if (enginectx->op.prepare_request) {
-               ret = enginectx->op.prepare_request(engine, async_req);
-               if (ret) {
-                       dev_err(engine->dev, "failed to prepare request: %d\n",
-                               ret);
-                       goto req_err_2;
-               }
-       }
-       if (!enginectx->op.do_one_request) {
+       if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
+               alg = container_of(async_req->tfm->__crt_alg,
+                                  struct crypto_engine_alg, base);
+               op = &alg->op;
+       } else {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err_1;
        }
 
-       ret = enginectx->op.do_one_request(engine, async_req);
+       ret = op->do_one_request(engine, async_req);
 
        /* Request unsuccessfully executed by hardware */
        if (ret < 0) {
@@ -177,18 +174,6 @@ start_request:
                                ret);
                        goto req_err_1;
                }
-               /*
-                * If retry mechanism is supported,
-                * unprepare current request and
-                * enqueue it back into crypto-engine queue.
-                */
-               if (enginectx->op.unprepare_request) {
-                       ret = enginectx->op.unprepare_request(engine,
-                                                             async_req);
-                       if (ret)
-                               dev_err(engine->dev,
-                                       "failed to unprepare request\n");
-               }
                spin_lock_irqsave(&engine->queue_lock, flags);
                /*
                 * If hardware was unable to execute request, enqueue it
@@ -204,13 +189,6 @@ start_request:
        goto retry;
 
 req_err_1:
-       if (enginectx->op.unprepare_request) {
-               ret = enginectx->op.unprepare_request(engine, async_req);
-               if (ret)
-                       dev_err(engine->dev, "failed to unprepare request\n");
-       }
-
-req_err_2:
        crypto_request_complete(async_req, ret);
 
 retry:
@@ -591,5 +569,177 @@ int crypto_engine_exit(struct crypto_engine *engine)
 }
 EXPORT_SYMBOL_GPL(crypto_engine_exit);
 
+int crypto_engine_register_aead(struct aead_engine_alg *alg)
+{
+       if (!alg->op.do_one_request)
+               return -EINVAL;
+
+       alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+       return crypto_register_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
+
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
+{
+       crypto_unregister_aead(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
+
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
+{
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               ret = crypto_engine_register_aead(&algs[i]);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       crypto_engine_unregister_aeads(algs, i);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
+
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
+{
+       int i;
+
+       for (i = count - 1; i >= 0; --i)
+               crypto_engine_unregister_aead(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
+{
+       if (!alg->op.do_one_request)
+               return -EINVAL;
+
+       alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+       return crypto_register_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
+
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
+{
+       crypto_unregister_ahash(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
+
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
+{
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               ret = crypto_engine_register_ahash(&algs[i]);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       crypto_engine_unregister_ahashes(algs, i);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
+
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+                                     int count)
+{
+       int i;
+
+       for (i = count - 1; i >= 0; --i)
+               crypto_engine_unregister_ahash(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
+{
+       if (!alg->op.do_one_request)
+               return -EINVAL;
+
+       alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+       return crypto_register_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
+
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
+{
+       crypto_unregister_akcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
+{
+       if (!alg->op.do_one_request)
+               return -EINVAL;
+
+       alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+       return crypto_register_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
+
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
+{
+       crypto_unregister_kpp(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
+{
+       if (!alg->op.do_one_request)
+               return -EINVAL;
+
+       alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
+
+       return crypto_register_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
+
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
+{
+       return crypto_unregister_skcipher(&alg->base);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
+
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+                                    int count)
+{
+       int i, ret;
+
+       for (i = 0; i < count; i++) {
+               ret = crypto_engine_register_skcipher(&algs[i]);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+
+err:
+       crypto_engine_unregister_skciphers(algs, i);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
+
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+                                       int count)
+{
+       int i;
+
+       for (i = count - 1; i >= 0; --i)
+               crypto_engine_unregister_skcipher(&algs[i]);
+}
+EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Crypto hardware engine framework");
index c7d7f2c..fe9c233 100644 (file)
@@ -89,10 +89,14 @@ struct rand_data {
        unsigned int rct_count;                 /* Number of stuck values */
 
        /* Intermittent health test failure threshold of 2^-30 */
-#define JENT_RCT_CUTOFF                30      /* Taken from SP800-90B sec 4.4.1 */
-#define JENT_APT_CUTOFF                325     /* Taken from SP800-90B sec 4.4.2 */
+       /* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */
+       /* However, our RCT implementation starts at 1, so we subtract 1 here. */
+#define JENT_RCT_CUTOFF                (31 - 1)        /* Taken from SP800-90B sec 4.4.1 */
+#define JENT_APT_CUTOFF                325                     /* Taken from SP800-90B sec 4.4.2 */
        /* Permanent health test failure threshold of 2^-60 */
-#define JENT_RCT_CUTOFF_PERMANENT      60
+       /* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */
+       /* However, our RCT implementation starts at 1, so we subtract 1 here. */
+#define JENT_RCT_CUTOFF_PERMANENT      (61 - 1)
 #define JENT_APT_CUTOFF_PERMANENT      355
 #define JENT_APT_WINDOW_SIZE   512     /* Data window size */
        /* LSB of time stamp to process */
index 1b0f76b..59260ae 100644 (file)
@@ -357,10 +357,10 @@ static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb)
         * cipher name.
         */
        if (!strncmp(cipher_name, "ecb(", 4)) {
-               unsigned len;
+               int len;
 
-               len = strlcpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
-               if (len < 2 || len >= sizeof(ecb_name))
+               len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name));
+               if (len < 2)
                        goto err_free_inst;
 
                if (ecb_name[len - 1] != ')')
index b48c18e..224c470 100644 (file)
 
 static const struct crypto_type crypto_sig_type;
 
-static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm)
-{
-       return container_of(tfm, struct crypto_sig, base);
-}
-
 static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
 {
        if (tfm->__crt_alg->cra_type != &crypto_sig_type)
index 09be909..548b302 100644 (file)
@@ -396,10 +396,10 @@ static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
         * cipher name.
         */
        if (!strncmp(cipher_name, "ecb(", 4)) {
-               unsigned len;
+               int len;
 
-               len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
-               if (len < 2 || len >= sizeof(ctx->name))
+               len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
+               if (len < 2)
                        goto err_free_inst;
 
                if (ctx->name[len - 1] != ')')
index e0b3786..8de74dc 100644 (file)
@@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM
 
 config HW_RANDOM_INTEL
        tristate "Intel HW Random Number Generator support"
-       depends on (X86 || IA64) && PCI
+       depends on (X86 || IA64 || COMPILE_TEST) && PCI
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -50,7 +50,8 @@ config HW_RANDOM_INTEL
 
 config HW_RANDOM_AMD
        tristate "AMD HW Random Number Generator support"
-       depends on (X86 || PPC_MAPLE) && PCI
+       depends on (X86 || PPC_MAPLE || COMPILE_TEST)
+       depends on PCI && HAS_IOPORT_MAP
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -63,7 +64,7 @@ config HW_RANDOM_AMD
 
 config HW_RANDOM_ATMEL
        tristate "Atmel Random Number Generator support"
-       depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF
+       depends on (ARCH_AT91 || COMPILE_TEST)
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -113,7 +114,8 @@ config HW_RANDOM_IPROC_RNG200
 
 config HW_RANDOM_GEODE
        tristate "AMD Geode HW Random Number Generator support"
-       depends on X86_32 && PCI
+       depends on (X86_32 || COMPILE_TEST)
+       depends on PCI
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -205,7 +207,7 @@ config HW_RANDOM_OCTEON
 
 config HW_RANDOM_PASEMI
        tristate "PA Semi HW Random Number Generator support"
-       depends on PPC_PASEMI
+       depends on PPC_PASEMI || (PPC && COMPILE_TEST)
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -228,7 +230,7 @@ config HW_RANDOM_VIRTIO
 
 config HW_RANDOM_MXC_RNGA
        tristate "Freescale i.MX RNGA Random Number Generator"
-       depends on SOC_IMX31
+       depends on SOC_IMX31 || COMPILE_TEST
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -241,7 +243,7 @@ config HW_RANDOM_MXC_RNGA
 
 config HW_RANDOM_IMX_RNGC
        tristate "Freescale i.MX RNGC Random Number Generator"
-       depends on HAS_IOMEM && HAVE_CLK
+       depends on HAS_IOMEM
        depends on SOC_IMX25 || SOC_IMX6SL || SOC_IMX6SLL || SOC_IMX6UL || COMPILE_TEST
        default HW_RANDOM
        help
@@ -256,8 +258,7 @@ config HW_RANDOM_IMX_RNGC
 
 config HW_RANDOM_INGENIC_RNG
        tristate "Ingenic Random Number Generator support"
-       depends on HW_RANDOM
-       depends on MACH_JZ4780 || MACH_X1000
+       depends on MACH_JZ4780 || MACH_X1000 || COMPILE_TEST
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number Generator
@@ -271,8 +272,7 @@ config HW_RANDOM_INGENIC_RNG
 
 config HW_RANDOM_INGENIC_TRNG
        tristate "Ingenic True Random Number Generator support"
-       depends on HW_RANDOM
-       depends on MACH_X1830
+       depends on MACH_X1830 || COMPILE_TEST
        default HW_RANDOM
        help
          This driver provides kernel-side support for the True Random Number Generator
@@ -324,7 +324,7 @@ config HW_RANDOM_POWERNV
 
 config HW_RANDOM_HISI
        tristate "Hisilicon Random Number Generator support"
-       depends on HW_RANDOM && ARCH_HISI
+       depends on ARCH_HISI || COMPILE_TEST
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -348,7 +348,7 @@ config HW_RANDOM_HISTB
 
 config HW_RANDOM_ST
        tristate "ST Microelectronics HW Random Number Generator support"
-       depends on HW_RANDOM && (ARCH_STI || COMPILE_TEST)
+       depends on ARCH_STI || COMPILE_TEST
        help
          This driver provides kernel-side support for the Random Number
          Generator hardware found on STi series of SoCs.
@@ -358,7 +358,7 @@ config HW_RANDOM_ST
 
 config HW_RANDOM_XGENE
        tristate "APM X-Gene True Random Number Generator (TRNG) support"
-       depends on HW_RANDOM && ARCH_XGENE
+       depends on ARCH_XGENE || COMPILE_TEST
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
@@ -371,7 +371,7 @@ config HW_RANDOM_XGENE
 
 config HW_RANDOM_STM32
        tristate "STMicroelectronics STM32 random number generator"
-       depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
+       depends on ARCH_STM32 || COMPILE_TEST
        depends on HAS_IOMEM
        default HW_RANDOM
        help
@@ -385,8 +385,8 @@ config HW_RANDOM_STM32
 
 config HW_RANDOM_PIC32
        tristate "Microchip PIC32 Random Number Generator support"
-       depends on HW_RANDOM && MACH_PIC32
-       default y
+       depends on MACH_PIC32 || COMPILE_TEST
+       default HW_RANDOM if MACH_PIC32
        help
          This driver provides kernel-side support for the Random Number
          Generator hardware found on a PIC32.
@@ -425,7 +425,8 @@ config HW_RANDOM_MESON
 
 config HW_RANDOM_CAVIUM
        tristate "Cavium ThunderX Random Number Generator support"
-       depends on HW_RANDOM && PCI && ARCH_THUNDER
+       depends on PCI
+       depends on ARCH_THUNDER || (ARM64 && COMPILE_TEST)
        default HW_RANDOM
        help
          This driver provides kernel-side support for the Random Number
index 0555e38..86162a1 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <linux/delay.h>
 #include <linux/hw_random.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
index e34c3ea..7e95434 100644 (file)
@@ -105,8 +105,6 @@ static int smccc_trng_probe(struct platform_device *pdev)
        trng->name = "smccc_trng";
        trng->read = smccc_trng_read;
 
-       platform_set_drvdata(pdev, trng);
-
        return devm_hwrng_register(&pdev->dev, trng);
 }
 
index b8effe7..a37367e 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/hw_random.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
index 5b7ca04..9de7466 100644 (file)
@@ -189,13 +189,9 @@ static int ba431_trng_probe(struct platform_device *pdev)
        ba431->rng.cleanup = ba431_trng_cleanup;
        ba431->rng.read = ba431_trng_read;
 
-       platform_set_drvdata(pdev, ba431);
-
        ret = devm_hwrng_register(&pdev->dev, &ba431->rng);
-       if (ret) {
-               dev_err(&pdev->dev, "BA431 registration failed (%d)\n", ret);
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "BA431 registration failed\n");
 
        dev_info(&pdev->dev, "BA431 TRNG registered\n");
 
@@ -203,7 +199,7 @@ static int ba431_trng_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id ba431_trng_dt_ids[] = {
-       { .compatible = "silex-insight,ba431-rng", .data = NULL },
+       { .compatible = "silex-insight,ba431-rng" },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, ba431_trng_dt_ids);
index e98fcac..e19b0f9 100644 (file)
@@ -8,8 +8,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/printk.h>
 #include <linux/clk.h>
index 302ffa3..1abbff0 100644 (file)
@@ -455,35 +455,6 @@ static void cc_trng_startwork_handler(struct work_struct *w)
        cc_trng_hw_trigger(drvdata);
 }
 
-
-static int cc_trng_clk_init(struct cctrng_drvdata *drvdata)
-{
-       struct clk *clk;
-       struct device *dev = &(drvdata->pdev->dev);
-       int rc = 0;
-
-       clk = devm_clk_get_optional(dev, NULL);
-       if (IS_ERR(clk))
-               return dev_err_probe(dev, PTR_ERR(clk),
-                                    "Error getting clock\n");
-
-       drvdata->clk = clk;
-
-       rc = clk_prepare_enable(drvdata->clk);
-       if (rc) {
-               dev_err(dev, "Failed to enable clock\n");
-               return rc;
-       }
-
-       return 0;
-}
-
-static void cc_trng_clk_fini(struct cctrng_drvdata *drvdata)
-{
-       clk_disable_unprepare(drvdata->clk);
-}
-
-
 static int cctrng_probe(struct platform_device *pdev)
 {
        struct cctrng_drvdata *drvdata;
@@ -492,6 +463,10 @@ static int cctrng_probe(struct platform_device *pdev)
        u32 val;
        int irq;
 
+       /* Compile time assertion checks */
+       BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
+       BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
+
        drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;
@@ -510,10 +485,8 @@ static int cctrng_probe(struct platform_device *pdev)
        drvdata->circ.buf = (char *)drvdata->data_buf;
 
        drvdata->cc_base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(drvdata->cc_base)) {
-               dev_err(dev, "Failed to ioremap registers");
-               return PTR_ERR(drvdata->cc_base);
-       }
+       if (IS_ERR(drvdata->cc_base))
+               return dev_err_probe(dev, PTR_ERR(drvdata->cc_base), "Failed to ioremap registers");
 
        /* Then IRQ */
        irq = platform_get_irq(pdev, 0);
@@ -522,16 +495,13 @@ static int cctrng_probe(struct platform_device *pdev)
 
        /* parse sampling rate from device tree */
        rc = cc_trng_parse_sampling_ratio(drvdata);
-       if (rc) {
-               dev_err(dev, "Failed to get legal sampling ratio for rosc\n");
-               return rc;
-       }
+       if (rc)
+               return dev_err_probe(dev, rc, "Failed to get legal sampling ratio for rosc\n");
 
-       rc = cc_trng_clk_init(drvdata);
-       if (rc) {
-               dev_err(dev, "cc_trng_clk_init failed\n");
-               return rc;
-       }
+       drvdata->clk = devm_clk_get_optional_enabled(dev, NULL);
+       if (IS_ERR(drvdata->clk))
+               return dev_err_probe(dev, PTR_ERR(drvdata->clk),
+                                    "Failed to get or enable the clock\n");
 
        INIT_WORK(&drvdata->compwork, cc_trng_compwork_handler);
        INIT_WORK(&drvdata->startwork, cc_trng_startwork_handler);
@@ -539,10 +509,8 @@ static int cctrng_probe(struct platform_device *pdev)
 
        /* register the driver isr function */
        rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "cctrng", drvdata);
-       if (rc) {
-               dev_err(dev, "Could not register to interrupt %d\n", irq);
-               goto post_clk_err;
-       }
+       if (rc)
+               return dev_err_probe(dev, rc, "Could not register to interrupt %d\n", irq);
        dev_dbg(dev, "Registered to IRQ: %d\n", irq);
 
        /* Clear all pending interrupts */
@@ -557,17 +525,13 @@ static int cctrng_probe(struct platform_device *pdev)
 
        /* init PM */
        rc = cc_trng_pm_init(drvdata);
-       if (rc) {
-               dev_err(dev, "cc_trng_pm_init failed\n");
-               goto post_clk_err;
-       }
+       if (rc)
+               return dev_err_probe(dev, rc, "cc_trng_pm_init failed\n");
 
        /* increment device's usage counter */
        rc = cc_trng_pm_get(dev);
-       if (rc) {
-               dev_err(dev, "cc_trng_pm_get returned %x\n", rc);
-               goto post_pm_err;
-       }
+       if (rc)
+               return dev_err_probe(dev, rc, "cc_trng_pm_get returned %x\n", rc);
 
        /* set pending_hw to verify that HW won't be triggered from read */
        atomic_set(&drvdata->pending_hw, 1);
@@ -593,9 +557,6 @@ static int cctrng_probe(struct platform_device *pdev)
 post_pm_err:
        cc_trng_pm_fini(drvdata);
 
-post_clk_err:
-       cc_trng_clk_fini(drvdata);
-
        return rc;
 }
 
@@ -608,8 +569,6 @@ static int cctrng_remove(struct platform_device *pdev)
 
        cc_trng_pm_fini(drvdata);
 
-       cc_trng_clk_fini(drvdata);
-
        dev_info(dev, "ARM cctrng device terminated\n");
 
        return 0;
@@ -698,21 +657,7 @@ static struct platform_driver cctrng_driver = {
        .remove = cctrng_remove,
 };
 
-static int __init cctrng_mod_init(void)
-{
-       /* Compile time assertion checks */
-       BUILD_BUG_ON(CCTRNG_DATA_BUF_WORDS < 6);
-       BUILD_BUG_ON((CCTRNG_DATA_BUF_WORDS & (CCTRNG_DATA_BUF_WORDS-1)) != 0);
-
-       return platform_driver_register(&cctrng_driver);
-}
-module_init(cctrng_mod_init);
-
-static void __exit cctrng_mod_exit(void)
-{
-       platform_driver_unregister(&cctrng_driver);
-}
-module_exit(cctrng_mod_exit);
+module_platform_driver(cctrng_driver);
 
 /* Module description */
 MODULE_DESCRIPTION("ARM CryptoCell TRNG Driver");
index 0cd7e1a..3193531 100644 (file)
@@ -187,10 +187,8 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        pci_set_drvdata(pdev, rng);
 
        rng->reg_base = pcim_iomap(pdev, 0, 0);
-       if (!rng->reg_base) {
-               dev_err(&pdev->dev, "Error while mapping CSRs, exiting\n");
-               return -ENOMEM;
-       }
+       if (!rng->reg_base)
+               return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
 
        rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
                                       "cn10k-rng-%s", dev_name(&pdev->dev));
@@ -205,19 +203,12 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        reset_rng_health_state(rng);
 
        err = devm_hwrng_register(&pdev->dev, &rng->ops);
-       if (err) {
-               dev_err(&pdev->dev, "Could not register hwrng device.\n");
-               return err;
-       }
+       if (err)
+               return dev_err_probe(&pdev->dev, err, "Could not register hwrng device.\n");
 
        return 0;
 }
 
-static void cn10k_rng_remove(struct pci_dev *pdev)
-{
-       /* Nothing to do */
-}
-
 static const struct pci_device_id cn10k_rng_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA098) }, /* RNG PF */
        {0,},
@@ -229,7 +220,6 @@ static struct pci_driver cn10k_rng_driver = {
        .name           = "cn10k_rng",
        .id_table       = cn10k_rng_id_table,
        .probe          = cn10k_rng_probe,
-       .remove         = cn10k_rng_remove,
 };
 
 module_pci_driver(cn10k_rng_driver);
index f34d356..e3598ec 100644 (file)
 #include <linux/err.h>
 #include <linux/fs.h>
 #include <linux/hw_random.h>
-#include <linux/random.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
-#include <linux/sched/signal.h>
 #include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/sched.h>
+#include <linux/sched/signal.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
index 9cc3d54..30207b7 100644 (file)
@@ -185,14 +185,14 @@ static int exynos_trng_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused exynos_trng_suspend(struct device *dev)
+static int exynos_trng_suspend(struct device *dev)
 {
        pm_runtime_put_sync(dev);
 
        return 0;
 }
 
-static int __maybe_unused exynos_trng_resume(struct device *dev)
+static int exynos_trng_resume(struct device *dev)
 {
        int ret;
 
@@ -205,7 +205,7 @@ static int __maybe_unused exynos_trng_resume(struct device *dev)
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
+static DEFINE_SIMPLE_DEV_PM_OPS(exynos_trng_pm_ops, exynos_trng_suspend,
                         exynos_trng_resume);
 
 static const struct of_device_id exynos_trng_dt_match[] = {
@@ -219,7 +219,7 @@ MODULE_DEVICE_TABLE(of, exynos_trng_dt_match);
 static struct platform_driver exynos_trng_driver = {
        .driver = {
                .name = "exynos-trng",
-               .pm = &exynos_trng_pm_ops,
+               .pm = pm_sleep_ptr(&exynos_trng_pm_ops),
                .of_match_table = exynos_trng_dt_match,
        },
        .probe = exynos_trng_probe,
index bf07f17..e4b385b 100644 (file)
@@ -239,10 +239,8 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
                return PTR_ERR(rngc->base);
 
        rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
-       if (IS_ERR(rngc->clk)) {
-               dev_err(&pdev->dev, "Can not get rng_clk\n");
-               return PTR_ERR(rngc->clk);
-       }
+       if (IS_ERR(rngc->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n");
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
@@ -272,24 +270,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev)
 
        ret = devm_request_irq(&pdev->dev,
                        irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
-       if (ret) {
-               dev_err(rngc->dev, "Can't get interrupt working.\n");
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n");
 
        if (self_test) {
                ret = imx_rngc_self_test(rngc);
-               if (ret) {
-                       dev_err(rngc->dev, "self test failed\n");
-                       return ret;
-               }
+               if (ret)
+                       return dev_err_probe(&pdev->dev, ret, "self test failed\n");
        }
 
        ret = devm_hwrng_register(&pdev->dev, &rngc->rng);
-       if (ret) {
-               dev_err(&pdev->dev, "hwrng registration failed\n");
-               return ret;
-       }
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n");
 
        dev_info(&pdev->dev,
                "Freescale RNG%c registered (HW revision %d.%02d)\n",
index 055cfe5..4f18c3f 100644 (file)
@@ -95,7 +95,7 @@ static int ingenic_rng_probe(struct platform_device *pdev)
                return PTR_ERR(priv->base);
        }
 
-       priv->version = (enum ingenic_rng_version)of_device_get_match_data(&pdev->dev);
+       priv->version = (enum ingenic_rng_version)(uintptr_t)of_device_get_match_data(&pdev->dev);
 
        priv->rng.name = pdev->name;
        priv->rng.init = ingenic_rng_init;
index 0eb80f7..1672320 100644 (file)
@@ -11,8 +11,8 @@
 #include <linux/hw_random.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -22,8 +22,6 @@
 #define TRNG_REG_STATUS_OFFSET         0x08
 
 /* bits within the CFG register */
-#define CFG_RDY_CLR                                    BIT(12)
-#define CFG_INT_MASK                           BIT(11)
 #define CFG_GEN_EN                                     BIT(0)
 
 /* bits within the STATUS register */
@@ -31,7 +29,6 @@
 
 struct ingenic_trng {
        void __iomem *base;
-       struct clk *clk;
        struct hwrng rng;
 };
 
@@ -79,6 +76,7 @@ static int ingenic_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait
 static int ingenic_trng_probe(struct platform_device *pdev)
 {
        struct ingenic_trng *trng;
+       struct clk *clk;
        int ret;
 
        trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
@@ -86,60 +84,28 @@ static int ingenic_trng_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        trng->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(trng->base)) {
-               pr_err("%s: Failed to map DTRNG registers\n", __func__);
-               ret = PTR_ERR(trng->base);
-               return PTR_ERR(trng->base);
-       }
+       if (IS_ERR(trng->base))
+               return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
+                                    "%s: Failed to map DTRNG registers\n", __func__);
 
-       trng->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(trng->clk)) {
-               ret = PTR_ERR(trng->clk);
-               pr_crit("%s: Cannot get DTRNG clock\n", __func__);
-               return PTR_ERR(trng->clk);
-       }
-
-       ret = clk_prepare_enable(trng->clk);
-       if (ret) {
-               pr_crit("%s: Unable to enable DTRNG clock\n", __func__);
-               return ret;
-       }
+       clk = devm_clk_get_enabled(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+                                    "%s: Cannot get and enable DTRNG clock\n", __func__);
 
        trng->rng.name = pdev->name;
        trng->rng.init = ingenic_trng_init;
        trng->rng.cleanup = ingenic_trng_cleanup;
        trng->rng.read = ingenic_trng_read;
 
-       ret = hwrng_register(&trng->rng);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to register hwrng\n");
-               goto err_unprepare_clk;
-       }
+       ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
 
        platform_set_drvdata(pdev, trng);
 
        dev_info(&pdev->dev, "Ingenic DTRNG driver registered\n");
        return 0;
-
-err_unprepare_clk:
-       clk_disable_unprepare(trng->clk);
-       return ret;
-}
-
-static int ingenic_trng_remove(struct platform_device *pdev)
-{
-       struct ingenic_trng *trng = platform_get_drvdata(pdev);
-       unsigned int ctrl;
-
-       hwrng_unregister(&trng->rng);
-
-       ctrl = readl(trng->base + TRNG_REG_CFG_OFFSET);
-       ctrl &= ~CFG_GEN_EN;
-       writel(ctrl, trng->base + TRNG_REG_CFG_OFFSET);
-
-       clk_disable_unprepare(trng->clk);
-
-       return 0;
 }
 
 static const struct of_device_id ingenic_trng_of_match[] = {
@@ -150,7 +116,6 @@ MODULE_DEVICE_TABLE(of, ingenic_trng_of_match);
 
 static struct platform_driver ingenic_trng_driver = {
        .probe          = ingenic_trng_probe,
-       .remove         = ingenic_trng_remove,
        .driver         = {
                .name   = "ingenic-trng",
                .of_match_table = ingenic_trng_of_match,
index 06bc060..440fe28 100644 (file)
@@ -12,8 +12,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 
@@ -182,6 +181,8 @@ static int iproc_rng200_probe(struct platform_device *pdev)
                return PTR_ERR(priv->base);
        }
 
+       dev_set_drvdata(dev, priv);
+
        priv->rng.name = "iproc-rng200";
        priv->rng.read = iproc_rng200_read;
        priv->rng.init = iproc_rng200_init;
@@ -199,6 +200,28 @@ static int iproc_rng200_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int __maybe_unused iproc_rng200_suspend(struct device *dev)
+{
+       struct iproc_rng200_dev *priv = dev_get_drvdata(dev);
+
+       iproc_rng200_cleanup(&priv->rng);
+
+       return 0;
+}
+
+static int __maybe_unused iproc_rng200_resume(struct device *dev)
+{
+       struct iproc_rng200_dev *priv =  dev_get_drvdata(dev);
+
+       iproc_rng200_init(&priv->rng);
+
+       return 0;
+}
+
+static const struct dev_pm_ops iproc_rng200_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(iproc_rng200_suspend, iproc_rng200_resume)
+};
+
 static const struct of_device_id iproc_rng200_of_match[] = {
        { .compatible = "brcm,bcm2711-rng200", },
        { .compatible = "brcm,bcm7211-rng200", },
@@ -212,6 +235,7 @@ static struct platform_driver iproc_rng200_driver = {
        .driver = {
                .name           = "iproc-rng200",
                .of_match_table = iproc_rng200_of_match,
+               .pm             = &iproc_rng200_pm_ops,
        },
        .probe          = iproc_rng200_probe,
 };
index e8f9621..8c6a40d 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/clk.h>
 #include <linux/err.h>
 
-static struct clk *rng_clk;
-
 static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
        void __iomem *base = (void __iomem *)rng->priv;
@@ -36,21 +34,17 @@ static struct hwrng nmk_rng = {
 
 static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 {
+       struct clk *rng_clk;
        void __iomem *base;
        int ret;
 
-       rng_clk = devm_clk_get(&dev->dev, NULL);
-       if (IS_ERR(rng_clk)) {
-               dev_err(&dev->dev, "could not get rng clock\n");
-               ret = PTR_ERR(rng_clk);
-               return ret;
-       }
-
-       clk_prepare_enable(rng_clk);
+       rng_clk = devm_clk_get_enabled(&dev->dev, NULL);
+       if (IS_ERR(rng_clk))
+               return dev_err_probe(&dev->dev, PTR_ERR(rng_clk), "could not get rng clock\n");
 
        ret = amba_request_regions(dev, dev->dev.init_name);
        if (ret)
-               goto out_clk;
+               return ret;
        ret = -ENOMEM;
        base = devm_ioremap(&dev->dev, dev->res.start,
                            resource_size(&dev->res));
@@ -64,15 +58,12 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
 
 out_release:
        amba_release_regions(dev);
-out_clk:
-       clk_disable_unprepare(rng_clk);
        return ret;
 }
 
 static void nmk_rng_remove(struct amba_device *dev)
 {
        amba_release_regions(dev);
-       clk_disable_unprepare(rng_clk);
 }
 
 static const struct amba_id nmk_rng_ids[] = {
index 9903d03..8a304b7 100644 (file)
@@ -8,12 +8,11 @@
 #include <linux/init.h>
 #include <linux/random.h>
 #include <linux/err.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
 #include <linux/delay.h>
-#include <linux/of_irq.h>
 #include <linux/pm_runtime.h>
-#include <linux/of_device.h>
 
 #define NPCM_RNGCS_REG         0x00    /* Control and status register */
 #define NPCM_RNGD_REG          0x04    /* Data register */
index 00ff967..be03f76 100644 (file)
@@ -26,8 +26,6 @@
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
 #include <linux/io.h>
index f06e4f9..18dc46b 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
index 2498d4e..6959d6e 100644 (file)
@@ -9,11 +9,10 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/hw_random.h>
 #include <linux/delay.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
 #include <linux/io.h>
 
 #define SDCRNG_CTL_REG                 0x00
index 99c8bd0..888e6f5 100644 (file)
 #include <linux/hw_random.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
 #define RNGCON         0x04
-#define  TRNGEN                BIT(8)
-#define  PRNGEN                BIT(9)
-#define  PRNGCONT      BIT(10)
-#define  TRNGMOD       BIT(11)
-#define  SEEDLOAD      BIT(12)
-#define RNGPOLY1       0x08
-#define RNGPOLY2       0x0C
-#define RNGNUMGEN1     0x10
-#define RNGNUMGEN2     0x14
+#define TRNGEN         BIT(8)
+#define TRNGMOD                BIT(11)
 #define RNGSEED1       0x18
 #define RNGSEED2       0x1C
 #define RNGRCNT                0x20
-#define  RCNT_MASK     0x7F
+#define RCNT_MASK      0x7F
 
 struct pic32_rng {
        void __iomem    *base;
        struct hwrng    rng;
-       struct clk      *clk;
 };
 
 /*
@@ -46,6 +37,15 @@ struct pic32_rng {
  */
 #define RNG_TIMEOUT 500
 
+static int pic32_rng_init(struct hwrng *rng)
+{
+       struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+
+       /* enable TRNG in enhanced mode */
+       writel(TRNGEN | TRNGMOD, priv->base + RNGCON);
+       return 0;
+}
+
 static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
                          bool wait)
 {
@@ -67,11 +67,17 @@ static int pic32_rng_read(struct hwrng *rng, void *buf, size_t max,
        return -EIO;
 }
 
+static void pic32_rng_cleanup(struct hwrng *rng)
+{
+       struct pic32_rng *priv = container_of(rng, struct pic32_rng, rng);
+
+       writel(0, priv->base + RNGCON);
+}
+
 static int pic32_rng_probe(struct platform_device *pdev)
 {
        struct pic32_rng *priv;
-       u32 v;
-       int ret;
+       struct clk *clk;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -81,41 +87,16 @@ static int pic32_rng_probe(struct platform_device *pdev)
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);
 
-       priv->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(priv->clk))
-               return PTR_ERR(priv->clk);
-
-       ret = clk_prepare_enable(priv->clk);
-       if (ret)
-               return ret;
-
-       /* enable TRNG in enhanced mode */
-       v = TRNGEN | TRNGMOD;
-       writel(v, priv->base + RNGCON);
+       clk = devm_clk_get_enabled(&pdev->dev, NULL);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
 
        priv->rng.name = pdev->name;
+       priv->rng.init = pic32_rng_init;
        priv->rng.read = pic32_rng_read;
+       priv->rng.cleanup = pic32_rng_cleanup;
 
-       ret = devm_hwrng_register(&pdev->dev, &priv->rng);
-       if (ret)
-               goto err_register;
-
-       platform_set_drvdata(pdev, priv);
-
-       return 0;
-
-err_register:
-       clk_disable_unprepare(priv->clk);
-       return ret;
-}
-
-static int pic32_rng_remove(struct platform_device *pdev)
-{
-       struct pic32_rng *rng = platform_get_drvdata(pdev);
-
-       writel(0, rng->base + RNGCON);
-       clk_disable_unprepare(rng->clk);
-       return 0;
+       return devm_hwrng_register(&pdev->dev, &priv->rng);
 }
 
 static const struct of_device_id pic32_rng_of_match[] __maybe_unused = {
@@ -126,10 +107,9 @@ MODULE_DEVICE_TABLE(of, pic32_rng_of_match);
 
 static struct platform_driver pic32_rng_driver = {
        .probe          = pic32_rng_probe,
-       .remove         = pic32_rng_remove,
        .driver         = {
                .name   = "pic32-rng",
-               .of_match_table = of_match_ptr(pic32_rng_of_match),
+               .of_match_table = pic32_rng_of_match,
        },
 };
 
index a6731cf..efb6a9f 100644 (file)
@@ -10,8 +10,9 @@
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/slab.h>
index 26f322d..3db9d86 100644 (file)
@@ -113,16 +113,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -ENXIO;
-
-       if (res->start % 4 != 0 || resource_size(res) < 4) {
-               dev_err(&pdev->dev,
-                       "address must be at least four bytes wide and 32-bit aligned\n");
-               return -EINVAL;
-       }
-
        /* Allocate memory for the device structure (and zero it) */
        priv = devm_kzalloc(&pdev->dev,
                        sizeof(struct timeriomem_rng_private), GFP_KERNEL);
@@ -131,6 +121,16 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
+       priv->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+       if (IS_ERR(priv->io_base))
+               return PTR_ERR(priv->io_base);
+
+       if (res->start % 4 != 0 || resource_size(res) < 4) {
+               dev_err(&pdev->dev,
+                       "address must be at least four bytes wide and 32-bit aligned\n");
+               return -EINVAL;
+       }
+
        if (pdev->dev.of_node) {
                int i;
 
@@ -158,11 +158,6 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
        priv->rng_ops.name = dev_name(&pdev->dev);
        priv->rng_ops.read = timeriomem_rng_read;
 
-       priv->io_base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(priv->io_base)) {
-               return PTR_ERR(priv->io_base);
-       }
-
        /* Assume random data is already available. */
        priv->present = 1;
        complete(&priv->completion);
index 7c8f3cb..99f4e86 100644 (file)
 #include <linux/hw_random.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
 #include <linux/timer.h>
 
 #define RNG_MAX_DATUM                  4
index 2a9fea7..2c586d1 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/hw_random.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 
index 51a3a7b..3bcfcfc 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <crypto/scatterwalk.h>
 #include <linux/scatterlist.h>
index c135500..8d4c428 100644 (file)
@@ -29,7 +29,7 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
        struct sun8i_ce_alg_template *algt;
        unsigned int todo, len;
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
 
        if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG ||
            sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) {
@@ -92,13 +92,18 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
        struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        int err;
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct sun8i_ce_alg_template *algt;
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
-       algt->stat_fb++;
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+               struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+               struct sun8i_ce_alg_template *algt __maybe_unused;
+
+               algt = container_of(alg, struct sun8i_ce_alg_template,
+                                   alg.skcipher.base);
+
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+               algt->stat_fb++;
 #endif
+       }
 
        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
@@ -133,7 +138,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
        int ns = sg_nents_for_len(areq->src, areq->cryptlen);
        int nd = sg_nents_for_len(areq->dst, areq->cryptlen);
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
 
        dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
                crypto_tfm_alg_name(areq->base.tfm),
@@ -294,7 +299,7 @@ theend:
        return err;
 }
 
-static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
 {
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
@@ -308,10 +313,10 @@ static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
        local_bh_disable();
        crypto_finalize_skcipher_request(engine, breq, err);
        local_bh_enable();
-       return 0;
 }
 
-static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
+static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
+                                     void *async_req)
 {
        struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -353,7 +358,17 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r
        }
 
        dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
+{
+       int err = sun8i_ce_cipher_prepare(engine, areq);
 
+       if (err)
+               return err;
+
+       sun8i_ce_cipher_run(engine, areq);
+       sun8i_ce_cipher_unprepare(engine, areq);
        return 0;
 }
 
@@ -406,7 +421,7 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
 
        memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base);
        op->ce = algt->ce;
 
        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -423,10 +438,6 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
               crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
               CRYPTO_MAX_ALG_NAME);
 
-       op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
-       op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
-       op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;
-
        err = pm_runtime_get_sync(op->ce->dev);
        if (err < 0)
                goto error_pm;
index 07ea0cc..d4ccd52 100644 (file)
@@ -9,21 +9,24 @@
  *
  * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
  */
+
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
 
 #include "sun8i-ce.h"
 
@@ -277,7 +280,7 @@ static struct sun8i_ce_alg_template ce_algs[] = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ce_algo_id = CE_ID_CIPHER_AES,
        .ce_blockmode = CE_ID_OP_CBC,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "cbc-aes-sun8i-ce",
@@ -298,13 +301,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .setkey         = sun8i_ce_aes_setkey,
                .encrypt        = sun8i_ce_skencrypt,
                .decrypt        = sun8i_ce_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ce_cipher_do_one,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ce_algo_id = CE_ID_CIPHER_AES,
        .ce_blockmode = CE_ID_OP_ECB,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "ecb-aes-sun8i-ce",
@@ -324,13 +330,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .setkey         = sun8i_ce_aes_setkey,
                .encrypt        = sun8i_ce_skencrypt,
                .decrypt        = sun8i_ce_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ce_cipher_do_one,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ce_algo_id = CE_ID_CIPHER_DES3,
        .ce_blockmode = CE_ID_OP_CBC,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "cbc(des3_ede)",
                        .cra_driver_name = "cbc-des3-sun8i-ce",
@@ -351,13 +360,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .setkey         = sun8i_ce_des3_setkey,
                .encrypt        = sun8i_ce_skencrypt,
                .decrypt        = sun8i_ce_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ce_cipher_do_one,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ce_algo_id = CE_ID_CIPHER_DES3,
        .ce_blockmode = CE_ID_OP_ECB,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "ecb(des3_ede)",
                        .cra_driver_name = "ecb-des3-sun8i-ce",
@@ -377,12 +389,15 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .setkey         = sun8i_ce_des3_setkey,
                .encrypt        = sun8i_ce_skencrypt,
                .decrypt        = sun8i_ce_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ce_cipher_do_one,
+       },
 },
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ce_algo_id = CE_ID_HASH_MD5,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ce_hash_init,
                .update = sun8i_ce_hash_update,
                .final = sun8i_ce_hash_final,
@@ -390,6 +405,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .digest = sun8i_ce_hash_digest,
                .export = sun8i_ce_hash_export,
                .import = sun8i_ce_hash_import,
+               .init_tfm = sun8i_ce_hash_init_tfm,
+               .exit_tfm = sun8i_ce_hash_exit_tfm,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
@@ -404,15 +421,17 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ce_hash_crainit,
-                               .cra_exit = sun8i_ce_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ce_hash_run,
+       },
+
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ce_algo_id = CE_ID_HASH_SHA1,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ce_hash_init,
                .update = sun8i_ce_hash_update,
                .final = sun8i_ce_hash_final,
@@ -420,6 +439,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .digest = sun8i_ce_hash_digest,
                .export = sun8i_ce_hash_export,
                .import = sun8i_ce_hash_import,
+               .init_tfm = sun8i_ce_hash_init_tfm,
+               .exit_tfm = sun8i_ce_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
@@ -434,15 +455,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ce_hash_crainit,
-                               .cra_exit = sun8i_ce_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ce_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ce_algo_id = CE_ID_HASH_SHA224,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ce_hash_init,
                .update = sun8i_ce_hash_update,
                .final = sun8i_ce_hash_final,
@@ -450,6 +472,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .digest = sun8i_ce_hash_digest,
                .export = sun8i_ce_hash_export,
                .import = sun8i_ce_hash_import,
+               .init_tfm = sun8i_ce_hash_init_tfm,
+               .exit_tfm = sun8i_ce_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
@@ -464,15 +488,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ce_hash_crainit,
-                               .cra_exit = sun8i_ce_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ce_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ce_algo_id = CE_ID_HASH_SHA256,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ce_hash_init,
                .update = sun8i_ce_hash_update,
                .final = sun8i_ce_hash_final,
@@ -480,6 +505,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .digest = sun8i_ce_hash_digest,
                .export = sun8i_ce_hash_export,
                .import = sun8i_ce_hash_import,
+               .init_tfm = sun8i_ce_hash_init_tfm,
+               .exit_tfm = sun8i_ce_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
@@ -494,15 +521,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ce_hash_crainit,
-                               .cra_exit = sun8i_ce_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ce_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ce_algo_id = CE_ID_HASH_SHA384,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ce_hash_init,
                .update = sun8i_ce_hash_update,
                .final = sun8i_ce_hash_final,
@@ -510,6 +538,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .digest = sun8i_ce_hash_digest,
                .export = sun8i_ce_hash_export,
                .import = sun8i_ce_hash_import,
+               .init_tfm = sun8i_ce_hash_init_tfm,
+               .exit_tfm = sun8i_ce_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA384_DIGEST_SIZE,
                        .statesize = sizeof(struct sha512_state),
@@ -524,15 +554,16 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                                .cra_blocksize = SHA384_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ce_hash_crainit,
-                               .cra_exit = sun8i_ce_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ce_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ce_algo_id = CE_ID_HASH_SHA512,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ce_hash_init,
                .update = sun8i_ce_hash_update,
                .final = sun8i_ce_hash_final,
@@ -540,6 +571,8 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                .digest = sun8i_ce_hash_digest,
                .export = sun8i_ce_hash_export,
                .import = sun8i_ce_hash_import,
+               .init_tfm = sun8i_ce_hash_init_tfm,
+               .exit_tfm = sun8i_ce_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA512_DIGEST_SIZE,
                        .statesize = sizeof(struct sha512_state),
@@ -554,11 +587,12 @@ static struct sun8i_ce_alg_template ce_algs[] = {
                                .cra_blocksize = SHA512_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ce_hash_crainit,
-                               .cra_exit = sun8i_ce_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ce_hash_run,
+       },
 },
 #endif
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG
@@ -582,14 +616,18 @@ static struct sun8i_ce_alg_template ce_algs[] = {
 #endif
 };
 
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
 static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
 {
-       struct sun8i_ce_dev *ce = seq->private;
+       struct sun8i_ce_dev *ce __maybe_unused = seq->private;
        unsigned int i;
 
        for (i = 0; i < MAXFLOW; i++)
-               seq_printf(seq, "Channel %d: nreq %lu\n", i, ce->chanlist[i].stat_req);
+               seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
+                          ce->chanlist[i].stat_req);
+#else
+                          0ul);
+#endif
 
        for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
                if (!ce_algs[i].ce)
@@ -597,8 +635,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
                switch (ce_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  ce_algs[i].alg.skcipher.base.cra_driver_name,
-                                  ce_algs[i].alg.skcipher.base.cra_name,
+                                  ce_algs[i].alg.skcipher.base.base.cra_driver_name,
+                                  ce_algs[i].alg.skcipher.base.base.cra_name,
                                   ce_algs[i].stat_req, ce_algs[i].stat_fb);
                        seq_printf(seq, "\tLast fallback is: %s\n",
                                   ce_algs[i].fbname);
@@ -621,8 +659,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  ce_algs[i].alg.hash.halg.base.cra_driver_name,
-                                  ce_algs[i].alg.hash.halg.base.cra_name,
+                                  ce_algs[i].alg.hash.base.halg.base.cra_driver_name,
+                                  ce_algs[i].alg.hash.base.halg.base.cra_name,
                                   ce_algs[i].stat_req, ce_algs[i].stat_fb);
                        seq_printf(seq, "\tLast fallback is: %s\n",
                                   ce_algs[i].fbname);
@@ -643,7 +681,8 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
                        break;
                }
        }
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG
+#if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \
+    defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)
        seq_printf(seq, "HWRNG %lu %lu\n",
                   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
 #endif
@@ -651,7 +690,6 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v)
 }
 
 DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs);
-#endif
 
 static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i)
 {
@@ -839,7 +877,7 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
                        if (ce_method == CE_ID_NOTSUPP) {
                                dev_dbg(ce->dev,
                                        "DEBUG: Algo of %s not supported\n",
-                                       ce_algs[i].alg.skcipher.base.cra_name);
+                                       ce_algs[i].alg.skcipher.base.base.cra_name);
                                ce_algs[i].ce = NULL;
                                break;
                        }
@@ -847,16 +885,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
                        ce_method = ce->variant->op_mode[id];
                        if (ce_method == CE_ID_NOTSUPP) {
                                dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n",
-                                       ce_algs[i].alg.skcipher.base.cra_name);
+                                       ce_algs[i].alg.skcipher.base.base.cra_name);
                                ce_algs[i].ce = NULL;
                                break;
                        }
                        dev_info(ce->dev, "Register %s\n",
-                                ce_algs[i].alg.skcipher.base.cra_name);
-                       err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+                                ce_algs[i].alg.skcipher.base.base.cra_name);
+                       err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
                        if (err) {
                                dev_err(ce->dev, "ERROR: Fail to register %s\n",
-                                       ce_algs[i].alg.skcipher.base.cra_name);
+                                       ce_algs[i].alg.skcipher.base.base.cra_name);
                                ce_algs[i].ce = NULL;
                                return err;
                        }
@@ -867,16 +905,16 @@ static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce)
                        if (ce_method == CE_ID_NOTSUPP) {
                                dev_info(ce->dev,
                                         "DEBUG: Algo of %s not supported\n",
-                                        ce_algs[i].alg.hash.halg.base.cra_name);
+                                        ce_algs[i].alg.hash.base.halg.base.cra_name);
                                ce_algs[i].ce = NULL;
                                break;
                        }
                        dev_info(ce->dev, "Register %s\n",
-                                ce_algs[i].alg.hash.halg.base.cra_name);
-                       err = crypto_register_ahash(&ce_algs[i].alg.hash);
+                                ce_algs[i].alg.hash.base.halg.base.cra_name);
+                       err = crypto_engine_register_ahash(&ce_algs[i].alg.hash);
                        if (err) {
                                dev_err(ce->dev, "ERROR: Fail to register %s\n",
-                                       ce_algs[i].alg.hash.halg.base.cra_name);
+                                       ce_algs[i].alg.hash.base.halg.base.cra_name);
                                ce_algs[i].ce = NULL;
                                return err;
                        }
@@ -916,13 +954,13 @@ static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce)
                switch (ce_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        dev_info(ce->dev, "Unregister %d %s\n", i,
-                                ce_algs[i].alg.skcipher.base.cra_name);
-                       crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+                                ce_algs[i].alg.skcipher.base.base.cra_name);
+                       crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        dev_info(ce->dev, "Unregister %d %s\n", i,
-                                ce_algs[i].alg.hash.halg.base.cra_name);
-                       crypto_unregister_ahash(&ce_algs[i].alg.hash);
+                                ce_algs[i].alg.hash.base.halg.base.cra_name);
+                       crypto_engine_unregister_ahash(&ce_algs[i].alg.hash);
                        break;
                case CRYPTO_ALG_TYPE_RNG:
                        dev_info(ce->dev, "Unregister %d %s\n", i,
@@ -1007,13 +1045,21 @@ static int sun8i_ce_probe(struct platform_device *pdev)
 
        pm_runtime_put_sync(ce->dev);
 
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+               struct dentry *dbgfs_dir __maybe_unused;
+               struct dentry *dbgfs_stats __maybe_unused;
+
+               /* Ignore error of debugfs */
+               dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
+               dbgfs_stats = debugfs_create_file("stats", 0444,
+                                                 dbgfs_dir, ce,
+                                                 &sun8i_ce_debugfs_fops);
+
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       /* Ignore error of debugfs */
-       ce->dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL);
-       ce->dbgfs_stats = debugfs_create_file("stats", 0444,
-                                             ce->dbgfs_dir, ce,
-                                             &sun8i_ce_debugfs_fops);
+               ce->dbgfs_dir = dbgfs_dir;
+               ce->dbgfs_stats = dbgfs_stats;
 #endif
+       }
 
        return 0;
 error_alg:
index 930ad15..d358334 100644 (file)
@@ -9,48 +9,46 @@
  *
  * You could find the datasheet in Documentation/arch/arm/sunxi.rst
  */
+
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <linux/bottom_half.h>
 #include <linux/dma-mapping.h>
+#include <linux/kernel.h>
 #include <linux/pm_runtime.h>
 #include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include "sun8i-ce.h"
 
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
 {
-       struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+       struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct sun8i_ce_alg_template *algt;
        int err;
 
-       memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));
-
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
        op->ce = algt->ce;
 
-       op->enginectx.op.do_one_request = sun8i_ce_hash_run;
-       op->enginectx.op.prepare_request = NULL;
-       op->enginectx.op.unprepare_request = NULL;
-
        /* FALLBACK */
-       op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+       op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
                                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(algt->ce->dev, "Fallback driver could no be loaded\n");
                return PTR_ERR(op->fallback_tfm);
        }
 
-       if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
-               algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+       crypto_ahash_set_statesize(tfm,
+                                  crypto_ahash_statesize(op->fallback_tfm));
 
-       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+       crypto_ahash_set_reqsize(tfm,
                                 sizeof(struct sun8i_ce_hash_reqctx) +
                                 crypto_ahash_reqsize(op->fallback_tfm));
 
-       memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base),
+       memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
               CRYPTO_MAX_ALG_NAME);
 
        err = pm_runtime_get_sync(op->ce->dev);
@@ -63,9 +61,9 @@ error_pm:
        return err;
 }
 
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
 {
-       struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+       struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
        crypto_free_ahash(tfmctx->fallback_tfm);
        pm_runtime_put_sync_suspend(tfmctx->ce->dev);
@@ -114,20 +112,22 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
        struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct sun8i_ce_alg_template *algt;
-#endif
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = areq->result;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+               struct sun8i_ce_alg_template *algt __maybe_unused;
+               struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+               algt = container_of(alg, struct sun8i_ce_alg_template,
+                                   alg.hash.base);
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-       algt->stat_fb++;
+               algt->stat_fb++;
 #endif
+       }
 
        return crypto_ahash_final(&rctx->fallback_req);
 }
@@ -152,10 +152,6 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
        struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct sun8i_ce_alg_template *algt;
-#endif
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
@@ -164,10 +160,17 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+               struct sun8i_ce_alg_template *algt __maybe_unused;
+               struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+               algt = container_of(alg, struct sun8i_ce_alg_template,
+                                   alg.hash.base);
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-       algt->stat_fb++;
+               algt->stat_fb++;
 #endif
+       }
 
        return crypto_ahash_finup(&rctx->fallback_req);
 }
@@ -177,10 +180,6 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
        struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct sun8i_ce_alg_template *algt;
-#endif
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
@@ -189,10 +188,17 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
+               struct sun8i_ce_alg_template *algt __maybe_unused;
+               struct ahash_alg *alg = crypto_ahash_alg(tfm);
+
+               algt = container_of(alg, struct sun8i_ce_alg_template,
+                                   alg.hash.base);
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
-       algt->stat_fb++;
+               algt->stat_fb++;
 #endif
+       }
 
        return crypto_ahash_digest(&rctx->fallback_req);
 }
@@ -204,7 +210,7 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
        struct sun8i_ce_alg_template *algt;
        struct scatterlist *sg;
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
 
        if (areq->nbytes == 0) {
                algt->stat_fb_len0++;
@@ -253,7 +259,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq)
                        return sun8i_ce_hash_digest_fb(areq);
        }
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
        ce = algt->ce;
 
        e = sun8i_ce_get_engine_number(ce);
@@ -345,11 +351,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
        dma_addr_t addr_res, addr_pad;
        int ns = sg_nents_for_len(areq->src, areq->nbytes);
 
-       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
        ce = algt->ce;
 
-       bs = algt->alg.hash.halg.base.cra_blocksize;
-       digestsize = algt->alg.hash.halg.digestsize;
+       bs = algt->alg.hash.base.halg.base.cra_blocksize;
+       digestsize = algt->alg.hash.base.halg.digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                digestsize = SHA256_DIGEST_SIZE;
        if (digestsize == SHA384_DIGEST_SIZE)
@@ -454,14 +460,14 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
 
        chan->timeout = areq->nbytes;
 
-       err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));
+       err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
 
        dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
        dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
        dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
 
 
-       memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+       memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize);
 theend:
        kfree(buf);
        kfree(result);
index 27029fb..93d4985 100644 (file)
@@ -265,14 +265,12 @@ struct sun8i_cipher_req_ctx {
 
 /*
  * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx:         crypto_engine used by this TFM
  * @key:               pointer to key data
  * @keylen:            len of the key
  * @ce:                        pointer to the private data of driver handling this TFM
  * @fallback_tfm:      pointer to the fallback TFM
  */
 struct sun8i_cipher_tfm_ctx {
-       struct crypto_engine_ctx enginectx;
        u32 *key;
        u32 keylen;
        struct sun8i_ce_dev *ce;
@@ -281,12 +279,10 @@ struct sun8i_cipher_tfm_ctx {
 
 /*
  * struct sun8i_ce_hash_tfm_ctx - context for an ahash TFM
- * @enginectx:         crypto_engine used by this TFM
  * @ce:                        pointer to the private data of driver handling this TFM
  * @fallback_tfm:      pointer to the fallback TFM
  */
 struct sun8i_ce_hash_tfm_ctx {
-       struct crypto_engine_ctx enginectx;
        struct sun8i_ce_dev *ce;
        struct crypto_ahash *fallback_tfm;
 };
@@ -329,8 +325,8 @@ struct sun8i_ce_alg_template {
        u32 ce_blockmode;
        struct sun8i_ce_dev *ce;
        union {
-               struct skcipher_alg skcipher;
-               struct ahash_alg hash;
+               struct skcipher_engine_alg skcipher;
+               struct ahash_engine_alg hash;
                struct rng_alg rng;
        } alg;
        unsigned long stat_req;
@@ -347,14 +343,13 @@ struct sun8i_ce_alg_template {
        char fbname[CRYPTO_MAX_ALG_NAME];
 };
 
-int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type);
-
 int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen);
 int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen);
 int sun8i_ce_cipher_init(struct crypto_tfm *tfm);
 void sun8i_ce_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq);
 int sun8i_ce_skdecrypt(struct skcipher_request *areq);
 int sun8i_ce_skencrypt(struct skcipher_request *areq);
 
@@ -362,12 +357,11 @@ int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce);
 
 int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name);
 
-int sun8i_ce_hash_crainit(struct crypto_tfm *tfm);
-void sun8i_ce_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm);
 int sun8i_ce_hash_init(struct ahash_request *areq);
 int sun8i_ce_hash_export(struct ahash_request *areq, void *out);
 int sun8i_ce_hash_import(struct ahash_request *areq, const void *in);
-int sun8i_ce_hash(struct ahash_request *areq);
 int sun8i_ce_hash_final(struct ahash_request *areq);
 int sun8i_ce_hash_update(struct ahash_request *areq);
 int sun8i_ce_hash_finup(struct ahash_request *areq);
index 381a90f..7fa3597 100644 (file)
@@ -24,7 +24,7 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+       struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        struct scatterlist *sg;
@@ -93,13 +93,18 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
        struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
        int err;
 
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct sun8i_ss_alg_template *algt;
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+               struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+               struct sun8i_ss_alg_template *algt __maybe_unused;
+
+               algt = container_of(alg, struct sun8i_ss_alg_template,
+                                   alg.skcipher.base);
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
-       algt->stat_fb++;
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+               algt->stat_fb++;
 #endif
+       }
+
        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
@@ -193,7 +198,7 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
        int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen);
        int i;
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
 
        dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
                crypto_tfm_alg_name(areq->base.tfm),
@@ -324,7 +329,7 @@ theend:
        return err;
 }
 
-static int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq)
 {
        int err;
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -390,7 +395,7 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
 
        memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base);
        op->ss = algt->ss;
 
        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -408,10 +413,6 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
               crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
               CRYPTO_MAX_ALG_NAME);
 
-       op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request;
-       op->enginectx.op.prepare_request = NULL;
-       op->enginectx.op.unprepare_request = NULL;
-
        err = pm_runtime_resume_and_get(op->ss->dev);
        if (err < 0) {
                dev_err(op->ss->dev, "pm error %d\n", err);
index 3dd844b..4a95872 100644 (file)
@@ -9,22 +9,23 @@
  *
  * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst
  */
+
+#include <crypto/engine.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
 
 #include "sun8i-ss.h"
 
@@ -168,7 +169,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ss_algo_id = SS_ID_CIPHER_AES,
        .ss_blockmode = SS_ID_OP_CBC,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "cbc-aes-sun8i-ss",
@@ -189,13 +190,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .setkey         = sun8i_ss_aes_setkey,
                .encrypt        = sun8i_ss_skencrypt,
                .decrypt        = sun8i_ss_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ss_handle_cipher_request,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ss_algo_id = SS_ID_CIPHER_AES,
        .ss_blockmode = SS_ID_OP_ECB,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "ecb-aes-sun8i-ss",
@@ -215,13 +219,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .setkey         = sun8i_ss_aes_setkey,
                .encrypt        = sun8i_ss_skencrypt,
                .decrypt        = sun8i_ss_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ss_handle_cipher_request,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ss_algo_id = SS_ID_CIPHER_DES3,
        .ss_blockmode = SS_ID_OP_CBC,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "cbc(des3_ede)",
                        .cra_driver_name = "cbc-des3-sun8i-ss",
@@ -242,13 +249,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .setkey         = sun8i_ss_des3_setkey,
                .encrypt        = sun8i_ss_skencrypt,
                .decrypt        = sun8i_ss_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ss_handle_cipher_request,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .ss_algo_id = SS_ID_CIPHER_DES3,
        .ss_blockmode = SS_ID_OP_ECB,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "ecb(des3_ede)",
                        .cra_driver_name = "ecb-des3-sun8i-ss",
@@ -268,7 +278,10 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .setkey         = sun8i_ss_des3_setkey,
                .encrypt        = sun8i_ss_skencrypt,
                .decrypt        = sun8i_ss_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sun8i_ss_handle_cipher_request,
+       },
 },
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG
 {
@@ -292,7 +305,7 @@ static struct sun8i_ss_alg_template ss_algs[] = {
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ss_algo_id = SS_ID_HASH_MD5,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ss_hash_init,
                .update = sun8i_ss_hash_update,
                .final = sun8i_ss_hash_final,
@@ -300,6 +313,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .digest = sun8i_ss_hash_digest,
                .export = sun8i_ss_hash_export,
                .import = sun8i_ss_hash_import,
+               .init_tfm = sun8i_ss_hash_init_tfm,
+               .exit_tfm = sun8i_ss_hash_exit_tfm,
                .halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct md5_state),
@@ -314,15 +329,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ss_hash_crainit,
-                               .cra_exit = sun8i_ss_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ss_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ss_algo_id = SS_ID_HASH_SHA1,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ss_hash_init,
                .update = sun8i_ss_hash_update,
                .final = sun8i_ss_hash_final,
@@ -330,6 +346,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .digest = sun8i_ss_hash_digest,
                .export = sun8i_ss_hash_export,
                .import = sun8i_ss_hash_import,
+               .init_tfm = sun8i_ss_hash_init_tfm,
+               .exit_tfm = sun8i_ss_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct sha1_state),
@@ -344,15 +362,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ss_hash_crainit,
-                               .cra_exit = sun8i_ss_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ss_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ss_algo_id = SS_ID_HASH_SHA224,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ss_hash_init,
                .update = sun8i_ss_hash_update,
                .final = sun8i_ss_hash_final,
@@ -360,6 +379,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .digest = sun8i_ss_hash_digest,
                .export = sun8i_ss_hash_export,
                .import = sun8i_ss_hash_import,
+               .init_tfm = sun8i_ss_hash_init_tfm,
+               .exit_tfm = sun8i_ss_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
@@ -374,15 +395,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ss_hash_crainit,
-                               .cra_exit = sun8i_ss_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ss_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ss_algo_id = SS_ID_HASH_SHA256,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ss_hash_init,
                .update = sun8i_ss_hash_update,
                .final = sun8i_ss_hash_final,
@@ -390,6 +412,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .digest = sun8i_ss_hash_digest,
                .export = sun8i_ss_hash_export,
                .import = sun8i_ss_hash_import,
+               .init_tfm = sun8i_ss_hash_init_tfm,
+               .exit_tfm = sun8i_ss_hash_exit_tfm,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct sha256_state),
@@ -404,15 +428,16 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ss_hash_crainit,
-                               .cra_exit = sun8i_ss_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ss_hash_run,
+       },
 },
 {      .type = CRYPTO_ALG_TYPE_AHASH,
        .ss_algo_id = SS_ID_HASH_SHA1,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = sun8i_ss_hash_init,
                .update = sun8i_ss_hash_update,
                .final = sun8i_ss_hash_final,
@@ -420,6 +445,8 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                .digest = sun8i_ss_hash_digest,
                .export = sun8i_ss_hash_export,
                .import = sun8i_ss_hash_import,
+               .init_tfm = sun8i_ss_hash_init_tfm,
+               .exit_tfm = sun8i_ss_hash_exit_tfm,
                .setkey = sun8i_ss_hmac_setkey,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
@@ -435,23 +462,28 @@ static struct sun8i_ss_alg_template ss_algs[] = {
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx),
                                .cra_module = THIS_MODULE,
-                               .cra_init = sun8i_ss_hash_crainit,
-                               .cra_exit = sun8i_ss_hash_craexit,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = sun8i_ss_hash_run,
+       },
 },
 #endif
 };
 
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
 static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
 {
-       struct sun8i_ss_dev *ss = seq->private;
+       struct sun8i_ss_dev *ss __maybe_unused = seq->private;
        unsigned int i;
 
        for (i = 0; i < MAXFLOW; i++)
-               seq_printf(seq, "Channel %d: nreq %lu\n", i, ss->flows[i].stat_req);
+               seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
+                          ss->flows[i].stat_req);
+#else
+                          0ul);
+#endif
 
        for (i = 0; i < ARRAY_SIZE(ss_algs); i++) {
                if (!ss_algs[i].ss)
@@ -459,8 +491,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
                switch (ss_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  ss_algs[i].alg.skcipher.base.cra_driver_name,
-                                  ss_algs[i].alg.skcipher.base.cra_name,
+                                  ss_algs[i].alg.skcipher.base.base.cra_driver_name,
+                                  ss_algs[i].alg.skcipher.base.base.cra_name,
                                   ss_algs[i].stat_req, ss_algs[i].stat_fb);
 
                        seq_printf(seq, "\tLast fallback is: %s\n",
@@ -482,8 +514,8 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  ss_algs[i].alg.hash.halg.base.cra_driver_name,
-                                  ss_algs[i].alg.hash.halg.base.cra_name,
+                                  ss_algs[i].alg.hash.base.halg.base.cra_driver_name,
+                                  ss_algs[i].alg.hash.base.halg.base.cra_name,
                                   ss_algs[i].stat_req, ss_algs[i].stat_fb);
                        seq_printf(seq, "\tLast fallback is: %s\n",
                                   ss_algs[i].fbname);
@@ -502,7 +534,6 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v)
 }
 
 DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs);
-#endif
 
 static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
 {
@@ -659,7 +690,7 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
                        if (ss_method == SS_ID_NOTSUPP) {
                                dev_info(ss->dev,
                                         "DEBUG: Algo of %s not supported\n",
-                                        ss_algs[i].alg.skcipher.base.cra_name);
+                                        ss_algs[i].alg.skcipher.base.base.cra_name);
                                ss_algs[i].ss = NULL;
                                break;
                        }
@@ -667,16 +698,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
                        ss_method = ss->variant->op_mode[id];
                        if (ss_method == SS_ID_NOTSUPP) {
                                dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n",
-                                        ss_algs[i].alg.skcipher.base.cra_name);
+                                        ss_algs[i].alg.skcipher.base.base.cra_name);
                                ss_algs[i].ss = NULL;
                                break;
                        }
                        dev_info(ss->dev, "DEBUG: Register %s\n",
-                                ss_algs[i].alg.skcipher.base.cra_name);
-                       err = crypto_register_skcipher(&ss_algs[i].alg.skcipher);
+                                ss_algs[i].alg.skcipher.base.base.cra_name);
+                       err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher);
                        if (err) {
                                dev_err(ss->dev, "Fail to register %s\n",
-                                       ss_algs[i].alg.skcipher.base.cra_name);
+                                       ss_algs[i].alg.skcipher.base.base.cra_name);
                                ss_algs[i].ss = NULL;
                                return err;
                        }
@@ -695,16 +726,16 @@ static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss)
                        if (ss_method == SS_ID_NOTSUPP) {
                                dev_info(ss->dev,
                                        "DEBUG: Algo of %s not supported\n",
-                                       ss_algs[i].alg.hash.halg.base.cra_name);
+                                       ss_algs[i].alg.hash.base.halg.base.cra_name);
                                ss_algs[i].ss = NULL;
                                break;
                        }
                        dev_info(ss->dev, "Register %s\n",
-                                ss_algs[i].alg.hash.halg.base.cra_name);
-                       err = crypto_register_ahash(&ss_algs[i].alg.hash);
+                                ss_algs[i].alg.hash.base.halg.base.cra_name);
+                       err = crypto_engine_register_ahash(&ss_algs[i].alg.hash);
                        if (err) {
                                dev_err(ss->dev, "ERROR: Fail to register %s\n",
-                                       ss_algs[i].alg.hash.halg.base.cra_name);
+                                       ss_algs[i].alg.hash.base.halg.base.cra_name);
                                ss_algs[i].ss = NULL;
                                return err;
                        }
@@ -727,8 +758,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
                switch (ss_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        dev_info(ss->dev, "Unregister %d %s\n", i,
-                                ss_algs[i].alg.skcipher.base.cra_name);
-                       crypto_unregister_skcipher(&ss_algs[i].alg.skcipher);
+                                ss_algs[i].alg.skcipher.base.base.cra_name);
+                       crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher);
                        break;
                case CRYPTO_ALG_TYPE_RNG:
                        dev_info(ss->dev, "Unregister %d %s\n", i,
@@ -737,8 +768,8 @@ static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss)
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        dev_info(ss->dev, "Unregister %d %s\n", i,
-                                ss_algs[i].alg.hash.halg.base.cra_name);
-                       crypto_unregister_ahash(&ss_algs[i].alg.hash);
+                                ss_algs[i].alg.hash.base.halg.base.cra_name);
+                       crypto_engine_unregister_ahash(&ss_algs[i].alg.hash);
                        break;
                }
        }
@@ -851,13 +882,21 @@ static int sun8i_ss_probe(struct platform_device *pdev)
 
        pm_runtime_put_sync(ss->dev);
 
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+               struct dentry *dbgfs_dir __maybe_unused;
+               struct dentry *dbgfs_stats __maybe_unused;
+
+               /* Ignore error of debugfs */
+               dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
+               dbgfs_stats = debugfs_create_file("stats", 0444,
+                                                  dbgfs_dir, ss,
+                                                  &sun8i_ss_debugfs_fops);
+
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       /* Ignore error of debugfs */
-       ss->dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL);
-       ss->dbgfs_stats = debugfs_create_file("stats", 0444,
-                                             ss->dbgfs_dir, ss,
-                                             &sun8i_ss_debugfs_fops);
+               ss->dbgfs_dir = dbgfs_dir;
+               ss->dbgfs_stats = dbgfs_stats;
 #endif
+       }
 
        return 0;
 error_alg:
index a4b67d1..d70b105 100644 (file)
@@ -9,16 +9,21 @@
  *
  * You could find the datasheet in Documentation/arch/arm/sunxi.rst
  */
-#include <linux/bottom_half.h>
-#include <linux/dma-mapping.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
-#include <crypto/internal/hash.h>
+
 #include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/sha1.h>
 #include <crypto/sha2.h>
-#include <crypto/md5.h>
+#include <linux/bottom_half.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include "sun8i-ss.h"
 
 static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
@@ -60,14 +65,11 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
                         unsigned int keylen)
 {
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
-       struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
-       struct sun8i_ss_alg_template *algt;
        int digestsize, i;
        int bs = crypto_ahash_blocksize(ahash);
        int ret;
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
-       digestsize = algt->alg.hash.halg.digestsize;
+       digestsize = crypto_ahash_digestsize(ahash);
 
        if (keylen > bs) {
                ret = sun8i_ss_hashkey(tfmctx, key, keylen);
@@ -107,38 +109,33 @@ err_opad:
        return ret;
 }
 
-int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
 {
-       struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
+       struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct sun8i_ss_alg_template *algt;
        int err;
 
-       memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));
-
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
        op->ss = algt->ss;
 
-       op->enginectx.op.do_one_request = sun8i_ss_hash_run;
-       op->enginectx.op.prepare_request = NULL;
-       op->enginectx.op.unprepare_request = NULL;
-
        /* FALLBACK */
-       op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
+       op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
                                              CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(algt->ss->dev, "Fallback driver could no be loaded\n");
                return PTR_ERR(op->fallback_tfm);
        }
 
-       if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
-               algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);
+       crypto_ahash_set_statesize(tfm,
+                                  crypto_ahash_statesize(op->fallback_tfm));
 
-       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+       crypto_ahash_set_reqsize(tfm,
                                 sizeof(struct sun8i_ss_hash_reqctx) +
                                 crypto_ahash_reqsize(op->fallback_tfm));
 
-       memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);
+       memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
+              CRYPTO_MAX_ALG_NAME);
 
        err = pm_runtime_get_sync(op->ss->dev);
        if (err < 0)
@@ -150,9 +147,9 @@ error_pm:
        return err;
 }
 
-void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
 {
-       struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);
+       struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
 
        kfree_sensitive(tfmctx->ipad);
        kfree_sensitive(tfmctx->opad);
@@ -204,20 +201,23 @@ int sun8i_ss_hash_final(struct ahash_request *areq)
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct sun8i_ss_alg_template *algt;
-#endif
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
                                        CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = areq->result;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+               struct ahash_alg *alg = crypto_ahash_alg(tfm);
+               struct sun8i_ss_alg_template *algt __maybe_unused;
+
+               algt = container_of(alg, struct sun8i_ss_alg_template,
+                                   alg.hash.base);
+
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
-       algt->stat_fb++;
+               algt->stat_fb++;
 #endif
+       }
 
        return crypto_ahash_final(&rctx->fallback_req);
 }
@@ -242,10 +242,6 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct sun8i_ss_alg_template *algt;
-#endif
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
@@ -254,10 +250,18 @@ int sun8i_ss_hash_finup(struct ahash_request *areq)
        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+               struct ahash_alg *alg = crypto_ahash_alg(tfm);
+               struct sun8i_ss_alg_template *algt __maybe_unused;
+
+               algt = container_of(alg, struct sun8i_ss_alg_template,
+                                   alg.hash.base);
+
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
-       algt->stat_fb++;
+               algt->stat_fb++;
 #endif
+       }
 
        return crypto_ahash_finup(&rctx->fallback_req);
 }
@@ -267,10 +271,6 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct sun8i_ss_alg_template *algt;
-#endif
 
        ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
        rctx->fallback_req.base.flags = areq->base.flags &
@@ -279,10 +279,18 @@ static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
        rctx->fallback_req.nbytes = areq->nbytes;
        rctx->fallback_req.src = areq->src;
        rctx->fallback_req.result = areq->result;
+
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
+               struct ahash_alg *alg = crypto_ahash_alg(tfm);
+               struct sun8i_ss_alg_template *algt __maybe_unused;
+
+               algt = container_of(alg, struct sun8i_ss_alg_template,
+                                   alg.hash.base);
+
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
-       algt->stat_fb++;
+               algt->stat_fb++;
 #endif
+       }
 
        return crypto_ahash_digest(&rctx->fallback_req);
 }
@@ -349,11 +357,11 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
 static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct sun8i_ss_alg_template *algt;
        struct scatterlist *sg;
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
 
        if (areq->nbytes == 0) {
                algt->stat_fb_len++;
@@ -398,8 +406,8 @@ static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
 int sun8i_ss_hash_digest(struct ahash_request *areq)
 {
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct sun8i_ss_alg_template *algt;
        struct sun8i_ss_dev *ss;
        struct crypto_engine *engine;
@@ -408,7 +416,7 @@ int sun8i_ss_hash_digest(struct ahash_request *areq)
        if (sun8i_ss_hash_need_fallback(areq))
                return sun8i_ss_hash_digest_fb(areq);
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
        ss = algt->ss;
 
        e = sun8i_ss_get_engine_number(ss);
@@ -484,8 +492,8 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
        struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
        struct sun8i_ss_alg_template *algt;
        struct sun8i_ss_dev *ss;
        struct scatterlist *sg;
@@ -504,10 +512,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
         */
        int hmac = 0;
 
-       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
+       algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
        ss = algt->ss;
 
-       digestsize = algt->alg.hash.halg.digestsize;
+       digestsize = crypto_ahash_digestsize(tfm);
        if (digestsize == SHA224_DIGEST_SIZE)
                digestsize = SHA256_DIGEST_SIZE;
 
@@ -700,7 +708,7 @@ err_dma_result:
        }
 
        if (!err)
-               memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
+               memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
 theend:
        local_bh_disable();
        crypto_finalize_hash_request(engine, breq, err);
index df6f08f..ae66eb4 100644 (file)
@@ -201,16 +201,12 @@ struct sun8i_cipher_req_ctx {
 
 /*
  * struct sun8i_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx:         crypto_engine used by this TFM
  * @key:               pointer to key data
  * @keylen:            len of the key
  * @ss:                        pointer to the private data of driver handling this TFM
  * @fallback_tfm:      pointer to the fallback TFM
- *
- * enginectx must be the first element
  */
 struct sun8i_cipher_tfm_ctx {
-       struct crypto_engine_ctx enginectx;
        u32 *key;
        u32 keylen;
        struct sun8i_ss_dev *ss;
@@ -229,14 +225,10 @@ struct sun8i_ss_rng_tfm_ctx {
 
 /*
  * struct sun8i_ss_hash_tfm_ctx - context for an ahash TFM
- * @enginectx:         crypto_engine used by this TFM
  * @fallback_tfm:      pointer to the fallback TFM
  * @ss:                        pointer to the private data of driver handling this TFM
- *
- * enginectx must be the first element
  */
 struct sun8i_ss_hash_tfm_ctx {
-       struct crypto_engine_ctx enginectx;
        struct crypto_ahash *fallback_tfm;
        struct sun8i_ss_dev *ss;
        u8 *ipad;
@@ -279,9 +271,9 @@ struct sun8i_ss_alg_template {
        u32 ss_blockmode;
        struct sun8i_ss_dev *ss;
        union {
-               struct skcipher_alg skcipher;
+               struct skcipher_engine_alg skcipher;
                struct rng_alg rng;
-               struct ahash_alg hash;
+               struct ahash_engine_alg hash;
        } alg;
        unsigned long stat_req;
        unsigned long stat_fb;
@@ -293,14 +285,13 @@ struct sun8i_ss_alg_template {
        char fbname[CRYPTO_MAX_ALG_NAME];
 };
 
-int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type);
-
 int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen);
 int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen);
 int sun8i_ss_cipher_init(struct crypto_tfm *tfm);
 void sun8i_ss_cipher_exit(struct crypto_tfm *tfm);
+int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq);
 int sun8i_ss_skdecrypt(struct skcipher_request *areq);
 int sun8i_ss_skencrypt(struct skcipher_request *areq);
 
@@ -313,8 +304,8 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen
 int sun8i_ss_prng_init(struct crypto_tfm *tfm);
 void sun8i_ss_prng_exit(struct crypto_tfm *tfm);
 
-int sun8i_ss_hash_crainit(struct crypto_tfm *tfm);
-void sun8i_ss_hash_craexit(struct crypto_tfm *tfm);
+int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm);
+void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm);
 int sun8i_ss_hash_init(struct ahash_request *areq);
 int sun8i_ss_hash_export(struct ahash_request *areq, void *out);
 int sun8i_ss_hash_import(struct ahash_request *areq, const void *in);
index af017a0..3308406 100644 (file)
@@ -65,7 +65,7 @@ static int meson_cipher_do_fallback(struct skcipher_request *areq)
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct meson_alg_template *algt;
 
-       algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+       algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
        algt->stat_fb++;
 #endif
        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -101,7 +101,7 @@ static int meson_cipher(struct skcipher_request *areq)
        void *backup_iv = NULL, *bkeyiv;
        u32 v;
 
-       algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+       algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
 
        dev_dbg(mc->dev, "%s %s %u %x IV(%u) key=%u flow=%d\n", __func__,
                crypto_tfm_alg_name(areq->base.tfm),
@@ -258,8 +258,7 @@ theend:
        return err;
 }
 
-static int meson_handle_cipher_request(struct crypto_engine *engine,
-                                      void *areq)
+int meson_handle_cipher_request(struct crypto_engine *engine, void *areq)
 {
        int err;
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -318,7 +317,7 @@ int meson_cipher_init(struct crypto_tfm *tfm)
 
        memset(op, 0, sizeof(struct meson_cipher_tfm_ctx));
 
-       algt = container_of(alg, struct meson_alg_template, alg.skcipher);
+       algt = container_of(alg, struct meson_alg_template, alg.skcipher.base);
        op->mc = algt->mc;
 
        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -331,10 +330,6 @@ int meson_cipher_init(struct crypto_tfm *tfm)
        sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
                         crypto_skcipher_reqsize(op->fallback_tfm);
 
-       op->enginectx.op.do_one_request = meson_handle_cipher_request;
-       op->enginectx.op.prepare_request = NULL;
-       op->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
index 9371870..da6dfe0 100644 (file)
@@ -6,17 +6,19 @@
  *
  * Core file which registers crypto algorithms supported by the hardware.
  */
+
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/clk.h>
-#include <linux/crypto.h>
-#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/dma-mapping.h>
 
 #include "amlogic-gxl.h"
 
@@ -47,7 +49,7 @@ static struct meson_alg_template mc_algs[] = {
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .blockmode = MESON_OPMODE_CBC,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "cbc(aes)",
                        .cra_driver_name = "cbc-aes-gxl",
@@ -68,12 +70,15 @@ static struct meson_alg_template mc_algs[] = {
                .setkey         = meson_aes_setkey,
                .encrypt        = meson_skencrypt,
                .decrypt        = meson_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = meson_handle_cipher_request,
+       },
 },
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .blockmode = MESON_OPMODE_ECB,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "ecb-aes-gxl",
@@ -93,33 +98,43 @@ static struct meson_alg_template mc_algs[] = {
                .setkey         = meson_aes_setkey,
                .encrypt        = meson_skencrypt,
                .decrypt        = meson_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = meson_handle_cipher_request,
+       },
 },
 };
 
-#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
 static int meson_debugfs_show(struct seq_file *seq, void *v)
 {
-       struct meson_dev *mc = seq->private;
+       struct meson_dev *mc __maybe_unused = seq->private;
        int i;
 
        for (i = 0; i < MAXFLOW; i++)
-               seq_printf(seq, "Channel %d: nreq %lu\n", i, mc->chanlist[i].stat_req);
+               seq_printf(seq, "Channel %d: nreq %lu\n", i,
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
+                          mc->chanlist[i].stat_req);
+#else
+                          0ul);
+#endif
 
        for (i = 0; i < ARRAY_SIZE(mc_algs); i++) {
                switch (mc_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        seq_printf(seq, "%s %s %lu %lu\n",
-                                  mc_algs[i].alg.skcipher.base.cra_driver_name,
-                                  mc_algs[i].alg.skcipher.base.cra_name,
+                                  mc_algs[i].alg.skcipher.base.base.cra_driver_name,
+                                  mc_algs[i].alg.skcipher.base.base.cra_name,
+#ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
                                   mc_algs[i].stat_req, mc_algs[i].stat_fb);
+#else
+                                  0ul, 0ul);
+#endif
                        break;
                }
        }
        return 0;
 }
 DEFINE_SHOW_ATTRIBUTE(meson_debugfs);
-#endif
 
 static void meson_free_chanlist(struct meson_dev *mc, int i)
 {
@@ -183,10 +198,10 @@ static int meson_register_algs(struct meson_dev *mc)
                mc_algs[i].mc = mc;
                switch (mc_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
-                       err = crypto_register_skcipher(&mc_algs[i].alg.skcipher);
+                       err = crypto_engine_register_skcipher(&mc_algs[i].alg.skcipher);
                        if (err) {
                                dev_err(mc->dev, "Fail to register %s\n",
-                                       mc_algs[i].alg.skcipher.base.cra_name);
+                                       mc_algs[i].alg.skcipher.base.base.cra_name);
                                mc_algs[i].mc = NULL;
                                return err;
                        }
@@ -206,7 +221,7 @@ static void meson_unregister_algs(struct meson_dev *mc)
                        continue;
                switch (mc_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
-                       crypto_unregister_skcipher(&mc_algs[i].alg.skcipher);
+                       crypto_engine_unregister_skcipher(&mc_algs[i].alg.skcipher);
                        break;
                }
        }
@@ -264,10 +279,16 @@ static int meson_crypto_probe(struct platform_device *pdev)
        if (err)
                goto error_alg;
 
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG)) {
+               struct dentry *dbgfs_dir;
+
+               dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
+               debugfs_create_file("stats", 0444, dbgfs_dir, mc, &meson_debugfs_fops);
+
 #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
-       mc->dbgfs_dir = debugfs_create_dir("gxl-crypto", NULL);
-       debugfs_create_file("stats", 0444, mc->dbgfs_dir, mc, &meson_debugfs_fops);
+               mc->dbgfs_dir = dbgfs_dir;
 #endif
+       }
 
        return 0;
 error_alg:
index 8c0746a..1013a66 100644 (file)
@@ -114,7 +114,6 @@ struct meson_cipher_req_ctx {
 
 /*
  * struct meson_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx:         crypto_engine used by this TFM
  * @key:               pointer to key data
  * @keylen:            len of the key
  * @keymode:           The keymode(type and size of key) associated with this TFM
@@ -122,7 +121,6 @@ struct meson_cipher_req_ctx {
  * @fallback_tfm:      pointer to the fallback TFM
  */
 struct meson_cipher_tfm_ctx {
-       struct crypto_engine_ctx enginectx;
        u32 *key;
        u32 keylen;
        u32 keymode;
@@ -143,7 +141,7 @@ struct meson_alg_template {
        u32 type;
        u32 blockmode;
        union {
-               struct skcipher_alg skcipher;
+               struct skcipher_engine_alg skcipher;
        } alg;
        struct meson_dev *mc;
 #ifdef CONFIG_CRYPTO_DEV_AMLOGIC_GXL_DEBUG
@@ -160,3 +158,4 @@ int meson_cipher_init(struct crypto_tfm *tfm);
 void meson_cipher_exit(struct crypto_tfm *tfm);
 int meson_skdecrypt(struct skcipher_request *areq);
 int meson_skencrypt(struct skcipher_request *areq);
+int meson_handle_cipher_request(struct crypto_engine *engine, void *areq);
index 470122c..247c568 100644 (file)
@@ -2,25 +2,23 @@
 /*
  * Copyright 2021 Aspeed Technology Inc.
  */
-#include <crypto/akcipher.h>
-#include <crypto/algapi.h>
 #include <crypto/engine.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/internal/rsa.h>
 #include <crypto/scatterwalk.h>
 #include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/interrupt.h>
 #include <linux/count_zeros.h>
-#include <linux/err.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 
 #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
 #define ACRY_DBG(d, fmt, ...)  \
@@ -112,7 +110,6 @@ struct aspeed_acry_dev {
 };
 
 struct aspeed_acry_ctx {
-       struct crypto_engine_ctx        enginectx;
        struct aspeed_acry_dev          *acry_dev;
 
        struct rsa_key                  key;
@@ -131,7 +128,7 @@ struct aspeed_acry_ctx {
 
 struct aspeed_acry_alg {
        struct aspeed_acry_dev          *acry_dev;
-       struct akcipher_alg             akcipher;
+       struct akcipher_engine_alg      akcipher;
 };
 
 enum aspeed_rsa_key_mode {
@@ -577,7 +574,7 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct aspeed_acry_alg *acry_alg;
 
-       acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);
+       acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
 
        ctx->acry_dev = acry_alg->acry_dev;
 
@@ -589,10 +586,6 @@ static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
                return PTR_ERR(ctx->fallback_tfm);
        }
 
-       ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -605,7 +598,7 @@ static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
 
 static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
        {
-               .akcipher = {
+               .akcipher.base = {
                        .encrypt = aspeed_acry_rsa_enc,
                        .decrypt = aspeed_acry_rsa_dec,
                        .sign = aspeed_acry_rsa_dec,
@@ -627,6 +620,9 @@ static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
                                .cra_ctxsize = sizeof(struct aspeed_acry_ctx),
                        },
                },
+               .akcipher.op = {
+                       .do_one_request = aspeed_acry_do_request,
+               },
        },
 };
 
@@ -636,10 +632,10 @@ static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
 
        for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
                aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
-               rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+               rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
                if (rc) {
                        ACRY_DBG(acry_dev, "Failed to register %s\n",
-                                aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
+                                aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
                }
        }
 }
@@ -649,7 +645,7 @@ static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
-               crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+               crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
 }
 
 /* ACRY interrupt service routine. */
index ef73b00..f0eddb7 100644 (file)
@@ -4,6 +4,17 @@
  */
 
 #include "aspeed-hace.h"
+#include <crypto/des.h>
+#include <crypto/engine.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
 
 #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG
 #define CIPHER_DBG(h, fmt, ...)        \
@@ -696,7 +707,7 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
        struct aspeed_hace_alg *crypto_alg;
 
 
-       crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher);
+       crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base);
        ctx->hace_dev = crypto_alg->hace_dev;
        ctx->start = aspeed_hace_skcipher_trigger;
 
@@ -713,10 +724,6 @@ static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm)
        crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) +
                         crypto_skcipher_reqsize(ctx->fallback_tfm));
 
-       ctx->enginectx.op.do_one_request = aspeed_crypto_do_request;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -731,7 +738,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
 
 static struct aspeed_hace_alg aspeed_crypto_algs[] = {
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = aspeed_aes_setkey,
@@ -751,10 +758,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
@@ -775,10 +785,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
@@ -799,10 +812,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
@@ -823,10 +839,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
                        .setkey         = aspeed_des_setkey,
@@ -846,10 +865,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
@@ -870,10 +892,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
@@ -894,10 +919,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
@@ -918,10 +946,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
                        .setkey         = aspeed_des_setkey,
@@ -941,10 +972,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
@@ -965,10 +999,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
@@ -989,10 +1026,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
@@ -1013,13 +1053,16 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
 };
 
 static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
@@ -1039,10 +1082,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
@@ -1062,10 +1108,13 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
        {
-               .alg.skcipher = {
+               .alg.skcipher.base = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES3_EDE_KEY_SIZE,
                        .max_keysize    = DES3_EDE_KEY_SIZE,
@@ -1085,7 +1134,10 @@ static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
                                .cra_alignmask          = 0x0f,
                                .cra_module             = THIS_MODULE,
                        }
-               }
+               },
+               .alg.skcipher.op = {
+                       .do_one_request = aspeed_crypto_do_request,
+               },
        },
 
 };
@@ -1095,13 +1147,13 @@ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
-               crypto_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+               crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
 
        if (hace_dev->version != AST2600_VERSION)
                return;
 
        for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
-               crypto_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+               crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
 }
 
 void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
@@ -1112,10 +1164,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
 
        for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
                aspeed_crypto_algs[i].hace_dev = hace_dev;
-               rc = crypto_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+               rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
                if (rc) {
                        CIPHER_DBG(hace_dev, "Failed to register %s\n",
-                                  aspeed_crypto_algs[i].alg.skcipher.base.cra_name);
+                                  aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name);
                }
        }
 
@@ -1124,10 +1176,10 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
 
        for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
                aspeed_crypto_algs_g6[i].hace_dev = hace_dev;
-               rc = crypto_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+               rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
                if (rc) {
                        CIPHER_DBG(hace_dev, "Failed to register %s\n",
-                                  aspeed_crypto_algs_g6[i].alg.skcipher.base.cra_name);
+                                  aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name);
                }
        }
 }
index 9351352..0b6e49c 100644 (file)
@@ -4,6 +4,17 @@
  */
 
 #include "aspeed-hace.h"
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
 
 #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
 #define AHASH_DBG(h, fmt, ...) \
@@ -48,28 +59,6 @@ static const __be64 sha512_iv[8] = {
        cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
 };
 
-static const __be32 sha512_224_iv[16] = {
-       cpu_to_be32(0xC8373D8CUL), cpu_to_be32(0xA24D5419UL),
-       cpu_to_be32(0x6699E173UL), cpu_to_be32(0xD6D4DC89UL),
-       cpu_to_be32(0xAEB7FA1DUL), cpu_to_be32(0x829CFF32UL),
-       cpu_to_be32(0x14D59D67UL), cpu_to_be32(0xCF9F2F58UL),
-       cpu_to_be32(0x692B6D0FUL), cpu_to_be32(0xA84DD47BUL),
-       cpu_to_be32(0x736FE377UL), cpu_to_be32(0x4289C404UL),
-       cpu_to_be32(0xA8859D3FUL), cpu_to_be32(0xC8361D6AUL),
-       cpu_to_be32(0xADE61211UL), cpu_to_be32(0xA192D691UL)
-};
-
-static const __be32 sha512_256_iv[16] = {
-       cpu_to_be32(0x94213122UL), cpu_to_be32(0x2CF72BFCUL),
-       cpu_to_be32(0xA35F559FUL), cpu_to_be32(0xC2644CC8UL),
-       cpu_to_be32(0x6BB89323UL), cpu_to_be32(0x51B1536FUL),
-       cpu_to_be32(0x19773896UL), cpu_to_be32(0xBDEA4059UL),
-       cpu_to_be32(0xE23E2896UL), cpu_to_be32(0xE3FF8EA8UL),
-       cpu_to_be32(0x251E5EBEUL), cpu_to_be32(0x92398653UL),
-       cpu_to_be32(0xFC99012BUL), cpu_to_be32(0xAAB8852CUL),
-       cpu_to_be32(0xDC2DB70EUL), cpu_to_be32(0xA22CC581UL)
-};
-
 /* The purpose of this padding is to ensure that the padded message is a
  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
  * The bit "1" is appended at the end of the message followed by
@@ -565,8 +554,8 @@ static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
        return 0;
 }
 
-static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
-                                       void *areq)
+static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
+                                        void *areq)
 {
        struct ahash_request *req = ahash_request_cast(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -581,8 +570,12 @@ static int aspeed_ahash_prepare_request(struct crypto_engine *engine,
                hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
        else
                hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
+}
 
-       return 0;
+static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
+{
+       aspeed_ahash_prepare_request(engine, areq);
+       return aspeed_ahash_do_request(engine, areq);
 }
 
 static int aspeed_sham_update(struct ahash_request *req)
@@ -750,62 +743,6 @@ static int aspeed_sham_init(struct ahash_request *req)
        return 0;
 }
 
-static int aspeed_sha512s_init(struct ahash_request *req)
-{
-       struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-       struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
-       struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-       struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
-       AHASH_DBG(hace_dev, "digest size: %d\n", crypto_ahash_digestsize(tfm));
-
-       rctx->cmd = HASH_CMD_ACC_MODE;
-       rctx->flags = 0;
-
-       switch (crypto_ahash_digestsize(tfm)) {
-       case SHA224_DIGEST_SIZE:
-               rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_224 |
-                            HASH_CMD_SHA_SWAP;
-               rctx->flags |= SHA_FLAGS_SHA512_224;
-               rctx->digsize = SHA224_DIGEST_SIZE;
-               rctx->block_size = SHA512_BLOCK_SIZE;
-               rctx->sha_iv = sha512_224_iv;
-               rctx->ivsize = 64;
-               memcpy(rctx->digest, sha512_224_iv, rctx->ivsize);
-               break;
-       case SHA256_DIGEST_SIZE:
-               rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512_256 |
-                            HASH_CMD_SHA_SWAP;
-               rctx->flags |= SHA_FLAGS_SHA512_256;
-               rctx->digsize = SHA256_DIGEST_SIZE;
-               rctx->block_size = SHA512_BLOCK_SIZE;
-               rctx->sha_iv = sha512_256_iv;
-               rctx->ivsize = 64;
-               memcpy(rctx->digest, sha512_256_iv, rctx->ivsize);
-               break;
-       default:
-               dev_warn(tctx->hace_dev->dev, "digest size %d not support\n",
-                        crypto_ahash_digestsize(tfm));
-               return -EINVAL;
-       }
-
-       rctx->bufcnt = 0;
-       rctx->total = 0;
-       rctx->digcnt[0] = 0;
-       rctx->digcnt[1] = 0;
-
-       /* HMAC init */
-       if (tctx->flags & SHA_FLAGS_HMAC) {
-               rctx->digcnt[0] = rctx->block_size;
-               rctx->bufcnt = rctx->block_size;
-               memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
-               rctx->flags |= SHA_FLAGS_HMAC;
-       }
-
-       return 0;
-}
-
 static int aspeed_sham_digest(struct ahash_request *req)
 {
        return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
@@ -854,7 +791,7 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
        struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
        struct aspeed_hace_alg *ast_alg;
 
-       ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash);
+       ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
        tctx->hace_dev = ast_alg->hace_dev;
        tctx->flags = 0;
 
@@ -876,10 +813,6 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
                }
        }
 
-       tctx->enginectx.op.do_one_request = aspeed_ahash_do_request;
-       tctx->enginectx.op.prepare_request = aspeed_ahash_prepare_request;
-       tctx->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -917,7 +850,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in)
 
 static struct aspeed_hace_alg aspeed_ahash_algs[] = {
        {
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -944,9 +877,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -973,9 +909,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1002,10 +941,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
                .alg_base = "sha1",
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1034,10 +976,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
                .alg_base = "sha224",
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1066,10 +1011,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
                .alg_base = "sha256",
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1098,12 +1046,15 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
 };
 
 static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
        {
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1130,9 +1081,12 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1159,68 +1113,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
                                }
                        }
                },
-       },
-       {
-               .alg.ahash = {
-                       .init   = aspeed_sha512s_init,
-                       .update = aspeed_sham_update,
-                       .final  = aspeed_sham_final,
-                       .finup  = aspeed_sham_finup,
-                       .digest = aspeed_sham_digest,
-                       .export = aspeed_sham_export,
-                       .import = aspeed_sham_import,
-                       .halg = {
-                               .digestsize = SHA224_DIGEST_SIZE,
-                               .statesize = sizeof(struct aspeed_sham_reqctx),
-                               .base = {
-                                       .cra_name               = "sha512_224",
-                                       .cra_driver_name        = "aspeed-sha512_224",
-                                       .cra_priority           = 300,
-                                       .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
-                                                                 CRYPTO_ALG_ASYNC |
-                                                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-                                       .cra_blocksize          = SHA512_BLOCK_SIZE,
-                                       .cra_ctxsize            = sizeof(struct aspeed_sham_ctx),
-                                       .cra_alignmask          = 0,
-                                       .cra_module             = THIS_MODULE,
-                                       .cra_init               = aspeed_sham_cra_init,
-                                       .cra_exit               = aspeed_sham_cra_exit,
-                               }
-                       }
-               },
-       },
-       {
-               .alg.ahash = {
-                       .init   = aspeed_sha512s_init,
-                       .update = aspeed_sham_update,
-                       .final  = aspeed_sham_final,
-                       .finup  = aspeed_sham_finup,
-                       .digest = aspeed_sham_digest,
-                       .export = aspeed_sham_export,
-                       .import = aspeed_sham_import,
-                       .halg = {
-                               .digestsize = SHA256_DIGEST_SIZE,
-                               .statesize = sizeof(struct aspeed_sham_reqctx),
-                               .base = {
-                                       .cra_name               = "sha512_256",
-                                       .cra_driver_name        = "aspeed-sha512_256",
-                                       .cra_priority           = 300,
-                                       .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
-                                                                 CRYPTO_ALG_ASYNC |
-                                                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-                                       .cra_blocksize          = SHA512_BLOCK_SIZE,
-                                       .cra_ctxsize            = sizeof(struct aspeed_sham_ctx),
-                                       .cra_alignmask          = 0,
-                                       .cra_module             = THIS_MODULE,
-                                       .cra_init               = aspeed_sham_cra_init,
-                                       .cra_exit               = aspeed_sham_cra_exit,
-                               }
-                       }
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
                },
        },
        {
                .alg_base = "sha384",
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1249,10 +1148,13 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
                                }
                        }
                },
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
+               },
        },
        {
                .alg_base = "sha512",
-               .alg.ahash = {
+               .alg.ahash.base = {
                        .init   = aspeed_sham_init,
                        .update = aspeed_sham_update,
                        .final  = aspeed_sham_final,
@@ -1281,69 +1183,8 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
                                }
                        }
                },
-       },
-       {
-               .alg_base = "sha512_224",
-               .alg.ahash = {
-                       .init   = aspeed_sha512s_init,
-                       .update = aspeed_sham_update,
-                       .final  = aspeed_sham_final,
-                       .finup  = aspeed_sham_finup,
-                       .digest = aspeed_sham_digest,
-                       .setkey = aspeed_sham_setkey,
-                       .export = aspeed_sham_export,
-                       .import = aspeed_sham_import,
-                       .halg = {
-                               .digestsize = SHA224_DIGEST_SIZE,
-                               .statesize = sizeof(struct aspeed_sham_reqctx),
-                               .base = {
-                                       .cra_name               = "hmac(sha512_224)",
-                                       .cra_driver_name        = "aspeed-hmac-sha512_224",
-                                       .cra_priority           = 300,
-                                       .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
-                                                                 CRYPTO_ALG_ASYNC |
-                                                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-                                       .cra_blocksize          = SHA512_BLOCK_SIZE,
-                                       .cra_ctxsize            = sizeof(struct aspeed_sham_ctx) +
-                                                               sizeof(struct aspeed_sha_hmac_ctx),
-                                       .cra_alignmask          = 0,
-                                       .cra_module             = THIS_MODULE,
-                                       .cra_init               = aspeed_sham_cra_init,
-                                       .cra_exit               = aspeed_sham_cra_exit,
-                               }
-                       }
-               },
-       },
-       {
-               .alg_base = "sha512_256",
-               .alg.ahash = {
-                       .init   = aspeed_sha512s_init,
-                       .update = aspeed_sham_update,
-                       .final  = aspeed_sham_final,
-                       .finup  = aspeed_sham_finup,
-                       .digest = aspeed_sham_digest,
-                       .setkey = aspeed_sham_setkey,
-                       .export = aspeed_sham_export,
-                       .import = aspeed_sham_import,
-                       .halg = {
-                               .digestsize = SHA256_DIGEST_SIZE,
-                               .statesize = sizeof(struct aspeed_sham_reqctx),
-                               .base = {
-                                       .cra_name               = "hmac(sha512_256)",
-                                       .cra_driver_name        = "aspeed-hmac-sha512_256",
-                                       .cra_priority           = 300,
-                                       .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
-                                                                 CRYPTO_ALG_ASYNC |
-                                                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-                                       .cra_blocksize          = SHA512_BLOCK_SIZE,
-                                       .cra_ctxsize            = sizeof(struct aspeed_sham_ctx) +
-                                                               sizeof(struct aspeed_sha_hmac_ctx),
-                                       .cra_alignmask          = 0,
-                                       .cra_module             = THIS_MODULE,
-                                       .cra_init               = aspeed_sham_cra_init,
-                                       .cra_exit               = aspeed_sham_cra_exit,
-                               }
-                       }
+               .alg.ahash.op = {
+                       .do_one_request = aspeed_ahash_do_one,
                },
        },
 };
@@ -1353,13 +1194,13 @@ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
-               crypto_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+               crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
 
        if (hace_dev->version != AST2600_VERSION)
                return;
 
        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
-               crypto_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+               crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
 }
 
 void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
@@ -1370,10 +1211,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
 
        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
                aspeed_ahash_algs[i].hace_dev = hace_dev;
-               rc = crypto_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
+               rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
                if (rc) {
                        AHASH_DBG(hace_dev, "Failed to register %s\n",
-                                 aspeed_ahash_algs[i].alg.ahash.halg.base.cra_name);
+                                 aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
                }
        }
 
@@ -1382,10 +1223,10 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
 
        for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
                aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
-               rc = crypto_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
+               rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
                if (rc) {
                        AHASH_DBG(hace_dev, "Failed to register %s\n",
-                                 aspeed_ahash_algs_g6[i].alg.ahash.halg.base.cra_name);
+                                 aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
                }
        }
 }
index d2871e1..8f7aab8 100644 (file)
@@ -3,7 +3,14 @@
  * Copyright (c) 2021 Aspeed Technology Inc.
  */
 
+#include "aspeed-hace.h"
+#include <crypto/engine.h>
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
@@ -11,8 +18,6 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
-#include "aspeed-hace.h"
-
 #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
 #define HACE_DBG(d, fmt, ...)  \
        dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
index 05d0a15..68f70e0 100644 (file)
@@ -2,25 +2,14 @@
 #ifndef __ASPEED_HACE_H__
 #define __ASPEED_HACE_H__
 
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fips.h>
-#include <linux/dma-mapping.h>
 #include <crypto/aes.h>
-#include <crypto/des.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/akcipher.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/kpp.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
 #include <crypto/engine.h>
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
+#include <crypto/hash.h>
 #include <crypto/sha2.h>
+#include <linux/bits.h>
+#include <linux/compiler_attributes.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
 
 /*****************************
  *                           *
                                         HACE_CMD_OFB | HACE_CMD_CTR)
 
 struct aspeed_hace_dev;
+struct scatterlist;
 
 typedef int (*aspeed_hace_fn_t)(struct aspeed_hace_dev *);
 
@@ -178,8 +168,6 @@ struct aspeed_sha_hmac_ctx {
 };
 
 struct aspeed_sham_ctx {
-       struct crypto_engine_ctx        enginectx;
-
        struct aspeed_hace_dev          *hace_dev;
        unsigned long                   flags;  /* hmac flag */
 
@@ -235,8 +223,6 @@ struct aspeed_engine_crypto {
 };
 
 struct aspeed_cipher_ctx {
-       struct crypto_engine_ctx        enginectx;
-
        struct aspeed_hace_dev          *hace_dev;
        int                             key_len;
        u8                              key[AES_MAX_KEYLENGTH];
@@ -275,8 +261,8 @@ struct aspeed_hace_alg {
        const char                      *alg_base;
 
        union {
-               struct skcipher_alg     skcipher;
-               struct ahash_alg        ahash;
+               struct skcipher_engine_alg skcipher;
+               struct ahash_engine_alg ahash;
        } alg;
 };
 
index 143d33f..55b5f57 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/irq.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/delay.h>
 #include <linux/crypto.h>
 #include <crypto/scatterwalk.h>
@@ -2533,13 +2533,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
        }
 }
 
-#if defined(CONFIG_OF)
 static const struct of_device_id atmel_aes_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-aes" },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);
-#endif
 
 static int atmel_aes_probe(struct platform_device *pdev)
 {
@@ -2566,11 +2564,9 @@ static int atmel_aes_probe(struct platform_device *pdev)
 
        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);
 
-       /* Get the base address */
-       aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!aes_res) {
-               dev_err(dev, "no MEM resource info\n");
-               err = -ENODEV;
+       aes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &aes_res);
+       if (IS_ERR(aes_dd->io_base)) {
+               err = PTR_ERR(aes_dd->io_base);
                goto err_tasklet_kill;
        }
        aes_dd->phys_base = aes_res->start;
@@ -2597,13 +2593,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
                goto err_tasklet_kill;
        }
 
-       aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
-       if (IS_ERR(aes_dd->io_base)) {
-               dev_err(dev, "can't ioremap\n");
-               err = PTR_ERR(aes_dd->io_base);
-               goto err_tasklet_kill;
-       }
-
        err = clk_prepare(aes_dd->iclk);
        if (err)
                goto err_tasklet_kill;
@@ -2687,7 +2676,7 @@ static struct platform_driver atmel_aes_driver = {
        .remove         = atmel_aes_remove,
        .driver         = {
                .name   = "atmel_aes",
-               .of_match_table = of_match_ptr(atmel_aes_dt_ids),
+               .of_match_table = atmel_aes_dt_ids,
        },
 };
 
index 432beab..590ea98 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
index 6bef634..3622120 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/irq.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/delay.h>
 #include <linux/crypto.h>
 #include <crypto/scatterwalk.h>
@@ -1770,7 +1770,8 @@ static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd)
        size_t bs = ctx->block_size;
        size_t i, num_words = bs / sizeof(u32);
 
-       memcpy(hmac->opad, hmac->ipad, bs);
+       unsafe_memcpy(hmac->opad, hmac->ipad, bs,
+                     "fortified memcpy causes -Wrestrict warning");
        for (i = 0; i < num_words; ++i) {
                hmac->ipad[i] ^= 0x36363636;
                hmac->opad[i] ^= 0x5c5c5c5c;
@@ -2499,8 +2500,8 @@ static int atmel_sha_dma_init(struct atmel_sha_dev *dd)
 {
        dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx");
        if (IS_ERR(dd->dma_lch_in.chan)) {
-               dev_err(dd->dev, "DMA channel is not available\n");
-               return PTR_ERR(dd->dma_lch_in.chan);
+               return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan),
+                       "DMA channel is not available\n");
        }
 
        dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
@@ -2570,14 +2571,12 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
        }
 }
 
-#if defined(CONFIG_OF)
 static const struct of_device_id atmel_sha_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-sha" },
        { /* sentinel */ }
 };
 
 MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids);
-#endif
 
 static int atmel_sha_probe(struct platform_device *pdev)
 {
@@ -2604,11 +2603,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
 
        crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
 
-       /* Get the base address */
-       sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!sha_res) {
-               dev_err(dev, "no MEM resource info\n");
-               err = -ENODEV;
+       sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res);
+       if (IS_ERR(sha_dd->io_base)) {
+               err = PTR_ERR(sha_dd->io_base);
                goto err_tasklet_kill;
        }
        sha_dd->phys_base = sha_res->start;
@@ -2635,13 +2632,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
                goto err_tasklet_kill;
        }
 
-       sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
-       if (IS_ERR(sha_dd->io_base)) {
-               dev_err(dev, "can't ioremap\n");
-               err = PTR_ERR(sha_dd->io_base);
-               goto err_tasklet_kill;
-       }
-
        err = clk_prepare(sha_dd->iclk);
        if (err)
                goto err_tasklet_kill;
@@ -2716,7 +2706,7 @@ static struct platform_driver atmel_sha_driver = {
        .remove         = atmel_sha_remove,
        .driver         = {
                .name   = "atmel_sha",
-               .of_match_table = of_match_ptr(atmel_sha_dt_ids),
+               .of_match_table = atmel_sha_dt_ids,
        },
 };
 
index c9ded8b..099b32a 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/irq.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/delay.h>
 #include <linux/crypto.h>
 #include <crypto/scatterwalk.h>
@@ -1139,13 +1139,11 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
        }
 }
 
-#if defined(CONFIG_OF)
 static const struct of_device_id atmel_tdes_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-tdes" },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, atmel_tdes_dt_ids);
-#endif
 
 static int atmel_tdes_probe(struct platform_device *pdev)
 {
@@ -1172,11 +1170,9 @@ static int atmel_tdes_probe(struct platform_device *pdev)
 
        crypto_init_queue(&tdes_dd->queue, ATMEL_TDES_QUEUE_LENGTH);
 
-       /* Get the base address */
-       tdes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!tdes_res) {
-               dev_err(dev, "no MEM resource info\n");
-               err = -ENODEV;
+       tdes_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &tdes_res);
+       if (IS_ERR(tdes_dd->io_base)) {
+               err = PTR_ERR(tdes_dd->io_base);
                goto err_tasklet_kill;
        }
        tdes_dd->phys_base = tdes_res->start;
@@ -1203,12 +1199,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
                goto err_tasklet_kill;
        }
 
-       tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
-       if (IS_ERR(tdes_dd->io_base)) {
-               err = PTR_ERR(tdes_dd->io_base);
-               goto err_tasklet_kill;
-       }
-
        err = atmel_tdes_hw_version_init(tdes_dd);
        if (err)
                goto err_tasklet_kill;
@@ -1282,7 +1272,7 @@ static struct platform_driver atmel_tdes_driver = {
        .remove         = atmel_tdes_remove,
        .driver         = {
                .name   = "atmel_tdes",
-               .of_match_table = of_match_ptr(atmel_tdes_dt_ids),
+               .of_match_table = atmel_tdes_dt_ids,
        },
 };
 
index 70b911b..689be70 100644 (file)
@@ -15,8 +15,7 @@
 #include <linux/kthread.h>
 #include <linux/rtnetlink.h>
 #include <linux/sched.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
 
@@ -2397,7 +2396,8 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
                memset(ctx->ipad + ctx->authkeylen, 0,
                       blocksize - ctx->authkeylen);
                ctx->authkeylen = 0;
-               memcpy(ctx->opad, ctx->ipad, blocksize);
+               unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
+                             "fortified memcpy causes -Wrestrict warning");
 
                for (index = 0; index < blocksize; index++) {
                        ctx->ipad[index] ^= HMAC_IPAD_VALUE;
index feb8601..eba2d75 100644 (file)
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
 #include "caamalg_desc.h"
-#include <crypto/engine.h>
-#include <crypto/xts.h>
 #include <asm/unaligned.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/xts.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -95,13 +98,13 @@ struct caam_alg_entry {
 };
 
 struct caam_aead_alg {
-       struct aead_alg aead;
+       struct aead_engine_alg aead;
        struct caam_alg_entry caam;
        bool registered;
 };
 
 struct caam_skcipher_alg {
-       struct skcipher_alg skcipher;
+       struct skcipher_engine_alg skcipher;
        struct caam_alg_entry caam;
        bool registered;
 };
@@ -110,7 +113,6 @@ struct caam_skcipher_alg {
  * per-session context
  */
 struct caam_ctx {
-       struct crypto_engine_ctx enginectx;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u8 key[CAAM_MAX_KEY_SIZE];
@@ -188,7 +190,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
 static int aead_set_sh_desc(struct crypto_aead *aead)
 {
        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-                                                struct caam_aead_alg, aead);
+                                                struct caam_aead_alg,
+                                                aead.base);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
        struct device *jrdev = ctx->jrdev;
@@ -738,7 +741,7 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
        struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
        struct caam_skcipher_alg *alg =
                container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
-                            skcipher);
+                            skcipher.base);
        struct device *jrdev = ctx->jrdev;
        unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
        u32 *desc;
@@ -1195,7 +1198,8 @@ static void init_authenc_job(struct aead_request *req,
 {
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
-                                                struct caam_aead_alg, aead);
+                                                struct caam_aead_alg,
+                                                aead.base);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
@@ -1881,7 +1885,7 @@ static int skcipher_decrypt(struct skcipher_request *req)
 
 static struct caam_skcipher_alg driver_algs[] = {
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "cbc(aes)",
                                .cra_driver_name = "cbc-aes-caam",
@@ -1894,10 +1898,13 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "cbc(des3_ede)",
                                .cra_driver_name = "cbc-3des-caam",
@@ -1910,10 +1917,13 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .max_keysize = DES3_EDE_KEY_SIZE,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "cbc(des)",
                                .cra_driver_name = "cbc-des-caam",
@@ -1926,10 +1936,13 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .max_keysize = DES_KEY_SIZE,
                        .ivsize = DES_BLOCK_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "ctr(aes)",
                                .cra_driver_name = "ctr-aes-caam",
@@ -1943,11 +1956,14 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .chunksize = AES_BLOCK_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
                                        OP_ALG_AAI_CTR_MOD128,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "rfc3686(ctr(aes))",
                                .cra_driver_name = "rfc3686-ctr-aes-caam",
@@ -1963,6 +1979,9 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .chunksize = AES_BLOCK_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -1970,7 +1989,7 @@ static struct caam_skcipher_alg driver_algs[] = {
                },
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "xts(aes)",
                                .cra_driver_name = "xts-aes-caam",
@@ -1984,10 +2003,13 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "ecb(des)",
                                .cra_driver_name = "ecb-des-caam",
@@ -1999,10 +2021,13 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .min_keysize = DES_KEY_SIZE,
                        .max_keysize = DES_KEY_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "ecb(aes)",
                                .cra_driver_name = "ecb-aes-caam",
@@ -2014,10 +2039,13 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
        },
        {
-               .skcipher = {
+               .skcipher.base = {
                        .base = {
                                .cra_name = "ecb(des3_ede)",
                                .cra_driver_name = "ecb-des3-caam",
@@ -2029,13 +2057,16 @@ static struct caam_skcipher_alg driver_algs[] = {
                        .min_keysize = DES3_EDE_KEY_SIZE,
                        .max_keysize = DES3_EDE_KEY_SIZE,
                },
+               .skcipher.op = {
+                       .do_one_request = skcipher_do_one_req,
+               },
                .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
        },
 };
 
 static struct caam_aead_alg driver_aeads[] = {
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "rfc4106(gcm(aes))",
                                .cra_driver_name = "rfc4106-gcm-aes-caam",
@@ -2048,13 +2079,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = GCM_RFC4106_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                        .nodkp = true,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "rfc4543(gcm(aes))",
                                .cra_driver_name = "rfc4543-gcm-aes-caam",
@@ -2067,6 +2101,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = GCM_RFC4543_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                        .nodkp = true,
@@ -2074,7 +2111,7 @@ static struct caam_aead_alg driver_aeads[] = {
        },
        /* Galois Counter Mode */
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "gcm(aes)",
                                .cra_driver_name = "gcm-aes-caam",
@@ -2087,6 +2124,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = GCM_AES_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
                        .nodkp = true,
@@ -2094,7 +2134,7 @@ static struct caam_aead_alg driver_aeads[] = {
        },
        /* single-pass ipsec_esp descriptor */
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(md5),"
                                            "ecb(cipher_null))",
@@ -2109,13 +2149,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = NULL_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),"
                                            "ecb(cipher_null))",
@@ -2130,13 +2173,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = NULL_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),"
                                            "ecb(cipher_null))",
@@ -2151,13 +2197,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = NULL_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),"
                                            "ecb(cipher_null))",
@@ -2172,13 +2221,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = NULL_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),"
                                            "ecb(cipher_null))",
@@ -2193,13 +2245,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = NULL_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),"
                                            "ecb(cipher_null))",
@@ -2214,13 +2269,16 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = NULL_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
                                           OP_ALG_AAI_HMAC_PRECOMP,
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(md5),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-md5-"
@@ -2234,6 +2292,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2241,7 +2302,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(md5),"
                                            "cbc(aes)))",
@@ -2256,6 +2317,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2264,7 +2328,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha1-"
@@ -2278,6 +2342,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2285,7 +2352,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha1),"
                                            "cbc(aes)))",
@@ -2300,6 +2367,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2308,7 +2378,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha224-"
@@ -2322,6 +2392,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2329,7 +2402,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha224),"
                                            "cbc(aes)))",
@@ -2344,6 +2417,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2352,7 +2428,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha256-"
@@ -2366,6 +2442,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2373,7 +2452,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha256),"
                                            "cbc(aes)))",
@@ -2388,6 +2467,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2396,7 +2478,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha384-"
@@ -2410,6 +2492,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2417,7 +2502,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha384),"
                                            "cbc(aes)))",
@@ -2432,6 +2517,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2440,7 +2528,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                                .cra_driver_name = "authenc-hmac-sha512-"
@@ -2454,6 +2542,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2461,7 +2552,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha512),"
                                            "cbc(aes)))",
@@ -2476,6 +2567,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2484,7 +2578,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
                                .cra_driver_name = "authenc-hmac-md5-"
@@ -2498,6 +2592,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2505,7 +2602,7 @@ static struct caam_aead_alg driver_aeads[] = {
                }
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(md5),"
                                            "cbc(des3_ede)))",
@@ -2520,6 +2617,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2528,7 +2628,7 @@ static struct caam_aead_alg driver_aeads[] = {
                }
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),"
                                            "cbc(des3_ede))",
@@ -2543,6 +2643,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2550,7 +2653,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha1),"
                                            "cbc(des3_ede)))",
@@ -2566,6 +2669,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2574,7 +2680,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),"
                                            "cbc(des3_ede))",
@@ -2589,6 +2695,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2596,7 +2705,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha224),"
                                            "cbc(des3_ede)))",
@@ -2612,6 +2721,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2620,7 +2732,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),"
                                            "cbc(des3_ede))",
@@ -2635,6 +2747,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2642,7 +2757,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha256),"
                                            "cbc(des3_ede)))",
@@ -2658,6 +2773,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2666,7 +2784,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),"
                                            "cbc(des3_ede))",
@@ -2681,6 +2799,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2688,7 +2809,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha384),"
                                            "cbc(des3_ede)))",
@@ -2704,6 +2825,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2712,7 +2836,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),"
                                            "cbc(des3_ede))",
@@ -2727,6 +2851,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2734,7 +2861,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha512),"
                                            "cbc(des3_ede)))",
@@ -2750,6 +2877,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2758,7 +2888,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(md5),cbc(des))",
                                .cra_driver_name = "authenc-hmac-md5-"
@@ -2772,6 +2902,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2779,7 +2912,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(md5),"
                                            "cbc(des)))",
@@ -2794,6 +2927,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
@@ -2802,7 +2938,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),cbc(des))",
                                .cra_driver_name = "authenc-hmac-sha1-"
@@ -2816,6 +2952,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2823,7 +2962,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha1),"
                                            "cbc(des)))",
@@ -2838,6 +2977,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
@@ -2846,7 +2988,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(des))",
                                .cra_driver_name = "authenc-hmac-sha224-"
@@ -2860,6 +3002,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2867,7 +3012,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha224),"
                                            "cbc(des)))",
@@ -2882,6 +3027,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
@@ -2890,7 +3038,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),cbc(des))",
                                .cra_driver_name = "authenc-hmac-sha256-"
@@ -2904,6 +3052,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2911,7 +3062,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha256),"
                                            "cbc(des)))",
@@ -2926,6 +3077,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
@@ -2934,7 +3088,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),cbc(des))",
                                .cra_driver_name = "authenc-hmac-sha384-"
@@ -2948,6 +3102,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2955,7 +3112,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha384),"
                                            "cbc(des)))",
@@ -2970,6 +3127,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
@@ -2978,7 +3138,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),cbc(des))",
                                .cra_driver_name = "authenc-hmac-sha512-"
@@ -2992,6 +3152,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -2999,7 +3162,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "echainiv(authenc(hmac(sha512),"
                                            "cbc(des)))",
@@ -3014,6 +3177,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = DES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
@@ -3022,7 +3188,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(md5),"
                                            "rfc3686(ctr(aes)))",
@@ -3037,6 +3203,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3046,7 +3215,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "seqiv(authenc("
                                            "hmac(md5),rfc3686(ctr(aes))))",
@@ -3061,6 +3230,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = MD5_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3071,7 +3243,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha1),"
                                            "rfc3686(ctr(aes)))",
@@ -3086,6 +3258,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3095,7 +3270,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "seqiv(authenc("
                                            "hmac(sha1),rfc3686(ctr(aes))))",
@@ -3110,6 +3285,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3120,7 +3298,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),"
                                            "rfc3686(ctr(aes)))",
@@ -3135,6 +3313,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3144,7 +3325,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "seqiv(authenc("
                                            "hmac(sha224),rfc3686(ctr(aes))))",
@@ -3159,6 +3340,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3169,7 +3353,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha256),"
                                            "rfc3686(ctr(aes)))",
@@ -3184,6 +3368,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3193,7 +3380,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "seqiv(authenc(hmac(sha256),"
                                            "rfc3686(ctr(aes))))",
@@ -3208,6 +3395,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3218,7 +3408,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),"
                                            "rfc3686(ctr(aes)))",
@@ -3233,6 +3423,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3242,7 +3435,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "seqiv(authenc(hmac(sha384),"
                                            "rfc3686(ctr(aes))))",
@@ -3257,6 +3450,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3267,7 +3463,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),"
                                            "rfc3686(ctr(aes)))",
@@ -3282,6 +3478,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3291,7 +3490,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "seqiv(authenc(hmac(sha512),"
                                            "rfc3686(ctr(aes))))",
@@ -3306,6 +3505,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_AES |
                                           OP_ALG_AAI_CTR_MOD128,
@@ -3316,7 +3518,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "rfc7539(chacha20,poly1305)",
                                .cra_driver_name = "rfc7539-chacha20-poly1305-"
@@ -3330,6 +3532,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = CHACHAPOLY_IV_SIZE,
                        .maxauthsize = POLY1305_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
                                           OP_ALG_AAI_AEAD,
@@ -3339,7 +3544,7 @@ static struct caam_aead_alg driver_aeads[] = {
                },
        },
        {
-               .aead = {
+               .aead.base = {
                        .base = {
                                .cra_name = "rfc7539esp(chacha20,poly1305)",
                                .cra_driver_name = "rfc7539esp-chacha20-"
@@ -3353,6 +3558,9 @@ static struct caam_aead_alg driver_aeads[] = {
                        .ivsize = 8,
                        .maxauthsize = POLY1305_DIGEST_SIZE,
                },
+               .aead.op = {
+                       .do_one_request = aead_do_one_req,
+               },
                .caam = {
                        .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
                                           OP_ALG_AAI_AEAD,
@@ -3412,13 +3620,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
 {
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct caam_skcipher_alg *caam_alg =
-               container_of(alg, typeof(*caam_alg), skcipher);
+               container_of(alg, typeof(*caam_alg), skcipher.base);
        struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
        u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
        int ret = 0;
 
-       ctx->enginectx.op.do_one_request = skcipher_do_one_req;
-
        if (alg_aai == OP_ALG_AAI_XTS) {
                const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
                struct crypto_skcipher *fallback;
@@ -3449,13 +3655,11 @@ static int caam_aead_init(struct crypto_aead *tfm)
 {
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct caam_aead_alg *caam_alg =
-                container_of(alg, struct caam_aead_alg, aead);
+                container_of(alg, struct caam_aead_alg, aead.base);
        struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);
 
        crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
 
-       ctx->enginectx.op.do_one_request = aead_do_one_req;
-
        return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
 }
 
@@ -3490,20 +3694,20 @@ void caam_algapi_exit(void)
                struct caam_aead_alg *t_alg = driver_aeads + i;
 
                if (t_alg->registered)
-                       crypto_unregister_aead(&t_alg->aead);
+                       crypto_engine_unregister_aead(&t_alg->aead);
        }
 
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                struct caam_skcipher_alg *t_alg = driver_algs + i;
 
                if (t_alg->registered)
-                       crypto_unregister_skcipher(&t_alg->skcipher);
+                       crypto_engine_unregister_skcipher(&t_alg->skcipher);
        }
 }
 
 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 {
-       struct skcipher_alg *alg = &t_alg->skcipher;
+       struct skcipher_alg *alg = &t_alg->skcipher.base;
 
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
@@ -3517,7 +3721,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
 
 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
 {
-       struct aead_alg *alg = &t_alg->aead;
+       struct aead_alg *alg = &t_alg->aead.base;
 
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CAAM_CRA_PRIORITY;
@@ -3607,10 +3811,10 @@ int caam_algapi_init(struct device *ctrldev)
 
                caam_skcipher_alg_init(t_alg);
 
-               err = crypto_register_skcipher(&t_alg->skcipher);
+               err = crypto_engine_register_skcipher(&t_alg->skcipher);
                if (err) {
                        pr_warn("%s alg registration failed\n",
-                               t_alg->skcipher.base.cra_driver_name);
+                               t_alg->skcipher.base.base.cra_driver_name);
                        continue;
                }
 
@@ -3654,15 +3858,15 @@ int caam_algapi_init(struct device *ctrldev)
                 * if MD or MD size is not supported by device.
                 */
                if (is_mdha(c2_alg_sel) &&
-                   (!md_inst || t_alg->aead.maxauthsize > md_limit))
+                   (!md_inst || t_alg->aead.base.maxauthsize > md_limit))
                        continue;
 
                caam_aead_alg_init(t_alg);
 
-               err = crypto_register_aead(&t_alg->aead);
+               err = crypto_engine_register_aead(&t_alg->aead);
                if (err) {
                        pr_warn("%s alg registration failed\n",
-                               t_alg->aead.base.cra_driver_name);
+                               t_alg->aead.base.base.cra_driver_name);
                        continue;
                }
 
index 80deb00..290c850 100644 (file)
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
 #include "caamhash_desc.h"
-#include <crypto/engine.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/hash.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 
 #define CAAM_CRA_PRIORITY              3000
 
@@ -89,7 +93,6 @@ static struct list_head hash_list;
 
 /* ahash per-session context */
 struct caam_hash_ctx {
-       struct crypto_engine_ctx enginectx;
        u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
        u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
@@ -368,10 +371,8 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
        int ret;
 
        desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
-       if (!desc) {
-               dev_err(jrdev, "unable to allocate key input memory\n");
+       if (!desc)
                return -ENOMEM;
-       }
 
        init_job_desc(desc, 0);
 
@@ -702,19 +703,14 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                                             int sg_num, u32 *sh_desc,
                                             dma_addr_t sh_desc_dma)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
        struct caam_hash_state *state = ahash_request_ctx_dma(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC;
        struct ahash_edesc *edesc;
-       unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
 
-       edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
-       if (!edesc) {
-               dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+       edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
+       if (!edesc)
                return NULL;
-       }
 
        state->edesc = edesc;
 
@@ -1757,7 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
 struct caam_hash_alg {
        struct list_head entry;
        int alg_type;
-       struct ahash_alg ahash_alg;
+       struct ahash_engine_alg ahash_alg;
 };
 
 static int caam_hash_cra_init(struct crypto_tfm *tfm)
@@ -1769,7 +1765,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
        struct ahash_alg *alg =
                 container_of(halg, struct ahash_alg, halg);
        struct caam_hash_alg *caam_hash =
-                container_of(alg, struct caam_hash_alg, ahash_alg);
+                container_of(alg, struct caam_hash_alg, ahash_alg.base);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
        /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
        static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
@@ -1860,8 +1856,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
                                                      sh_desc_digest) -
                                        sh_desc_update_offset;
 
-       ctx->enginectx.op.do_one_request = ahash_do_one_req;
-
        crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
 
        /*
@@ -1894,7 +1888,7 @@ void caam_algapi_hash_exit(void)
                return;
 
        list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
-               crypto_unregister_ahash(&t_alg->ahash_alg);
+               crypto_engine_unregister_ahash(&t_alg->ahash_alg);
                list_del(&t_alg->entry);
                kfree(t_alg);
        }
@@ -1909,13 +1903,11 @@ caam_hash_alloc(struct caam_hash_template *template,
        struct crypto_alg *alg;
 
        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
-       if (!t_alg) {
-               pr_err("failed to allocate t_alg\n");
+       if (!t_alg)
                return ERR_PTR(-ENOMEM);
-       }
 
-       t_alg->ahash_alg = template->template_ahash;
-       halg = &t_alg->ahash_alg;
+       t_alg->ahash_alg.base = template->template_ahash;
+       halg = &t_alg->ahash_alg.base;
        alg = &halg->halg.base;
 
        if (keyed) {
@@ -1928,7 +1920,7 @@ caam_hash_alloc(struct caam_hash_template *template,
                         template->name);
                snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                         template->driver_name);
-               t_alg->ahash_alg.setkey = NULL;
+               halg->setkey = NULL;
        }
        alg->cra_module = THIS_MODULE;
        alg->cra_init = caam_hash_cra_init;
@@ -1940,6 +1932,7 @@ caam_hash_alloc(struct caam_hash_template *template,
        alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
 
        t_alg->alg_type = template->alg_type;
+       t_alg->ahash_alg.op.do_one_request = ahash_do_one_req;
 
        return t_alg;
 }
@@ -2001,10 +1994,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
                        continue;
                }
 
-               err = crypto_register_ahash(&t_alg->ahash_alg);
+               err = crypto_engine_register_ahash(&t_alg->ahash_alg);
                if (err) {
                        pr_warn("%s alg registration failed: %d\n",
-                               t_alg->ahash_alg.halg.base.cra_driver_name,
+                               t_alg->ahash_alg.base.halg.base.cra_driver_name,
                                err);
                        kfree(t_alg);
                } else
@@ -2021,10 +2014,10 @@ int caam_algapi_hash_init(struct device *ctrldev)
                        continue;
                }
 
-               err = crypto_register_ahash(&t_alg->ahash_alg);
+               err = crypto_engine_register_ahash(&t_alg->ahash_alg);
                if (err) {
                        pr_warn("%s alg registration failed: %d\n",
-                               t_alg->ahash_alg.halg.base.cra_driver_name,
+                               t_alg->ahash_alg.base.halg.base.cra_driver_name,
                                err);
                        kfree(t_alg);
                } else
index 72afc24..887a5f2 100644 (file)
 #include "desc_constr.h"
 #include "sg_sw_sec4.h"
 #include "caampkc.h"
+#include <crypto/internal/engine.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 
 #define DESC_RSA_PUB_LEN       (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
 #define DESC_RSA_PRIV_F1_LEN   (2 * CAAM_CMD_SZ + \
@@ -38,7 +42,7 @@ static u8 *zero_buffer;
 static bool init_done;
 
 struct caam_akcipher_alg {
-       struct akcipher_alg akcipher;
+       struct akcipher_engine_alg akcipher;
        bool registered;
 };
 
@@ -225,7 +229,9 @@ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
                if (len && *buff)
                        break;
 
-               sg_miter_next(&miter);
+               if (!sg_miter_next(&miter))
+                       break;
+
                buff = miter.addr;
                len = miter.length;
 
@@ -1121,8 +1127,6 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
                return -ENOMEM;
        }
 
-       ctx->enginectx.op.do_one_request = akcipher_do_one_req;
-
        return 0;
 }
 
@@ -1139,7 +1143,7 @@ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
 }
 
 static struct caam_akcipher_alg caam_rsa = {
-       .akcipher = {
+       .akcipher.base = {
                .encrypt = caam_rsa_enc,
                .decrypt = caam_rsa_dec,
                .set_pub_key = caam_rsa_set_pub_key,
@@ -1155,7 +1159,10 @@ static struct caam_akcipher_alg caam_rsa = {
                        .cra_ctxsize = sizeof(struct caam_rsa_ctx) +
                                       CRYPTO_DMA_PADDING,
                },
-       }
+       },
+       .akcipher.op = {
+               .do_one_request = akcipher_do_one_req,
+       },
 };
 
 /* Public Key Cryptography module initialization handler */
@@ -1193,12 +1200,12 @@ int caam_pkc_init(struct device *ctrldev)
        if (!zero_buffer)
                return -ENOMEM;
 
-       err = crypto_register_akcipher(&caam_rsa.akcipher);
+       err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
 
        if (err) {
                kfree(zero_buffer);
                dev_warn(ctrldev, "%s alg registration failed\n",
-                        caam_rsa.akcipher.base.cra_driver_name);
+                        caam_rsa.akcipher.base.base.cra_driver_name);
        } else {
                init_done = true;
                caam_rsa.registered = true;
@@ -1214,7 +1221,7 @@ void caam_pkc_exit(void)
                return;
 
        if (caam_rsa.registered)
-               crypto_unregister_akcipher(&caam_rsa.akcipher);
+               crypto_engine_unregister_akcipher(&caam_rsa.akcipher);
 
        kfree(zero_buffer);
 }
index cc889a5..96d0370 100644 (file)
@@ -12,7 +12,6 @@
 #define _PKC_DESC_H_
 #include "compat.h"
 #include "pdb.h"
-#include <crypto/engine.h>
 
 /**
  * caam_priv_key_form - CAAM RSA private key representation
@@ -88,13 +87,11 @@ struct caam_rsa_key {
 
 /**
  * caam_rsa_ctx - per session context.
- * @enginectx   : crypto engine context
  * @key         : RSA key in DMA zone
  * @dev         : device structure
  * @padding_dma : dma address of padding, for adding it to the input
  */
 struct caam_rsa_ctx {
-       struct crypto_engine_ctx enginectx;
        struct caam_rsa_key key;
        struct device *dev;
        dma_addr_t padding_dma;
index 68e7377..bdf367f 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/platform_device.h>
 #include <linux/sys_soc.h>
 #include <linux/fsl/mc.h>
 
@@ -740,6 +741,109 @@ static int caam_ctrl_rng_init(struct device *dev)
        return 0;
 }
 
+/* Indicate if the internal state of the CAAM is lost during PM */
+static int caam_off_during_pm(void)
+{
+       bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
+                                of_machine_is_compatible("fsl,imx6qp") ||
+                                of_machine_is_compatible("fsl,imx6dl");
+
+       return not_off_during_pm ? 0 : 1;
+}
+
+static void caam_state_save(struct device *dev)
+{
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+       struct caam_ctl_state *state = &ctrlpriv->state;
+       struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+       u32 deco_inst, jr_inst;
+       int i;
+
+       state->mcr = rd_reg32(&ctrl->mcr);
+       state->scfgr = rd_reg32(&ctrl->scfgr);
+
+       deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+                    CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+       for (i = 0; i < deco_inst; i++) {
+               state->deco_mid[i].liodn_ms =
+                       rd_reg32(&ctrl->deco_mid[i].liodn_ms);
+               state->deco_mid[i].liodn_ls =
+                       rd_reg32(&ctrl->deco_mid[i].liodn_ls);
+       }
+
+       jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+                  CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+       for (i = 0; i < jr_inst; i++) {
+               state->jr_mid[i].liodn_ms =
+                       rd_reg32(&ctrl->jr_mid[i].liodn_ms);
+               state->jr_mid[i].liodn_ls =
+                       rd_reg32(&ctrl->jr_mid[i].liodn_ls);
+       }
+}
+
+static void caam_state_restore(const struct device *dev)
+{
+       const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+       const struct caam_ctl_state *state = &ctrlpriv->state;
+       struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+       u32 deco_inst, jr_inst;
+       int i;
+
+       wr_reg32(&ctrl->mcr, state->mcr);
+       wr_reg32(&ctrl->scfgr, state->scfgr);
+
+       deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+                    CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
+       for (i = 0; i < deco_inst; i++) {
+               wr_reg32(&ctrl->deco_mid[i].liodn_ms,
+                        state->deco_mid[i].liodn_ms);
+               wr_reg32(&ctrl->deco_mid[i].liodn_ls,
+                        state->deco_mid[i].liodn_ls);
+       }
+
+       jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
+                  CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
+       for (i = 0; i < jr_inst; i++) {
+               wr_reg32(&ctrl->jr_mid[i].liodn_ms,
+                        state->jr_mid[i].liodn_ms);
+               wr_reg32(&ctrl->jr_mid[i].liodn_ls,
+                        state->jr_mid[i].liodn_ls);
+       }
+
+       if (ctrlpriv->virt_en == 1)
+               clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
+                             JRSTART_JR1_START | JRSTART_JR2_START |
+                             JRSTART_JR3_START);
+}
+
+static int caam_ctrl_suspend(struct device *dev)
+{
+       const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+
+       if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
+               caam_state_save(dev);
+
+       return 0;
+}
+
+static int caam_ctrl_resume(struct device *dev)
+{
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
+       int ret = 0;
+
+       if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
+               caam_state_restore(dev);
+
+               /* HW and rng will be reset so deinstantiation can be removed */
+               devm_remove_action(dev, devm_deinstantiate_rng, dev);
+               ret = caam_ctrl_rng_init(dev);
+       }
+
+       return ret;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);
+
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
@@ -771,6 +875,8 @@ static int caam_probe(struct platform_device *pdev)
 
        caam_imx = (bool)imx_soc_match;
 
+       ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();
+
        if (imx_soc_match) {
                /*
                 * Until Layerscape and i.MX OP-TEE get in sync,
@@ -1033,6 +1139,7 @@ static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
                .of_match_table = caam_match,
+               .pm = pm_ptr(&caam_ctrl_pm_ops),
        },
        .probe       = caam_probe,
 };
index b4f7bf7..e513201 100644 (file)
@@ -4,7 +4,7 @@
  * Private/internal definitions between modules
  *
  * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2023 NXP
  */
 
 #ifndef INTERN_H
@@ -47,6 +47,16 @@ struct caam_jrentry_info {
        u32 desc_size;  /* Stored size for postprocessing, header derived */
 };
 
+struct caam_jr_state {
+       dma_addr_t inpbusaddr;
+       dma_addr_t outbusaddr;
+};
+
+struct caam_jr_dequeue_params {
+       struct device *dev;
+       int enable_itr;
+};
+
 /* Private sub-storage for a single JobR */
 struct caam_drv_private_jr {
        struct list_head        list_node;      /* Job Ring device list */
@@ -54,6 +64,7 @@ struct caam_drv_private_jr {
        int ridx;
        struct caam_job_ring __iomem *rregs;    /* JobR's register space */
        struct tasklet_struct irqtask;
+       struct caam_jr_dequeue_params tasklet_params;
        int irq;                        /* One per queue */
        bool hwrng;
 
@@ -71,6 +82,15 @@ struct caam_drv_private_jr {
        int tail;                       /* entinfo (s/w ring) tail index */
        void *outring;                  /* Base of output ring, DMA-safe */
        struct crypto_engine *engine;
+
+       struct caam_jr_state state;     /* State of the JR during PM */
+};
+
+struct caam_ctl_state {
+       struct masterid deco_mid[16];
+       struct masterid jr_mid[4];
+       u32 mcr;
+       u32 scfgr;
 };
 
 /*
@@ -116,6 +136,9 @@ struct caam_drv_private {
        struct dentry *ctl; /* controller dir */
        struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
 #endif
+
+       int caam_off_during_pm;         /* If the CAAM is reset after suspend */
+       struct caam_ctl_state state;    /* State of the CTL during PM */
 };
 
 #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
index 96dea53..b1f1b39 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 
 #include "compat.h"
 #include "ctrl.h"
@@ -117,6 +118,23 @@ static int caam_jr_flush(struct device *dev)
        return caam_jr_stop_processing(dev, JRCR_RESET);
 }
 
+/* The resume can be used after a park or a flush if CAAM has not been reset */
+static int caam_jr_restart_processing(struct device *dev)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+       u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
+                         JRINT_ERR_HALT_MASK;
+
+       /* Check that the flush/park is completed */
+       if (halt_status != JRINT_ERR_HALT_COMPLETE)
+               return -1;
+
+       /* Resume processing of jobs */
+       clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);
+
+       return 0;
+}
+
 static int caam_reset_hw_jr(struct device *dev)
 {
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
@@ -215,7 +233,7 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
         * tasklet if jobs done.
         */
        irqstate = rd_reg32(&jrp->rregs->jrintstatus);
-       if (!irqstate)
+       if (!(irqstate & JRINT_JR_INT))
                return IRQ_NONE;
 
        /*
@@ -245,7 +263,8 @@ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
 static void caam_jr_dequeue(unsigned long devarg)
 {
        int hw_idx, sw_idx, i, head, tail;
-       struct device *dev = (struct device *)devarg;
+       struct caam_jr_dequeue_params *params = (void *)devarg;
+       struct device *dev = params->dev;
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
        void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
        u32 *userdesc, userstatus;
@@ -319,8 +338,9 @@ static void caam_jr_dequeue(unsigned long devarg)
                outring_used--;
        }
 
-       /* reenable / unmask IRQs */
-       clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
+       if (params->enable_itr)
+               /* reenable / unmask IRQs */
+               clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
 }
 
 /**
@@ -445,8 +465,16 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
         * Guarantee that the descriptor's DMA address has been written to
         * the next slot in the ring before the write index is updated, since
         * other cores may update this index independently.
+        *
+        * Under heavy DDR load, smp_wmb() or dma_wmb() fail to make the input
+        * ring be updated before the CAAM starts reading it. So, CAAM will
+        * process, again, an old descriptor address and will put it in the
+        * output ring. This will make caam_jr_dequeue() fail, since this
+        * old descriptor is not in the software ring.
+        * To fix this, use wmb() which works on the full system instead of
+        * inner/outer shareable domains.
         */
-       smp_wmb();
+       wmb();
 
        jrp->head = (head + 1) & (JOBR_DEPTH - 1);
 
@@ -470,6 +498,29 @@ int caam_jr_enqueue(struct device *dev, u32 *desc,
 }
 EXPORT_SYMBOL(caam_jr_enqueue);
 
+static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
+                           dma_addr_t outbusaddr)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+       wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
+       wr_reg64(&jrp->rregs->outring_base, outbusaddr);
+       wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
+       wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
+
+       /* Select interrupt coalescing parameters */
+       clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
+                     (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+                     (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+}
+
+static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
+{
+       jrp->out_ring_read_index = 0;
+       jrp->head = 0;
+       jrp->tail = 0;
+}
+
 /*
  * Init JobR independent of platform property detection
  */
@@ -506,25 +557,16 @@ static int caam_jr_init(struct device *dev)
                jrp->entinfo[i].desc_addr_dma = !0;
 
        /* Setup rings */
-       jrp->out_ring_read_index = 0;
-       jrp->head = 0;
-       jrp->tail = 0;
-
-       wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
-       wr_reg64(&jrp->rregs->outring_base, outbusaddr);
-       wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
-       wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);
-
+       caam_jr_reset_index(jrp);
        jrp->inpring_avail = JOBR_DEPTH;
+       caam_jr_init_hw(dev, inpbusaddr, outbusaddr);
 
        spin_lock_init(&jrp->inplock);
 
-       /* Select interrupt coalescing parameters */
-       clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
-                     (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
-                     (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
-
-       tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);
+       jrp->tasklet_params.dev = dev;
+       jrp->tasklet_params.enable_itr = 1;
+       tasklet_init(&jrp->irqtask, caam_jr_dequeue,
+                    (unsigned long)&jrp->tasklet_params);
 
        /* Connect job ring interrupt handler. */
        error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
@@ -635,11 +677,134 @@ static int caam_jr_probe(struct platform_device *pdev)
 
        atomic_set(&jrpriv->tfm_count, 0);
 
+       device_init_wakeup(&pdev->dev, 1);
+       device_set_wakeup_enable(&pdev->dev, false);
+
        register_algs(jrpriv, jrdev->parent);
 
        return 0;
 }
 
+static void caam_jr_get_hw_state(struct device *dev)
+{
+       struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+
+       jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+       jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+}
+
+static int caam_jr_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+       struct caam_jr_dequeue_params suspend_params = {
+               .dev = dev,
+               .enable_itr = 0,
+       };
+
+       /* Remove the node from Physical JobR list maintained by driver */
+       spin_lock(&driver_data.jr_alloc_lock);
+       list_del(&jrpriv->list_node);
+       spin_unlock(&driver_data.jr_alloc_lock);
+
+       if (jrpriv->hwrng)
+               caam_rng_exit(dev->parent);
+
+       if (ctrlpriv->caam_off_during_pm) {
+               int err;
+
+               tasklet_disable(&jrpriv->irqtask);
+
+               /* mask itr to call flush */
+               clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);
+
+               /* Invalidate jobs in process */
+               err = caam_jr_flush(dev);
+               if (err) {
+                       dev_err(dev, "Failed to flush\n");
+                       return err;
+               }
+
+               /* Dequeuing flushed jobs */
+               caam_jr_dequeue((unsigned long)&suspend_params);
+
+               /* Save state */
+               caam_jr_get_hw_state(dev);
+       } else if (device_may_wakeup(&pdev->dev)) {
+               enable_irq_wake(jrpriv->irq);
+       }
+
+       return 0;
+}
+
+static int caam_jr_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
+
+       if (ctrlpriv->caam_off_during_pm) {
+               u64 inp_addr;
+               int err;
+
+               /*
+                * Check if the CAAM has been reset by checking the address of
+                * the input ring
+                */
+               inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
+               if (inp_addr != 0) {
+                       /* JR still has some configuration */
+                       if (inp_addr == jrpriv->state.inpbusaddr) {
+                               /* JR has not been reset */
+                               err = caam_jr_restart_processing(dev);
+                               if (err) {
+                                       dev_err(dev,
+                                               "Restart processing failed\n");
+                                       return err;
+                               }
+
+                               tasklet_enable(&jrpriv->irqtask);
+
+                               clrsetbits_32(&jrpriv->rregs->rconfig_lo,
+                                             JRCFG_IMSK, 0);
+
+                               goto add_jr;
+                       } else if (ctrlpriv->optee_en) {
+                               /* JR has been used by OPTEE, reset it */
+                               err = caam_reset_hw_jr(dev);
+                               if (err) {
+                                       dev_err(dev, "Failed to reset JR\n");
+                                       return err;
+                               }
+                       } else {
+                               /* No explanation, return error */
+                               return -EIO;
+                       }
+               }
+
+               caam_jr_reset_index(jrpriv);
+               caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
+                               jrpriv->state.outbusaddr);
+
+               tasklet_enable(&jrpriv->irqtask);
+       } else if (device_may_wakeup(&pdev->dev)) {
+               disable_irq_wake(jrpriv->irq);
+       }
+
+add_jr:
+       spin_lock(&driver_data.jr_alloc_lock);
+       list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+       spin_unlock(&driver_data.jr_alloc_lock);
+
+       if (jrpriv->hwrng)
+               jrpriv->hwrng = !caam_rng_init(dev->parent);
+
+       return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);
+
 static const struct of_device_id caam_jr_match[] = {
        {
                .compatible = "fsl,sec-v4.0-job-ring",
@@ -655,6 +820,7 @@ static struct platform_driver caam_jr_driver = {
        .driver = {
                .name = "caam_jr",
                .of_match_table = caam_jr_match,
+               .pm = pm_ptr(&caam_jr_pm_ops),
        },
        .probe       = caam_jr_probe,
        .remove      = caam_jr_remove,
index 2ad2c10..46a0838 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/netdevice.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <soc/fsl/qman.h>
index 189e74c..873df9d 100644 (file)
@@ -459,12 +459,6 @@ struct masterid {
        u32 liodn_ls;   /* LIODN for non-sequence and seq access */
 };
 
-/* Partition ID for DMA configuration */
-struct partid {
-       u32 rsvd1;
-       u32 pidr;       /* partition ID, DECO */
-};
-
 /* RNGB test mode (replicated twice in some configurations) */
 /* Padded out to 0x100 */
 struct rngtst {
@@ -590,8 +584,7 @@ struct caam_ctrl {
        u32 deco_rsr;                   /* DECORSR - Deco Request Source */
        u32 rsvd11;
        u32 deco_rq;                    /* DECORR - DECO Request */
-       struct partid deco_mid[5];      /* DECOxLIODNR - 1 per DECO */
-       u32 rsvd5[22];
+       struct masterid deco_mid[16];   /* DECOxLIODNR - 1 per DECO */
 
        /* DECO Availability/Reset Section                      120-3ff */
        u32 deco_avail;         /* DAR - DECO availability */
index f619649..aa0ba2d 100644 (file)
@@ -11,7 +11,8 @@ ccp-$(CONFIG_PCI) += sp-pci.o
 ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
                                    sev-dev.o \
                                    tee-dev.o \
-                                   platform-access.o
+                                   platform-access.o \
+                                   dbc.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
new file mode 100644 (file)
index 0000000..839ea14
--- /dev/null
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Dynamic Boost Control interface
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include "dbc.h"
+
+struct error_map {
+       u32 psp;
+       int ret;
+};
+
+#define DBC_ERROR_ACCESS_DENIED                0x0001
+#define DBC_ERROR_EXCESS_DATA          0x0004
+#define DBC_ERROR_BAD_PARAMETERS       0x0006
+#define DBC_ERROR_BAD_STATE            0x0007
+#define DBC_ERROR_NOT_IMPLEMENTED      0x0009
+#define DBC_ERROR_BUSY                 0x000D
+#define DBC_ERROR_MESSAGE_FAILURE      0x0307
+#define DBC_ERROR_OVERFLOW             0x300F
+#define DBC_ERROR_SIGNATURE_INVALID    0x3072
+
+static struct error_map error_codes[] = {
+       {DBC_ERROR_ACCESS_DENIED,       -EACCES},
+       {DBC_ERROR_EXCESS_DATA,         -E2BIG},
+       {DBC_ERROR_BAD_PARAMETERS,      -EINVAL},
+       {DBC_ERROR_BAD_STATE,           -EAGAIN},
+       {DBC_ERROR_MESSAGE_FAILURE,     -ENOENT},
+       {DBC_ERROR_NOT_IMPLEMENTED,     -ENOENT},
+       {DBC_ERROR_BUSY,                -EBUSY},
+       {DBC_ERROR_OVERFLOW,            -ENFILE},
+       {DBC_ERROR_SIGNATURE_INVALID,   -EPERM},
+       {0x0,   0x0},
+};
+
+static int send_dbc_cmd(struct psp_dbc_device *dbc_dev,
+                       enum psp_platform_access_msg msg)
+{
+       int ret;
+
+       dbc_dev->mbox->req.header.status = 0;
+       ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox);
+       if (ret == -EIO) {
+               int i;
+
+               dev_dbg(dbc_dev->dev,
+                        "msg 0x%x failed with PSP error: 0x%x\n",
+                        msg, dbc_dev->mbox->req.header.status);
+
+               for (i = 0; error_codes[i].psp; i++) {
+                       if (dbc_dev->mbox->req.header.status == error_codes[i].psp)
+                               return error_codes[i].ret;
+               }
+       }
+
+       return ret;
+}
+
+static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
+{
+       int ret;
+
+       dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce);
+       ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+       if (ret == -EAGAIN) {
+               dev_dbg(dbc_dev->dev, "retrying get nonce\n");
+               ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
+       }
+
+       return ret;
+}
+
+static int send_dbc_parameter(struct psp_dbc_device *dbc_dev)
+{
+       dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param);
+
+       switch (dbc_dev->mbox->dbc_param.user.msg_index) {
+       case PARAM_SET_FMAX_CAP:
+       case PARAM_SET_PWR_CAP:
+       case PARAM_SET_GFX_MODE:
+               return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER);
+       case PARAM_GET_FMAX_CAP:
+       case PARAM_GET_PWR_CAP:
+       case PARAM_GET_CURR_TEMP:
+       case PARAM_GET_FMAX_MAX:
+       case PARAM_GET_FMAX_MIN:
+       case PARAM_GET_SOC_PWR_MAX:
+       case PARAM_GET_SOC_PWR_MIN:
+       case PARAM_GET_SOC_PWR_CUR:
+       case PARAM_GET_GFX_MODE:
+               return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER);
+       }
+
+       return -EINVAL;
+}
+
+void dbc_dev_destroy(struct psp_device *psp)
+{
+       struct psp_dbc_device *dbc_dev = psp->dbc_data;
+
+       if (!dbc_dev)
+               return;
+
+       misc_deregister(&dbc_dev->char_dev);
+       mutex_destroy(&dbc_dev->ioctl_mutex);
+       psp->dbc_data = NULL;
+}
+
+static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       struct psp_device *psp_master = psp_get_master_device();
+       void __user *argp = (void __user *)arg;
+       struct psp_dbc_device *dbc_dev;
+       int ret;
+
+       if (!psp_master || !psp_master->dbc_data)
+               return -ENODEV;
+       dbc_dev = psp_master->dbc_data;
+
+       mutex_lock(&dbc_dev->ioctl_mutex);
+
+       switch (cmd) {
+       case DBCIOCNONCE:
+               if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp,
+                                  sizeof(struct dbc_user_nonce))) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+
+               ret = send_dbc_nonce(dbc_dev);
+               if (ret)
+                       goto unlock;
+
+               if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user,
+                                sizeof(struct dbc_user_nonce))) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               break;
+       case DBCIOCUID:
+               dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid);
+               if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp,
+                                  sizeof(struct dbc_user_setuid))) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+
+               ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
+               if (ret)
+                       goto unlock;
+
+               if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user,
+                                sizeof(struct dbc_user_setuid))) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               break;
+       case DBCIOCPARAM:
+               if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp,
+                                  sizeof(struct dbc_user_param))) {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+
+               ret = send_dbc_parameter(dbc_dev);
+               if (ret)
+                       goto unlock;
+
+               if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user,
+                                sizeof(struct dbc_user_param)))  {
+                       ret = -EFAULT;
+                       goto unlock;
+               }
+               break;
+       default:
+               ret = -EINVAL;
+
+       }
+unlock:
+       mutex_unlock(&dbc_dev->ioctl_mutex);
+
+       return ret;
+}
+
+static const struct file_operations dbc_fops = {
+       .owner  = THIS_MODULE,
+       .unlocked_ioctl = dbc_ioctl,
+};
+
+int dbc_dev_init(struct psp_device *psp)
+{
+       struct device *dev = psp->dev;
+       struct psp_dbc_device *dbc_dev;
+       int ret;
+
+       if (!PSP_FEATURE(psp, DBC))
+               return 0;
+
+       dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL);
+       if (!dbc_dev)
+               return -ENOMEM;
+
+       BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE);
+       dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0);
+       if (!dbc_dev->mbox) {
+               ret = -ENOMEM;
+               goto cleanup_dev;
+       }
+
+       psp->dbc_data = dbc_dev;
+       dbc_dev->dev = dev;
+
+       ret = send_dbc_nonce(dbc_dev);
+       if (ret == -EACCES) {
+               dev_dbg(dbc_dev->dev,
+                       "dynamic boost control was previously authenticated\n");
+               ret = 0;
+       }
+       dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n",
+               ret ? "un" : "");
+       if (ret) {
+               ret = 0;
+               goto cleanup_mbox;
+       }
+
+       dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR;
+       dbc_dev->char_dev.name = "dbc";
+       dbc_dev->char_dev.fops = &dbc_fops;
+       dbc_dev->char_dev.mode = 0600;
+       ret = misc_register(&dbc_dev->char_dev);
+       if (ret)
+               goto cleanup_mbox;
+
+       mutex_init(&dbc_dev->ioctl_mutex);
+
+       return 0;
+
+cleanup_mbox:
+       devm_free_pages(dev, (unsigned long)dbc_dev->mbox);
+
+cleanup_dev:
+       psp->dbc_data = NULL;
+       devm_kfree(dev, dbc_dev);
+
+       return ret;
+}
diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h
new file mode 100644 (file)
index 0000000..e963099
--- /dev/null
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Dynamic Boost Control support
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __DBC_H__
+#define __DBC_H__
+
+#include <uapi/linux/psp-dbc.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-platform-access.h>
+
+#include "psp-dev.h"
+
+struct psp_dbc_device {
+       struct device *dev;
+
+       union dbc_buffer *mbox;
+
+       struct mutex ioctl_mutex;
+
+       struct miscdevice char_dev;
+};
+
+struct dbc_nonce {
+       struct psp_req_buffer_hdr       header;
+       struct dbc_user_nonce           user;
+} __packed;
+
+struct dbc_set_uid {
+       struct psp_req_buffer_hdr       header;
+       struct dbc_user_setuid          user;
+} __packed;
+
+struct dbc_param {
+       struct psp_req_buffer_hdr       header;
+       struct dbc_user_param           user;
+} __packed;
+
+union dbc_buffer {
+       struct psp_request              req;
+       struct dbc_nonce                dbc_nonce;
+       struct dbc_set_uid              dbc_set_uid;
+       struct dbc_param                dbc_param;
+};
+
+void dbc_dev_destroy(struct psp_device *psp);
+int dbc_dev_init(struct psp_device *psp);
+
+#endif /* __DBC_H__ */
index e3d6955..d42d7bc 100644 (file)
@@ -15,6 +15,7 @@
 #include "sev-dev.h"
 #include "tee-dev.h"
 #include "platform-access.h"
+#include "dbc.h"
 
 struct psp_device *psp_master;
 
@@ -112,6 +113,12 @@ static void psp_init_platform_access(struct psp_device *psp)
                dev_warn(psp->dev, "platform access init failed: %d\n", ret);
                return;
        }
+
+       /* dbc must come after platform access as it tests the feature */
+       ret = dbc_dev_init(psp);
+       if (ret)
+               dev_warn(psp->dev, "failed to init dynamic boost control: %d\n",
+                        ret);
 }
 
 static int psp_init(struct psp_device *psp)
@@ -173,13 +180,14 @@ int psp_dev_init(struct sp_device *sp)
                goto e_err;
        }
 
+       /* master device must be set for platform access */
+       if (psp->sp->set_psp_master_device)
+               psp->sp->set_psp_master_device(psp->sp);
+
        ret = psp_init(psp);
        if (ret)
                goto e_irq;
 
-       if (sp->set_psp_master_device)
-               sp->set_psp_master_device(sp);
-
        /* Enable interrupt */
        iowrite32(-1, psp->io_regs + psp->vdata->inten_reg);
 
@@ -188,6 +196,9 @@ int psp_dev_init(struct sp_device *sp)
        return 0;
 
 e_irq:
+       if (sp->clear_psp_master_device)
+               sp->clear_psp_master_device(sp);
+
        sp_free_psp_irq(psp->sp, psp);
 e_err:
        sp->psp_data = NULL;
@@ -213,6 +224,8 @@ void psp_dev_destroy(struct sp_device *sp)
 
        tee_dev_destroy(psp);
 
+       dbc_dev_destroy(psp);
+
        platform_access_dev_destroy(psp);
 
        sp_free_psp_irq(sp, psp);
index 505e4bd..8a4de69 100644 (file)
@@ -40,6 +40,7 @@ struct psp_device {
        void *sev_data;
        void *tee_data;
        void *platform_access_data;
+       void *dbc_data;
 
        unsigned int capability;
 };
index 1253a02..2329ad5 100644 (file)
 #define CACHE_NONE                     0x00
 #define CACHE_WB_NO_ALLOC              0xb7
 
+#define PLATFORM_FEATURE_DBC           0x1
+
+#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
+
 /* Structure to hold CCP device data */
 struct ccp_device;
 struct ccp_vdata {
@@ -51,6 +55,7 @@ struct tee_vdata {
        const unsigned int cmdbuff_addr_hi_reg;
        const unsigned int ring_wptr_reg;
        const unsigned int ring_rptr_reg;
+       const unsigned int info_reg;
 };
 
 struct platform_access_vdata {
@@ -69,6 +74,8 @@ struct psp_vdata {
        const unsigned int feature_reg;
        const unsigned int inten_reg;
        const unsigned int intsts_reg;
+       const unsigned int bootloader_info_reg;
+       const unsigned int platform_features;
 };
 
 /* Structure to hold SP device data */
index b603ad9..b6ab56a 100644 (file)
@@ -8,6 +8,7 @@
  * Author: Gary R Hook <gary.hook@amd.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include "ccp-dev.h"
 #include "psp-dev.h"
 
+/* used for version string AA.BB.CC.DD */
+#define AA                             GENMASK(31, 24)
+#define BB                             GENMASK(23, 16)
+#define CC                             GENMASK(15, 8)
+#define DD                             GENMASK(7, 0)
+
 #define MSIX_VECTORS                   2
 
 struct sp_pci {
@@ -32,7 +39,7 @@ struct sp_pci {
 };
 static struct sp_device *sp_dev_master;
 
-#define attribute_show(name, def)                                              \
+#define security_attribute_show(name, def)                                     \
 static ssize_t name##_show(struct device *d, struct device_attribute *attr,    \
                           char *buf)                                           \
 {                                                                              \
@@ -42,24 +49,24 @@ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \
        return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0);            \
 }
 
-attribute_show(fused_part, FUSED_PART)
+security_attribute_show(fused_part, FUSED_PART)
 static DEVICE_ATTR_RO(fused_part);
-attribute_show(debug_lock_on, DEBUG_LOCK_ON)
+security_attribute_show(debug_lock_on, DEBUG_LOCK_ON)
 static DEVICE_ATTR_RO(debug_lock_on);
-attribute_show(tsme_status, TSME_STATUS)
+security_attribute_show(tsme_status, TSME_STATUS)
 static DEVICE_ATTR_RO(tsme_status);
-attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
+security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
 static DEVICE_ATTR_RO(anti_rollback_status);
-attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
+security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
 static DEVICE_ATTR_RO(rpmc_production_enabled);
-attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
+security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
 static DEVICE_ATTR_RO(rpmc_spirom_available);
-attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
+security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
 static DEVICE_ATTR_RO(hsp_tpm_available);
-attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
+security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
 static DEVICE_ATTR_RO(rom_armor_enforced);
 
-static struct attribute *psp_attrs[] = {
+static struct attribute *psp_security_attrs[] = {
        &dev_attr_fused_part.attr,
        &dev_attr_debug_lock_on.attr,
        &dev_attr_tsme_status.attr,
@@ -83,13 +90,70 @@ static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *a
        return 0;
 }
 
-static struct attribute_group psp_attr_group = {
-       .attrs = psp_attrs,
+static struct attribute_group psp_security_attr_group = {
+       .attrs = psp_security_attrs,
        .is_visible = psp_security_is_visible,
 };
 
+#define version_attribute_show(name, _offset)                                  \
+static ssize_t name##_show(struct device *d, struct device_attribute *attr,    \
+                          char *buf)                                           \
+{                                                                              \
+       struct sp_device *sp = dev_get_drvdata(d);                              \
+       struct psp_device *psp = sp->psp_data;                                  \
+       unsigned int val = ioread32(psp->io_regs + _offset);                    \
+       return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n",                     \
+                         FIELD_GET(AA, val),                   \
+                         FIELD_GET(BB, val),                   \
+                         FIELD_GET(CC, val),                   \
+                         FIELD_GET(DD, val));                  \
+}
+
+version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg)
+static DEVICE_ATTR_RO(bootloader_version);
+version_attribute_show(tee_version, psp->vdata->tee->info_reg)
+static DEVICE_ATTR_RO(tee_version);
+
+static struct attribute *psp_firmware_attrs[] = {
+       &dev_attr_bootloader_version.attr,
+       &dev_attr_tee_version.attr,
+       NULL,
+};
+
+static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct sp_device *sp = dev_get_drvdata(dev);
+       struct psp_device *psp = sp->psp_data;
+       unsigned int val = 0xffffffff;
+
+       if (!psp)
+               return 0;
+
+       if (attr == &dev_attr_bootloader_version.attr &&
+           psp->vdata->bootloader_info_reg)
+               val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
+
+       if (attr == &dev_attr_tee_version.attr &&
+           psp->capability & PSP_CAPABILITY_TEE &&
+           psp->vdata->tee->info_reg)
+               val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
+
+       /* If platform disallows accessing this register it will be all f's */
+       if (val != 0xffffffff)
+               return 0444;
+
+       return 0;
+}
+
+static struct attribute_group psp_firmware_attr_group = {
+       .attrs = psp_firmware_attrs,
+       .is_visible = psp_firmware_is_visible,
+};
+
 static const struct attribute_group *psp_groups[] = {
-       &psp_attr_group,
+       &psp_security_attr_group,
+       &psp_firmware_attr_group,
        NULL,
 };
 
@@ -359,6 +423,7 @@ static const struct tee_vdata teev1 = {
        .cmdbuff_addr_hi_reg    = 0x1054c,      /* C2PMSG_19 */
        .ring_wptr_reg          = 0x10550,      /* C2PMSG_20 */
        .ring_rptr_reg          = 0x10554,      /* C2PMSG_21 */
+       .info_reg               = 0x109e8,      /* C2PMSG_58 */
 };
 
 static const struct tee_vdata teev2 = {
@@ -384,6 +449,7 @@ static const struct platform_access_vdata pa_v2 = {
 
 static const struct psp_vdata pspv1 = {
        .sev                    = &sevv1,
+       .bootloader_info_reg    = 0x105ec,      /* C2PMSG_59 */
        .feature_reg            = 0x105fc,      /* C2PMSG_63 */
        .inten_reg              = 0x10610,      /* P2CMSG_INTEN */
        .intsts_reg             = 0x10614,      /* P2CMSG_INTSTS */
@@ -391,6 +457,7 @@ static const struct psp_vdata pspv1 = {
 
 static const struct psp_vdata pspv2 = {
        .sev                    = &sevv2,
+       .bootloader_info_reg    = 0x109ec,      /* C2PMSG_59 */
        .feature_reg            = 0x109fc,      /* C2PMSG_63 */
        .inten_reg              = 0x10690,      /* P2CMSG_INTEN */
        .intsts_reg             = 0x10694,      /* P2CMSG_INTSTS */
@@ -399,14 +466,17 @@ static const struct psp_vdata pspv2 = {
 static const struct psp_vdata pspv3 = {
        .tee                    = &teev1,
        .platform_access        = &pa_v1,
+       .bootloader_info_reg    = 0x109ec,      /* C2PMSG_59 */
        .feature_reg            = 0x109fc,      /* C2PMSG_63 */
        .inten_reg              = 0x10690,      /* P2CMSG_INTEN */
        .intsts_reg             = 0x10694,      /* P2CMSG_INTSTS */
+       .platform_features      = PLATFORM_FEATURE_DBC,
 };
 
 static const struct psp_vdata pspv4 = {
        .sev                    = &sevv2,
        .tee                    = &teev1,
+       .bootloader_info_reg    = 0x109ec,      /* C2PMSG_59 */
        .feature_reg            = 0x109fc,      /* C2PMSG_63 */
        .inten_reg              = 0x10690,      /* P2CMSG_INTEN */
        .intsts_reg             = 0x10694,      /* P2CMSG_INTSTS */
index c57f929..0f06940 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/pm_runtime.h>
 
 #include "cc_driver.h"
index 0eade4f..16298ae 100644 (file)
@@ -2216,7 +2216,8 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                memcpy(hmacctx->ipad, key, keylen);
        }
        memset(hmacctx->ipad + keylen, 0, bs - keylen);
-       memcpy(hmacctx->opad, hmacctx->ipad, bs);
+       unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
+                     "fortified memcpy causes -Wrestrict warning");
 
        for (i = 0; i < bs / sizeof(int); i++) {
                *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
index f7c8bb9..5e9d568 100644 (file)
@@ -133,7 +133,6 @@ int start_crypto(void);
 int stop_crypto(void);
 int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
                        const struct pkt_gl *pgl);
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
                     int err);
 #endif /* __CHCR_CORE_H__ */
index 7f88ddb..1d693b8 100644 (file)
@@ -344,7 +344,6 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
                             struct cpl_rx_phys_dsgl *phys_cpl,
                             struct  cipher_wr_param *wrparam,
                             unsigned short qid);
-int sg_nents_len_skip(struct scatterlist *sg, u64 len, u64 skip);
 void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx,
                           struct hash_wr_param *param);
 int chcr_hash_dma_map(struct device *dev, struct ahash_request *req);
index cbd8ca6..5d60a4b 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 
 #include <crypto/internal/rng.h>
@@ -277,7 +277,7 @@ static int exynos_rng_probe(struct platform_device *pdev)
        if (!rng)
                return -ENOMEM;
 
-       rng->type = (enum exynos_prng_type)of_device_get_match_data(&pdev->dev);
+       rng->type = (uintptr_t)of_device_get_match_data(&pdev->dev);
 
        mutex_init(&rng->lock);
 
index 14d0d83..49dce9e 100644 (file)
@@ -8,13 +8,17 @@
  * ECB mode.
  */
 
-#include <linux/crypto.h>
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/pm_runtime.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include "sl3516-ce.h"
 
 /* sl3516_ce_need_fallback - check if a request can be handled by the CE */
@@ -105,7 +109,7 @@ static int sl3516_ce_cipher_fallback(struct skcipher_request *areq)
        struct sl3516_ce_alg_template *algt;
        int err;
 
-       algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
        algt->stat_fb++;
 
        skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
@@ -136,7 +140,7 @@ static int sl3516_ce_cipher(struct skcipher_request *areq)
        int err = 0;
        int i;
 
-       algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
 
        dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
                crypto_tfm_alg_name(areq->base.tfm),
@@ -258,7 +262,7 @@ theend:
        return err;
 }
 
-static int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
+int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq)
 {
        int err;
        struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
@@ -318,7 +322,7 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
 
        memset(op, 0, sizeof(struct sl3516_ce_cipher_tfm_ctx));
 
-       algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher);
+       algt = container_of(alg, struct sl3516_ce_alg_template, alg.skcipher.base);
        op->ce = algt->ce;
 
        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
@@ -335,10 +339,6 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
                 crypto_tfm_alg_driver_name(&sktfm->base),
                 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));
 
-       op->enginectx.op.do_one_request = sl3516_ce_handle_cipher_request;
-       op->enginectx.op.prepare_request = NULL;
-       op->enginectx.op.unprepare_request = NULL;
-
        err = pm_runtime_get_sync(op->ce->dev);
        if (err < 0)
                goto error_pm;
index b7524b6..0f43c6e 100644 (file)
@@ -6,22 +6,24 @@
  *
  * Core file which registers crypto algorithms supported by the CryptoEngine
  */
+
+#include <crypto/engine.h>
+#include <crypto/internal/rng.h>
+#include <crypto/internal/skcipher.h>
 #include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/debugfs.h>
 #include <linux/dev_printk.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-#include <crypto/internal/rng.h>
-#include <crypto/internal/skcipher.h>
 
 #include "sl3516-ce.h"
 
@@ -217,7 +219,7 @@ static struct sl3516_ce_alg_template ce_algs[] = {
 {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .mode = ECB_AES,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base = {
                        .cra_name = "ecb(aes)",
                        .cra_driver_name = "ecb-aes-sl3516",
@@ -236,11 +238,13 @@ static struct sl3516_ce_alg_template ce_algs[] = {
                .setkey         = sl3516_ce_aes_setkey,
                .encrypt        = sl3516_ce_skencrypt,
                .decrypt        = sl3516_ce_skdecrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = sl3516_ce_handle_cipher_request,
+       },
 },
 };
 
-#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
 static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
 {
        struct sl3516_ce_dev *ce = seq->private;
@@ -264,8 +268,8 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
                switch (ce_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  ce_algs[i].alg.skcipher.base.cra_driver_name,
-                                  ce_algs[i].alg.skcipher.base.cra_name,
+                                  ce_algs[i].alg.skcipher.base.base.cra_driver_name,
+                                  ce_algs[i].alg.skcipher.base.base.cra_name,
                                   ce_algs[i].stat_req, ce_algs[i].stat_fb);
                        break;
                }
@@ -274,7 +278,6 @@ static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
 }
 
 DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);
-#endif
 
 static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
 {
@@ -286,11 +289,11 @@ static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
                switch (ce_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        dev_info(ce->dev, "DEBUG: Register %s\n",
-                                ce_algs[i].alg.skcipher.base.cra_name);
-                       err = crypto_register_skcipher(&ce_algs[i].alg.skcipher);
+                                ce_algs[i].alg.skcipher.base.base.cra_name);
+                       err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
                        if (err) {
                                dev_err(ce->dev, "Fail to register %s\n",
-                                       ce_algs[i].alg.skcipher.base.cra_name);
+                                       ce_algs[i].alg.skcipher.base.base.cra_name);
                                ce_algs[i].ce = NULL;
                                return err;
                        }
@@ -313,8 +316,8 @@ static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
                switch (ce_algs[i].type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        dev_info(ce->dev, "Unregister %d %s\n", i,
-                                ce_algs[i].alg.skcipher.base.cra_name);
-                       crypto_unregister_skcipher(&ce_algs[i].alg.skcipher);
+                                ce_algs[i].alg.skcipher.base.base.cra_name);
+                       crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
                        break;
                }
        }
@@ -473,13 +476,20 @@ static int sl3516_ce_probe(struct platform_device *pdev)
 
        pm_runtime_put_sync(ce->dev);
 
+       if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
+               struct dentry *dbgfs_dir __maybe_unused;
+               struct dentry *dbgfs_stats __maybe_unused;
+
+               /* Ignore error of debugfs */
+               dbgfs_dir = debugfs_create_dir("sl3516", NULL);
+               dbgfs_stats = debugfs_create_file("stats", 0444,
+                                                 dbgfs_dir, ce,
+                                                 &sl3516_ce_debugfs_fops);
 #ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
-       /* Ignore error of debugfs */
-       ce->dbgfs_dir = debugfs_create_dir("sl3516", NULL);
-       ce->dbgfs_stats = debugfs_create_file("stats", 0444,
-                                             ce->dbgfs_dir, ce,
-                                             &sl3516_ce_debugfs_fops);
+               ce->dbgfs_dir = dbgfs_dir;
+               ce->dbgfs_stats = dbgfs_stats;
 #endif
+       }
 
        return 0;
 error_pmuse:
index 4c0ec6c..9e1a7e7 100644 (file)
@@ -17,7 +17,6 @@
 #include <crypto/engine.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/skcipher.h>
-#include <linux/crypto.h>
 #include <linux/debugfs.h>
 #include <linux/hw_random.h>
 
@@ -292,16 +291,12 @@ struct sl3516_ce_cipher_req_ctx {
 
 /*
  * struct sl3516_ce_cipher_tfm_ctx - context for a skcipher TFM
- * @enginectx:         crypto_engine used by this TFM
  * @key:               pointer to key data
  * @keylen:            len of the key
  * @ce:                        pointer to the private data of driver handling this TFM
  * @fallback_tfm:      pointer to the fallback TFM
- *
- * enginectx must be the first element
  */
 struct sl3516_ce_cipher_tfm_ctx {
-       struct crypto_engine_ctx enginectx;
        u32 *key;
        u32 keylen;
        struct sl3516_ce_dev *ce;
@@ -324,7 +319,7 @@ struct sl3516_ce_alg_template {
        u32 mode;
        struct sl3516_ce_dev *ce;
        union {
-               struct skcipher_alg skcipher;
+               struct skcipher_engine_alg skcipher;
        } alg;
        unsigned long stat_req;
        unsigned long stat_fb;
@@ -345,3 +340,4 @@ int sl3516_ce_run_task(struct sl3516_ce_dev *ce,
 
 int sl3516_ce_rng_register(struct sl3516_ce_dev *ce);
 void sl3516_ce_rng_unregister(struct sl3516_ce_dev *ce);
+int sl3516_ce_handle_cipher_request(struct crypto_engine *engine, void *areq);
index 8ede773..9a1c61b 100644 (file)
@@ -1392,9 +1392,9 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
                                unsigned int len)
 {
        struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
+       unsigned int sz, sz_shift, curve_sz;
        struct device *dev = ctx->dev;
        char key[HPRE_ECC_MAX_KSZ];
-       unsigned int sz, sz_shift;
        struct ecdh params;
        int ret;
 
@@ -1406,7 +1406,13 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
        /* Use stdrng to generate private key */
        if (!params.key || !params.key_size) {
                params.key = key;
-               params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
+               curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
+               if (!curve_sz) {
+                       dev_err(dev, "Invalid curve size!\n");
+                       return -EINVAL;
+               }
+
+               params.key_size = curve_sz - 1;
                ret = ecdh_gen_privkey(ctx, &params);
                if (ret)
                        return ret;
index 5d0adfb..39297ce 100644 (file)
@@ -209,7 +209,7 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = {
        {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
        {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
        {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
-       {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xFFFFFE},
+       {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE},
        {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE},
        {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE},
        {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
@@ -276,6 +276,9 @@ static const struct hpre_hw_error hpre_hw_errors[] = {
                .int_msk = BIT(23),
                .msg = "sva_fsm_timeout_int_set"
        }, {
+               .int_msk = BIT(24),
+               .msg = "sva_int_set"
+       }, {
                /* sentinel */
        }
 };
index edc6fd4..a99fd58 100644 (file)
@@ -88,6 +88,8 @@
 #define QM_DB_PRIORITY_SHIFT_V1                48
 #define QM_PAGE_SIZE                   0x0034
 #define QM_QP_DB_INTERVAL              0x10000
+#define QM_DB_TIMEOUT_CFG              0x100074
+#define QM_DB_TIMEOUT_SET              0x1fffff
 
 #define QM_MEM_START_INIT              0x100040
 #define QM_MEM_INIT_DONE               0x100044
@@ -954,6 +956,11 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
        if (!val)
                return IRQ_NONE;
 
+       if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
+               dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
+               return IRQ_HANDLED;
+       }
+
        schedule_work(&qm->cmd_process);
 
        return IRQ_HANDLED;
@@ -997,7 +1004,7 @@ static void qm_reset_function(struct hisi_qm *qm)
                return;
        }
 
-       ret = hisi_qm_stop(qm, QM_FLR);
+       ret = hisi_qm_stop(qm, QM_DOWN);
        if (ret) {
                dev_err(dev, "failed to stop qm when reset function\n");
                goto clear_bit;
@@ -2743,6 +2750,9 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
               test_bit(QM_RESETTING, &qm->misc_ctl))
                msleep(WAIT_PERIOD);
 
+       if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
+               flush_work(&qm->cmd_process);
+
        udelay(REMOVE_WAIT_DELAY);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
@@ -3243,7 +3253,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
        }
 
        if (qm->status.stop_reason == QM_SOFT_RESET ||
-           qm->status.stop_reason == QM_FLR) {
+           qm->status.stop_reason == QM_DOWN) {
                hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
                ret = qm_stop_started_qp(qm);
                if (ret < 0) {
@@ -4539,11 +4549,11 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
        if (qm->fun_type == QM_HW_PF)
                qm_cmd_uninit(qm);
 
-       ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
+       ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
        if (ret)
                pci_err(pdev, "failed to stop vfs by pf in FLR.\n");
 
-       ret = hisi_qm_stop(qm, QM_FLR);
+       ret = hisi_qm_stop(qm, QM_DOWN);
        if (ret) {
                pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
                hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
@@ -4641,9 +4651,11 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
        struct hisi_qm *qm = pci_get_drvdata(pdev);
        int ret;
 
-       ret = hisi_qm_stop(qm, QM_NORMAL);
+       ret = hisi_qm_stop(qm, QM_DOWN);
        if (ret)
                dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
+
+       hisi_qm_cache_wb(qm);
 }
 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
 
@@ -4807,7 +4819,7 @@ static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
        cmd = msg & QM_MB_CMD_DATA_MASK;
        switch (cmd) {
        case QM_PF_FLR_PREPARE:
-               qm_pf_reset_vf_process(qm, QM_FLR);
+               qm_pf_reset_vf_process(qm, QM_DOWN);
                break;
        case QM_PF_SRST_PREPARE:
                qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
@@ -5371,6 +5383,8 @@ int hisi_qm_init(struct hisi_qm *qm)
                goto err_pci_init;
 
        if (qm->fun_type == QM_HW_PF) {
+               /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
+               writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
                qm_disable_clock_gate(qm);
                ret = qm_dev_mem_reset(qm);
                if (ret) {
@@ -5538,6 +5552,8 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
 
        qm_cmd_init(qm);
        hisi_qm_dev_err_init(qm);
+       /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
+       writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
        qm_disable_clock_gate(qm);
        ret = qm_dev_mem_reset(qm);
        if (ret)
index e758513..e1e0899 100644 (file)
@@ -1107,8 +1107,8 @@ static int sec_queue_res_cfg(struct sec_queue *queue)
        }
        queue->task_irq = platform_get_irq(to_platform_device(dev),
                                           queue->queue_id * 2 + 1);
-       if (queue->task_irq <= 0) {
-               ret = -EINVAL;
+       if (queue->task_irq < 0) {
+               ret = queue->task_irq;
                goto err_free_ring_db;
        }
 
index 359aa2b..4506369 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 
@@ -1105,7 +1105,7 @@ static struct platform_driver img_hash_driver = {
        .driver         = {
                .name   = "img-hash-accelerator",
                .pm     = &img_hash_pm_ops,
-               .of_match_table = of_match_ptr(img_hash_match),
+               .of_match_table = img_hash_match,
        }
 };
 module_platform_driver(img_hash_driver);
index ae31be0..1e2fd9a 100644 (file)
@@ -5,24 +5,23 @@
  * Copyright (C) 2018-2020 Intel Corporation
  */
 
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
-#include <linux/crypto.h>
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/types.h>
-
-#include <crypto/aes.h>
-#include <crypto/engine.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
 
 #include "ocs-aes.h"
 
@@ -38,7 +37,6 @@
 
 /**
  * struct ocs_aes_tctx - OCS AES Transform context
- * @engine_ctx:                Engine context.
  * @aes_dev:           The OCS AES device.
  * @key:               AES/SM4 key.
  * @key_len:           The length (in bytes) of @key.
@@ -47,7 +45,6 @@
  * @use_fallback:      Whether or not fallback cipher should be used.
  */
 struct ocs_aes_tctx {
-       struct crypto_engine_ctx engine_ctx;
        struct ocs_aes_dev *aes_dev;
        u8 key[OCS_AES_KEYSIZE_256];
        unsigned int key_len;
@@ -1148,15 +1145,6 @@ static int kmb_ocs_sm4_ccm_decrypt(struct aead_request *req)
        return kmb_ocs_aead_common(req, OCS_SM4, OCS_DECRYPT, OCS_MODE_CCM);
 }
 
-static inline int ocs_common_init(struct ocs_aes_tctx *tctx)
-{
-       tctx->engine_ctx.op.prepare_request = NULL;
-       tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_sk_do_one_request;
-       tctx->engine_ctx.op.unprepare_request = NULL;
-
-       return 0;
-}
-
 static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
 {
        const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@@ -1172,16 +1160,14 @@ static int ocs_aes_init_tfm(struct crypto_skcipher *tfm)
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
 
-       return ocs_common_init(tctx);
+       return 0;
 }
 
 static int ocs_sm4_init_tfm(struct crypto_skcipher *tfm)
 {
-       struct ocs_aes_tctx *tctx = crypto_skcipher_ctx(tfm);
-
        crypto_skcipher_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
 
-       return ocs_common_init(tctx);
+       return 0;
 }
 
 static inline void clear_key(struct ocs_aes_tctx *tctx)
@@ -1206,15 +1192,6 @@ static void ocs_exit_tfm(struct crypto_skcipher *tfm)
        }
 }
 
-static inline int ocs_common_aead_init(struct ocs_aes_tctx *tctx)
-{
-       tctx->engine_ctx.op.prepare_request = NULL;
-       tctx->engine_ctx.op.do_one_request = kmb_ocs_aes_aead_do_one_request;
-       tctx->engine_ctx.op.unprepare_request = NULL;
-
-       return 0;
-}
-
 static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
 {
        const char *alg_name = crypto_tfm_alg_name(&tfm->base);
@@ -1233,7 +1210,7 @@ static int ocs_aes_aead_cra_init(struct crypto_aead *tfm)
                                    (sizeof(struct aead_request) +
                                     crypto_aead_reqsize(tctx->sw_cipher.aead))));
 
-       return ocs_common_aead_init(tctx);
+       return 0;
 }
 
 static int kmb_ocs_aead_ccm_setauthsize(struct crypto_aead *tfm,
@@ -1261,11 +1238,9 @@ static int kmb_ocs_aead_gcm_setauthsize(struct crypto_aead *tfm,
 
 static int ocs_sm4_aead_cra_init(struct crypto_aead *tfm)
 {
-       struct ocs_aes_tctx *tctx = crypto_aead_ctx(tfm);
-
        crypto_aead_set_reqsize(tfm, sizeof(struct ocs_aes_rctx));
 
-       return ocs_common_aead_init(tctx);
+       return 0;
 }
 
 static void ocs_aead_cra_exit(struct crypto_aead *tfm)
@@ -1280,182 +1255,190 @@ static void ocs_aead_cra_exit(struct crypto_aead *tfm)
        }
 }
 
-static struct skcipher_alg algs[] = {
+static struct skcipher_engine_alg algs[] = {
 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
        {
-               .base.cra_name = "ecb(aes)",
-               .base.cra_driver_name = "ecb-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_ecb_encrypt,
-               .decrypt = kmb_ocs_aes_ecb_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "ecb(aes)",
+               .base.base.cra_driver_name = "ecb-aes-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                      CRYPTO_ALG_NEED_FALLBACK,
+               .base.base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .base.setkey = kmb_ocs_aes_set_key,
+               .base.encrypt = kmb_ocs_aes_ecb_encrypt,
+               .base.decrypt = kmb_ocs_aes_ecb_decrypt,
+               .base.init = ocs_aes_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
        {
-               .base.cra_name = "cbc(aes)",
-               .base.cra_driver_name = "cbc-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_cbc_encrypt,
-               .decrypt = kmb_ocs_aes_cbc_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "cbc(aes)",
+               .base.base.cra_driver_name = "cbc-aes-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                      CRYPTO_ALG_NEED_FALLBACK,
+               .base.base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.setkey = kmb_ocs_aes_set_key,
+               .base.encrypt = kmb_ocs_aes_cbc_encrypt,
+               .base.decrypt = kmb_ocs_aes_cbc_decrypt,
+               .base.init = ocs_aes_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
        {
-               .base.cra_name = "ctr(aes)",
-               .base.cra_driver_name = "ctr-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = 1,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_ctr_encrypt,
-               .decrypt = kmb_ocs_aes_ctr_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "ctr(aes)",
+               .base.base.cra_driver_name = "ctr-aes-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                      CRYPTO_ALG_NEED_FALLBACK,
+               .base.base.cra_blocksize = 1,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.setkey = kmb_ocs_aes_set_key,
+               .base.encrypt = kmb_ocs_aes_ctr_encrypt,
+               .base.decrypt = kmb_ocs_aes_ctr_decrypt,
+               .base.init = ocs_aes_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
        {
-               .base.cra_name = "cts(cbc(aes))",
-               .base.cra_driver_name = "cts-aes-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_AES_MIN_KEY_SIZE,
-               .max_keysize = OCS_AES_MAX_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_aes_set_key,
-               .encrypt = kmb_ocs_aes_cts_encrypt,
-               .decrypt = kmb_ocs_aes_cts_decrypt,
-               .init = ocs_aes_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "cts(cbc(aes))",
+               .base.base.cra_driver_name = "cts-aes-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                      CRYPTO_ALG_NEED_FALLBACK,
+               .base.base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_AES_MIN_KEY_SIZE,
+               .base.max_keysize = OCS_AES_MAX_KEY_SIZE,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.setkey = kmb_ocs_aes_set_key,
+               .base.encrypt = kmb_ocs_aes_cts_encrypt,
+               .base.decrypt = kmb_ocs_aes_cts_decrypt,
+               .base.init = ocs_aes_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB
        {
-               .base.cra_name = "ecb(sm4)",
-               .base.cra_driver_name = "ecb-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_ecb_encrypt,
-               .decrypt = kmb_ocs_sm4_ecb_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "ecb(sm4)",
+               .base.base.cra_driver_name = "ecb-sm4-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_SM4_KEY_SIZE,
+               .base.max_keysize = OCS_SM4_KEY_SIZE,
+               .base.setkey = kmb_ocs_sm4_set_key,
+               .base.encrypt = kmb_ocs_sm4_ecb_encrypt,
+               .base.decrypt = kmb_ocs_sm4_ecb_decrypt,
+               .base.init = ocs_sm4_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_ECB */
        {
-               .base.cra_name = "cbc(sm4)",
-               .base.cra_driver_name = "cbc-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_cbc_encrypt,
-               .decrypt = kmb_ocs_sm4_cbc_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "cbc(sm4)",
+               .base.base.cra_driver_name = "cbc-sm4-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_SM4_KEY_SIZE,
+               .base.max_keysize = OCS_SM4_KEY_SIZE,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.setkey = kmb_ocs_sm4_set_key,
+               .base.encrypt = kmb_ocs_sm4_cbc_encrypt,
+               .base.decrypt = kmb_ocs_sm4_cbc_decrypt,
+               .base.init = ocs_sm4_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
        {
-               .base.cra_name = "ctr(sm4)",
-               .base.cra_driver_name = "ctr-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = 1,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_ctr_encrypt,
-               .decrypt = kmb_ocs_sm4_ctr_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "ctr(sm4)",
+               .base.base.cra_driver_name = "ctr-sm4-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.base.cra_blocksize = 1,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_SM4_KEY_SIZE,
+               .base.max_keysize = OCS_SM4_KEY_SIZE,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.setkey = kmb_ocs_sm4_set_key,
+               .base.encrypt = kmb_ocs_sm4_ctr_encrypt,
+               .base.decrypt = kmb_ocs_sm4_ctr_decrypt,
+               .base.init = ocs_sm4_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        },
 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
        {
-               .base.cra_name = "cts(cbc(sm4))",
-               .base.cra_driver_name = "cts-sm4-keembay-ocs",
-               .base.cra_priority = KMB_OCS_PRIORITY,
-               .base.cra_flags = CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .base.cra_blocksize = AES_BLOCK_SIZE,
-               .base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
-               .base.cra_module = THIS_MODULE,
-               .base.cra_alignmask = 0,
-
-               .min_keysize = OCS_SM4_KEY_SIZE,
-               .max_keysize = OCS_SM4_KEY_SIZE,
-               .ivsize = AES_BLOCK_SIZE,
-               .setkey = kmb_ocs_sm4_set_key,
-               .encrypt = kmb_ocs_sm4_cts_encrypt,
-               .decrypt = kmb_ocs_sm4_cts_decrypt,
-               .init = ocs_sm4_init_tfm,
-               .exit = ocs_exit_tfm,
+               .base.base.cra_name = "cts(cbc(sm4))",
+               .base.base.cra_driver_name = "cts-sm4-keembay-ocs",
+               .base.base.cra_priority = KMB_OCS_PRIORITY,
+               .base.base.cra_flags = CRYPTO_ALG_ASYNC |
+                                      CRYPTO_ALG_KERN_DRIVER_ONLY,
+               .base.base.cra_blocksize = AES_BLOCK_SIZE,
+               .base.base.cra_ctxsize = sizeof(struct ocs_aes_tctx),
+               .base.base.cra_module = THIS_MODULE,
+               .base.base.cra_alignmask = 0,
+
+               .base.min_keysize = OCS_SM4_KEY_SIZE,
+               .base.max_keysize = OCS_SM4_KEY_SIZE,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.setkey = kmb_ocs_sm4_set_key,
+               .base.encrypt = kmb_ocs_sm4_cts_encrypt,
+               .base.decrypt = kmb_ocs_sm4_cts_decrypt,
+               .base.init = ocs_sm4_init_tfm,
+               .base.exit = ocs_exit_tfm,
+               .op.do_one_request = kmb_ocs_aes_sk_do_one_request,
        }
 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS */
 };
 
-static struct aead_alg algs_aead[] = {
+static struct aead_engine_alg algs_aead[] = {
        {
-               .base = {
+               .base.base = {
                        .cra_name = "gcm(aes)",
                        .cra_driver_name = "gcm-aes-keembay-ocs",
                        .cra_priority = KMB_OCS_PRIORITY,
@@ -1467,17 +1450,18 @@ static struct aead_alg algs_aead[] = {
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
-               .init = ocs_aes_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = GCM_AES_IV_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_gcm_setauthsize,
-               .setkey = kmb_ocs_aes_aead_set_key,
-               .encrypt = kmb_ocs_aes_gcm_encrypt,
-               .decrypt = kmb_ocs_aes_gcm_decrypt,
+               .base.init = ocs_aes_aead_cra_init,
+               .base.exit = ocs_aead_cra_exit,
+               .base.ivsize = GCM_AES_IV_SIZE,
+               .base.maxauthsize = AES_BLOCK_SIZE,
+               .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+               .base.setkey = kmb_ocs_aes_aead_set_key,
+               .base.encrypt = kmb_ocs_aes_gcm_encrypt,
+               .base.decrypt = kmb_ocs_aes_gcm_decrypt,
+               .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
        },
        {
-               .base = {
+               .base.base = {
                        .cra_name = "ccm(aes)",
                        .cra_driver_name = "ccm-aes-keembay-ocs",
                        .cra_priority = KMB_OCS_PRIORITY,
@@ -1489,17 +1473,18 @@ static struct aead_alg algs_aead[] = {
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
-               .init = ocs_aes_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = AES_BLOCK_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_ccm_setauthsize,
-               .setkey = kmb_ocs_aes_aead_set_key,
-               .encrypt = kmb_ocs_aes_ccm_encrypt,
-               .decrypt = kmb_ocs_aes_ccm_decrypt,
+               .base.init = ocs_aes_aead_cra_init,
+               .base.exit = ocs_aead_cra_exit,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.maxauthsize = AES_BLOCK_SIZE,
+               .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+               .base.setkey = kmb_ocs_aes_aead_set_key,
+               .base.encrypt = kmb_ocs_aes_ccm_encrypt,
+               .base.decrypt = kmb_ocs_aes_ccm_decrypt,
+               .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
        },
        {
-               .base = {
+               .base.base = {
                        .cra_name = "gcm(sm4)",
                        .cra_driver_name = "gcm-sm4-keembay-ocs",
                        .cra_priority = KMB_OCS_PRIORITY,
@@ -1510,17 +1495,18 @@ static struct aead_alg algs_aead[] = {
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
-               .init = ocs_sm4_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = GCM_AES_IV_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_gcm_setauthsize,
-               .setkey = kmb_ocs_sm4_aead_set_key,
-               .encrypt = kmb_ocs_sm4_gcm_encrypt,
-               .decrypt = kmb_ocs_sm4_gcm_decrypt,
+               .base.init = ocs_sm4_aead_cra_init,
+               .base.exit = ocs_aead_cra_exit,
+               .base.ivsize = GCM_AES_IV_SIZE,
+               .base.maxauthsize = AES_BLOCK_SIZE,
+               .base.setauthsize = kmb_ocs_aead_gcm_setauthsize,
+               .base.setkey = kmb_ocs_sm4_aead_set_key,
+               .base.encrypt = kmb_ocs_sm4_gcm_encrypt,
+               .base.decrypt = kmb_ocs_sm4_gcm_decrypt,
+               .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
        },
        {
-               .base = {
+               .base.base = {
                        .cra_name = "ccm(sm4)",
                        .cra_driver_name = "ccm-sm4-keembay-ocs",
                        .cra_priority = KMB_OCS_PRIORITY,
@@ -1531,21 +1517,22 @@ static struct aead_alg algs_aead[] = {
                        .cra_alignmask = 0,
                        .cra_module = THIS_MODULE,
                },
-               .init = ocs_sm4_aead_cra_init,
-               .exit = ocs_aead_cra_exit,
-               .ivsize = AES_BLOCK_SIZE,
-               .maxauthsize = AES_BLOCK_SIZE,
-               .setauthsize = kmb_ocs_aead_ccm_setauthsize,
-               .setkey = kmb_ocs_sm4_aead_set_key,
-               .encrypt = kmb_ocs_sm4_ccm_encrypt,
-               .decrypt = kmb_ocs_sm4_ccm_decrypt,
+               .base.init = ocs_sm4_aead_cra_init,
+               .base.exit = ocs_aead_cra_exit,
+               .base.ivsize = AES_BLOCK_SIZE,
+               .base.maxauthsize = AES_BLOCK_SIZE,
+               .base.setauthsize = kmb_ocs_aead_ccm_setauthsize,
+               .base.setkey = kmb_ocs_sm4_aead_set_key,
+               .base.encrypt = kmb_ocs_sm4_ccm_encrypt,
+               .base.decrypt = kmb_ocs_sm4_ccm_decrypt,
+               .op.do_one_request = kmb_ocs_aes_aead_do_one_request,
        }
 };
 
 static void unregister_aes_algs(struct ocs_aes_dev *aes_dev)
 {
-       crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
-       crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+       crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+       crypto_engine_unregister_skciphers(algs, ARRAY_SIZE(algs));
 }
 
 static int register_aes_algs(struct ocs_aes_dev *aes_dev)
@@ -1556,13 +1543,13 @@ static int register_aes_algs(struct ocs_aes_dev *aes_dev)
         * If any algorithm fails to register, all preceding algorithms that
         * were successfully registered will be automatically unregistered.
         */
-       ret = crypto_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
+       ret = crypto_engine_register_aeads(algs_aead, ARRAY_SIZE(algs_aead));
        if (ret)
                return ret;
 
-       ret = crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+       ret = crypto_engine_register_skciphers(algs, ARRAY_SIZE(algs));
        if (ret)
-               crypto_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
+               crypto_engine_unregister_aeads(algs_aead, ARRAY_SIZE(algs));
 
        return ret;
 }
index 2269df1..fb95dee 100644 (file)
@@ -7,30 +7,27 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
-#include <linux/crypto.h>
-#include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/fips.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/irq.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include <crypto/ecc_curve.h>
-#include <crypto/ecdh.h>
-#include <crypto/engine.h>
-#include <crypto/kpp.h>
-#include <crypto/rng.h>
-
-#include <crypto/internal/ecc.h>
-#include <crypto/internal/kpp.h>
+#include <linux/string.h>
 
 #define DRV_NAME                       "keembay-ocs-ecc"
 
@@ -95,13 +92,11 @@ struct ocs_ecc_dev {
 
 /**
  * struct ocs_ecc_ctx - Transformation context.
- * @engine_ctx:         Crypto engine ctx.
  * @ecc_dev:    The ECC driver associated with this context.
  * @curve:      The elliptic curve used by this transformation.
  * @private_key: The private key.
  */
 struct ocs_ecc_ctx {
-       struct crypto_engine_ctx engine_ctx;
        struct ocs_ecc_dev *ecc_dev;
        const struct ecc_curve *curve;
        u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
@@ -794,10 +789,6 @@ static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
        if (!tctx->curve)
                return -EOPNOTSUPP;
 
-       tctx->engine_ctx.op.prepare_request = NULL;
-       tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
-       tctx->engine_ctx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -830,36 +821,38 @@ static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
        return digits_to_bytes(tctx->curve->g.ndigits) * 2;
 }
 
-static struct kpp_alg ocs_ecdh_p256 = {
-       .set_secret = kmb_ocs_ecdh_set_secret,
-       .generate_public_key = kmb_ocs_ecdh_generate_public_key,
-       .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
-       .init = kmb_ocs_ecdh_nist_p256_init_tfm,
-       .exit = kmb_ocs_ecdh_exit_tfm,
-       .max_size = kmb_ocs_ecdh_max_size,
-       .base = {
+static struct kpp_engine_alg ocs_ecdh_p256 = {
+       .base.set_secret = kmb_ocs_ecdh_set_secret,
+       .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+       .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+       .base.init = kmb_ocs_ecdh_nist_p256_init_tfm,
+       .base.exit = kmb_ocs_ecdh_exit_tfm,
+       .base.max_size = kmb_ocs_ecdh_max_size,
+       .base.base = {
                .cra_name = "ecdh-nist-p256",
                .cra_driver_name = "ecdh-nist-p256-keembay-ocs",
                .cra_priority = KMB_OCS_ECC_PRIORITY,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
        },
+       .op.do_one_request = kmb_ocs_ecc_do_one_request,
 };
 
-static struct kpp_alg ocs_ecdh_p384 = {
-       .set_secret = kmb_ocs_ecdh_set_secret,
-       .generate_public_key = kmb_ocs_ecdh_generate_public_key,
-       .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
-       .init = kmb_ocs_ecdh_nist_p384_init_tfm,
-       .exit = kmb_ocs_ecdh_exit_tfm,
-       .max_size = kmb_ocs_ecdh_max_size,
-       .base = {
+static struct kpp_engine_alg ocs_ecdh_p384 = {
+       .base.set_secret = kmb_ocs_ecdh_set_secret,
+       .base.generate_public_key = kmb_ocs_ecdh_generate_public_key,
+       .base.compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+       .base.init = kmb_ocs_ecdh_nist_p384_init_tfm,
+       .base.exit = kmb_ocs_ecdh_exit_tfm,
+       .base.max_size = kmb_ocs_ecdh_max_size,
+       .base.base = {
                .cra_name = "ecdh-nist-p384",
                .cra_driver_name = "ecdh-nist-p384-keembay-ocs",
                .cra_priority = KMB_OCS_ECC_PRIORITY,
                .cra_module = THIS_MODULE,
                .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
        },
+       .op.do_one_request = kmb_ocs_ecc_do_one_request,
 };
 
 static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
@@ -941,14 +934,14 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
        }
 
        /* Register the KPP algo. */
-       rc = crypto_register_kpp(&ocs_ecdh_p256);
+       rc = crypto_engine_register_kpp(&ocs_ecdh_p256);
        if (rc) {
                dev_err(dev,
                        "Could not register OCS algorithms with Crypto API\n");
                goto cleanup;
        }
 
-       rc = crypto_register_kpp(&ocs_ecdh_p384);
+       rc = crypto_engine_register_kpp(&ocs_ecdh_p384);
        if (rc) {
                dev_err(dev,
                        "Could not register OCS algorithms with Crypto API\n");
@@ -958,7 +951,7 @@ static int kmb_ocs_ecc_probe(struct platform_device *pdev)
        return 0;
 
 ocs_ecdh_p384_error:
-       crypto_unregister_kpp(&ocs_ecdh_p256);
+       crypto_engine_unregister_kpp(&ocs_ecdh_p256);
 
 cleanup:
        crypto_engine_exit(ecc_dev->engine);
@@ -977,8 +970,8 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev)
 
        ecc_dev = platform_get_drvdata(pdev);
 
-       crypto_unregister_kpp(&ocs_ecdh_p384);
-       crypto_unregister_kpp(&ocs_ecdh_p256);
+       crypto_engine_unregister_kpp(&ocs_ecdh_p384);
+       crypto_engine_unregister_kpp(&ocs_ecdh_p256);
 
        spin_lock(&ocs_ecc.lock);
        list_del(&ecc_dev->list);
index d4bcbed..daba8ca 100644 (file)
@@ -5,19 +5,21 @@
  * Copyright (C) 2018-2020 Intel Corporation
  */
 
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-
 #include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/sha2.h>
 #include <crypto/sm3.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
 
 #include "ocs-hcu.h"
 
@@ -34,7 +36,6 @@
 
 /**
  * struct ocs_hcu_ctx: OCS HCU Transform context.
- * @engine_ctx:         Crypto Engine context.
  * @hcu_dev:    The OCS HCU device used by the transformation.
  * @key:        The key (used only for HMAC transformations).
  * @key_len:    The length of the key.
@@ -42,7 +43,6 @@
  * @is_hmac_tfm: Whether or not this is a HMAC transformation.
  */
 struct ocs_hcu_ctx {
-       struct crypto_engine_ctx engine_ctx;
        struct ocs_hcu_dev *hcu_dev;
        u8 key[SHA512_BLOCK_SIZE];
        size_t key_len;
@@ -824,11 +824,6 @@ static void __cra_init(struct crypto_tfm *tfm, struct ocs_hcu_ctx *ctx)
 {
        crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
                                     sizeof(struct ocs_hcu_rctx));
-
-       /* Init context to 0. */
-       memzero_explicit(ctx, sizeof(*ctx));
-       /* Set engine ops. */
-       ctx->engine_ctx.op.do_one_request = kmb_ocs_hcu_do_one_request;
 }
 
 static int kmb_ocs_hcu_sha_cra_init(struct crypto_tfm *tfm)
@@ -883,17 +878,17 @@ static void kmb_ocs_hcu_hmac_cra_exit(struct crypto_tfm *tfm)
        memzero_explicit(ctx->key, sizeof(ctx->key));
 }
 
-static struct ahash_alg ocs_hcu_algs[] = {
+static struct ahash_engine_alg ocs_hcu_algs[] = {
 #ifdef CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -907,18 +902,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.setkey            = kmb_ocs_hcu_setkey,
+       .base.halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -933,18 +929,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 #endif /* CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU_HMAC_SHA224 */
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -958,18 +955,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.setkey            = kmb_ocs_hcu_setkey,
+       .base.halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -984,17 +982,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.halg = {
                .digestsize     = SM3_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -1008,18 +1007,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sm3_cra_init,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.setkey            = kmb_ocs_hcu_setkey,
+       .base.halg = {
                .digestsize     = SM3_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -1034,17 +1034,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_init               = kmb_ocs_hcu_hmac_sm3_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -1058,18 +1059,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.setkey            = kmb_ocs_hcu_setkey,
+       .base.halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -1084,17 +1086,18 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -1108,18 +1111,19 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_module             = THIS_MODULE,
                        .cra_init               = kmb_ocs_hcu_sha_cra_init,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 {
-       .init           = kmb_ocs_hcu_init,
-       .update         = kmb_ocs_hcu_update,
-       .final          = kmb_ocs_hcu_final,
-       .finup          = kmb_ocs_hcu_finup,
-       .digest         = kmb_ocs_hcu_digest,
-       .export         = kmb_ocs_hcu_export,
-       .import         = kmb_ocs_hcu_import,
-       .setkey         = kmb_ocs_hcu_setkey,
-       .halg = {
+       .base.init              = kmb_ocs_hcu_init,
+       .base.update            = kmb_ocs_hcu_update,
+       .base.final             = kmb_ocs_hcu_final,
+       .base.finup             = kmb_ocs_hcu_finup,
+       .base.digest            = kmb_ocs_hcu_digest,
+       .base.export            = kmb_ocs_hcu_export,
+       .base.import            = kmb_ocs_hcu_import,
+       .base.setkey            = kmb_ocs_hcu_setkey,
+       .base.halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
                .statesize      = sizeof(struct ocs_hcu_rctx),
                .base   = {
@@ -1134,7 +1138,8 @@ static struct ahash_alg ocs_hcu_algs[] = {
                        .cra_init               = kmb_ocs_hcu_hmac_cra_init,
                        .cra_exit               = kmb_ocs_hcu_hmac_cra_exit,
                }
-       }
+       },
+       .op.do_one_request = kmb_ocs_hcu_do_one_request,
 },
 };
 
@@ -1155,7 +1160,7 @@ static int kmb_ocs_hcu_remove(struct platform_device *pdev)
        if (!hcu_dev)
                return -ENODEV;
 
-       crypto_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+       crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
 
        rc = crypto_engine_exit(hcu_dev->engine);
 
@@ -1170,7 +1175,6 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct ocs_hcu_dev *hcu_dev;
-       struct resource *hcu_mem;
        int rc;
 
        hcu_dev = devm_kzalloc(dev, sizeof(*hcu_dev), GFP_KERNEL);
@@ -1184,14 +1188,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
-       /* Get the memory address and remap. */
-       hcu_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!hcu_mem) {
-               dev_err(dev, "Could not retrieve io mem resource.\n");
-               return -ENODEV;
-       }
-
-       hcu_dev->io_base = devm_ioremap_resource(dev, hcu_mem);
+       hcu_dev->io_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hcu_dev->io_base))
                return PTR_ERR(hcu_dev->io_base);
 
@@ -1231,7 +1228,7 @@ static int kmb_ocs_hcu_probe(struct platform_device *pdev)
 
        /* Security infrastructure guarantees OCS clock is enabled. */
 
-       rc = crypto_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
+       rc = crypto_engine_register_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
        if (rc) {
                dev_err(dev, "Could not register algorithms.\n");
                goto cleanup;
index e543a9e..dd4464b 100644 (file)
@@ -3,11 +3,13 @@
 #include <linux/iopoll.h>
 #include <adf_accel_devices.h>
 #include <adf_cfg.h>
+#include <adf_clock.h>
 #include <adf_common_drv.h>
 #include <adf_gen4_dc.h>
 #include <adf_gen4_hw_data.h>
 #include <adf_gen4_pfvf.h>
 #include <adf_gen4_pm.h>
+#include <adf_gen4_timer.h>
 #include "adf_4xxx_hw_data.h"
 #include "icp_qat_hw.h"
 
@@ -223,6 +225,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
                          ICP_ACCEL_CAPABILITIES_HKDF |
                          ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
                          ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+                         ICP_ACCEL_CAPABILITIES_SM3 |
+                         ICP_ACCEL_CAPABILITIES_SM4 |
                          ICP_ACCEL_CAPABILITIES_AES_V2;
 
        /* A set bit in fusectl1 means the feature is OFF in this SKU */
@@ -246,12 +250,19 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
                capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
        }
 
+       if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+               capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+               capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+       }
+
        capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
                          ICP_ACCEL_CAPABILITIES_CIPHER |
+                         ICP_ACCEL_CAPABILITIES_SM2 |
                          ICP_ACCEL_CAPABILITIES_ECEDMONT;
 
        if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
                capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+               capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
                capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
        }
 
@@ -317,6 +328,14 @@ static void get_admin_info(struct admin_info *admin_csrs_info)
        admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
 }
 
+static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+       /*
+        * 4XXX uses KPT counter for HB
+        */
+       return ADF_4XXX_KPT_COUNTER_FREQ;
+}
+
 static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
 {
        struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
@@ -508,6 +527,10 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
        hw_data->enable_pm = adf_gen4_enable_pm;
        hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
        hw_data->dev_config = adf_gen4_dev_config;
+       hw_data->start_timer = adf_gen4_timer_start;
+       hw_data->stop_timer = adf_gen4_timer_stop;
+       hw_data->get_hb_clock = get_heartbeat_clock;
+       hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
 
        adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
        adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
index e5b314d..bb3d95a 100644 (file)
@@ -3,6 +3,7 @@
 #ifndef ADF_4XXX_HW_DATA_H_
 #define ADF_4XXX_HW_DATA_H_
 
+#include <linux/units.h>
 #include <adf_accel_devices.h>
 
 /* PCIe configuration space */
@@ -64,6 +65,9 @@
 #define ADF_402XX_ASYM_OBJ     "qat_402xx_asym.bin"
 #define ADF_402XX_ADMIN_OBJ    "qat_402xx_admin.bin"
 
+/* Clocks frequency */
+#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
 /* qat_4xxx fuse bits are different from old GENs, redefine them */
 enum icp_qat_4xxx_slice_mask {
        ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
index 1a15600..6d4e2e1 100644 (file)
@@ -8,6 +8,7 @@
 #include <adf_cfg.h>
 #include <adf_common_drv.h>
 #include <adf_dbgfs.h>
+#include <adf_heartbeat.h>
 
 #include "adf_4xxx_hw_data.h"
 #include "qat_compression.h"
@@ -77,6 +78,8 @@ static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
        if (ret)
                return ret;
 
+       adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
        return 0;
 }
 
index 4756436..9c00c44 100644 (file)
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
 /* Copyright(c) 2014 - 2021 Intel Corporation */
 #include <adf_accel_devices.h>
+#include <adf_clock.h>
 #include <adf_common_drv.h>
 #include <adf_gen2_config.h>
 #include <adf_gen2_dc.h>
 #include <adf_gen2_hw_data.h>
 #include <adf_gen2_pfvf.h>
 #include "adf_c3xxx_hw_data.h"
+#include "adf_heartbeat.h"
 #include "icp_qat_hw.h"
 
 /* Worker thread to service arbiter mappings */
@@ -50,6 +52,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
        return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
 }
 
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+       /*
+        * Timestamp update interval is 16 AE clock ticks for c3xxx.
+        */
+       return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+       u32 frequency;
+       int ret;
+
+       ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C3XXX_MIN_AE_FREQ,
+                                   ADF_C3XXX_MAX_AE_FREQ);
+       if (ret)
+               return ret;
+
+       accel_dev->hw_device->clock_frequency = frequency;
+       return 0;
+}
+
 static u32 get_misc_bar_id(struct adf_hw_device_data *self)
 {
        return ADF_C3XXX_PMISC_BAR;
@@ -127,6 +151,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->dev_config = adf_gen2_dev_config;
+       hw_data->measure_clock = measure_clock;
+       hw_data->get_hb_clock = get_ts_clock;
+       hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+       hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
 
        adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 336a06f..690c6a1 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef ADF_C3XXX_HW_DATA_H_
 #define ADF_C3XXX_HW_DATA_H_
 
+#include <linux/units.h>
+
 /* PCIe configuration space */
 #define ADF_C3XXX_PMISC_BAR 0
 #define ADF_C3XXX_ETR_BAR 1
 #define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
 #define ADF_C3XXX_AE2FUNC_MAP_GRP_B_NUM_REGS 6
 
+/* Clocks frequency */
+#define ADF_C3XXX_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C3XXX_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C3XXX_MAX_AE_FREQ (685 * HZ_PER_MHZ)
+
 /* Firmware Binary */
 #define ADF_C3XXX_FW "qat_c3xxx.bin"
 #define ADF_C3XXX_MMP "qat_c3xxx_mmp.bin"
index e142707..355a781 100644 (file)
@@ -1,12 +1,14 @@
 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
 /* Copyright(c) 2014 - 2021 Intel Corporation */
 #include <adf_accel_devices.h>
+#include <adf_clock.h>
 #include <adf_common_drv.h>
 #include <adf_gen2_config.h>
 #include <adf_gen2_dc.h>
 #include <adf_gen2_hw_data.h>
 #include <adf_gen2_pfvf.h>
 #include "adf_c62x_hw_data.h"
+#include "adf_heartbeat.h"
 #include "icp_qat_hw.h"
 
 /* Worker thread to service arbiter mappings */
@@ -50,6 +52,28 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
        return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
 }
 
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+       /*
+        * Timestamp update interval is 16 AE clock ticks for c62x.
+        */
+       return self->clock_frequency / 16;
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev)
+{
+       u32 frequency;
+       int ret;
+
+       ret = adf_dev_measure_clock(accel_dev, &frequency, ADF_C62X_MIN_AE_FREQ,
+                                   ADF_C62X_MAX_AE_FREQ);
+       if (ret)
+               return ret;
+
+       accel_dev->hw_device->clock_frequency = frequency;
+       return 0;
+}
+
 static u32 get_misc_bar_id(struct adf_hw_device_data *self)
 {
        return ADF_C62X_PMISC_BAR;
@@ -129,6 +153,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
        hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->dev_config = adf_gen2_dev_config;
+       hw_data->measure_clock = measure_clock;
+       hw_data->get_hb_clock = get_ts_clock;
+       hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+       hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
 
        adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
        adf_gen2_init_hw_csr_ops(&hw_data->csr_ops);
index 008c0a3..13e6ebf 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef ADF_C62X_HW_DATA_H_
 #define ADF_C62X_HW_DATA_H_
 
+#include <linux/units.h>
+
 /* PCIe configuration space */
 #define ADF_C62X_SRAM_BAR 0
 #define ADF_C62X_PMISC_BAR 1
 #define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
 #define ADF_C62X_AE2FUNC_MAP_GRP_B_NUM_REGS 10
 
+/* Clocks frequency */
+#define ADF_C62X_AE_FREQ (685 * HZ_PER_MHZ)
+#define ADF_C62X_MIN_AE_FREQ (533 * HZ_PER_MHZ)
+#define ADF_C62X_MAX_AE_FREQ (800 * HZ_PER_MHZ)
+
 /* Firmware Binary */
 #define ADF_C62X_FW "qat_c62x.bin"
 #define ADF_C62X_MMP "qat_c62x_mmp.bin"
index 38de3ab..43622c7 100644 (file)
@@ -17,6 +17,8 @@ intel_qat-objs := adf_cfg.o \
        adf_gen4_pm.o \
        adf_gen2_dc.o \
        adf_gen4_dc.o \
+       adf_gen4_timer.o \
+       adf_clock.o \
        qat_crypto.o \
        qat_compression.o \
        qat_comp_algs.o \
@@ -28,6 +30,9 @@ intel_qat-objs := adf_cfg.o \
        qat_bl.o
 
 intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
+                               adf_fw_counters.o \
+                               adf_heartbeat.o \
+                               adf_heartbeat_dbgfs.o \
                                adf_dbgfs.o
 
 intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
index 0399417..e57abde 100644 (file)
@@ -188,6 +188,11 @@ struct adf_hw_device_data {
        int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
        void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
        int (*send_admin_init)(struct adf_accel_dev *accel_dev);
+       int (*start_timer)(struct adf_accel_dev *accel_dev);
+       void (*stop_timer)(struct adf_accel_dev *accel_dev);
+       void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
+       uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
+       int (*measure_clock)(struct adf_accel_dev *accel_dev);
        int (*init_arb)(struct adf_accel_dev *accel_dev);
        void (*exit_arb)(struct adf_accel_dev *accel_dev);
        const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
@@ -229,6 +234,7 @@ struct adf_hw_device_data {
        u8 num_accel;
        u8 num_logical_accel;
        u8 num_engines;
+       u32 num_hb_ctrs;
 };
 
 /* CSR write macro */
@@ -241,6 +247,11 @@ struct adf_hw_device_data {
 #define ADF_CFG_NUM_SERVICES   4
 #define ADF_SRV_TYPE_BIT_LEN   3
 #define ADF_SRV_TYPE_MASK      0x7
+#define ADF_AE_ADMIN_THREAD    7
+#define ADF_NUM_THREADS_PER_AE 8
+#define ADF_NUM_PKE_STRAND     2
+#define ADF_AE_STRAND0_THREAD  8
+#define ADF_AE_STRAND1_THREAD  9
 
 #define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
 #define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
@@ -292,9 +303,12 @@ struct adf_accel_dev {
        unsigned long status;
        atomic_t ref_count;
        struct dentry *debugfs_dir;
+       struct dentry *fw_cntr_dbgfile;
        struct list_head list;
        struct module *owner;
        struct adf_accel_pci accel_pci_dev;
+       struct adf_timer *timer;
+       struct adf_heartbeat *heartbeat;
        union {
                struct {
                        /* protects VF2PF interrupts access */
index 118775e..ff79082 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
+#include "adf_heartbeat.h"
 #include "icp_qat_fw_init_admin.h"
 
 #define ADF_ADMIN_MAILBOX_STRIDE 0x1000
@@ -15,6 +16,7 @@
 #define ADF_CONST_TABLE_SIZE 1024
 #define ADF_ADMIN_POLL_DELAY_US 20
 #define ADF_ADMIN_POLL_TIMEOUT_US (5 * USEC_PER_SEC)
+#define ADF_ONE_AE 1
 
 static const u8 const_tab[1024] __aligned(1024) = {
 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -194,6 +196,22 @@ static int adf_set_fw_constants(struct adf_accel_dev *accel_dev)
        return adf_send_admin(accel_dev, &req, &resp, ae_mask);
 }
 
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp)
+{
+       struct icp_qat_fw_init_admin_req req = { };
+       struct icp_qat_fw_init_admin_resp resp;
+       unsigned int ae_mask = ADF_ONE_AE;
+       int ret;
+
+       req.cmd_id = ICP_QAT_FW_TIMER_GET;
+       ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+       if (ret)
+               return ret;
+
+       *timestamp = resp.timestamp;
+       return 0;
+}
+
 static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
                                   u32 *capabilities)
 {
@@ -223,6 +241,49 @@ static int adf_get_dc_capabilities(struct adf_accel_dev *accel_dev,
        return 0;
 }
 
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps)
+{
+       struct icp_qat_fw_init_admin_resp resp = { };
+       struct icp_qat_fw_init_admin_req req = { };
+       int ret;
+
+       req.cmd_id = ICP_QAT_FW_COUNTERS_GET;
+
+       ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
+       if (ret || resp.status)
+               return -EFAULT;
+
+       *reqs = resp.req_rec_count;
+       *resps = resp.resp_sent_count;
+
+       return 0;
+}
+
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt)
+{
+       u32 ae_mask = accel_dev->hw_device->ae_mask;
+       struct icp_qat_fw_init_admin_req req = { };
+       struct icp_qat_fw_init_admin_resp resp = { };
+
+       req.cmd_id = ICP_QAT_FW_SYNC;
+       req.int_timer_ticks = cnt;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks)
+{
+       u32 ae_mask = accel_dev->hw_device->ae_mask;
+       struct icp_qat_fw_init_admin_req req = { };
+       struct icp_qat_fw_init_admin_resp resp;
+
+       req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
+       req.init_cfg_ptr = accel_dev->heartbeat->dma.phy_addr;
+       req.heartbeat_ticks = ticks;
+
+       return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
 /**
  * adf_send_admin_init() - Function sends init message to FW
  * @accel_dev: Pointer to acceleration device.
index 3ae1e5c..6066dc6 100644 (file)
@@ -47,4 +47,6 @@
 #define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
        ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY
 #define ADF_ACCEL_STR "Accelerator%d"
+#define ADF_HEARTBEAT_TIMER  "HeartbeatTimer"
+
 #endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
new file mode 100644 (file)
index 0000000..dc07786
--- /dev/null
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/time64.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+
+#define MEASURE_CLOCK_RETRIES 10
+#define MEASURE_CLOCK_DELAY_US 10000
+#define ME_CLK_DIVIDER 16
+#define MEASURE_CLOCK_DELTA_THRESHOLD_US 100
+
+static inline u64 timespec_to_us(const struct timespec64 *ts)
+{
+       return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_USEC);
+}
+
+static inline u64 timespec_to_ms(const struct timespec64 *ts)
+{
+       return (u64)DIV_ROUND_CLOSEST_ULL(timespec64_to_ns(ts), NSEC_PER_MSEC);
+}
+
+u64 adf_clock_get_current_time(void)
+{
+       struct timespec64 ts;
+
+       ktime_get_real_ts64(&ts);
+       return timespec_to_ms(&ts);
+}
+
+static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
+{
+       struct timespec64 ts1, ts2, ts3, ts4;
+       u64 timestamp1, timestamp2, temp;
+       u32 delta_us, tries;
+       int ret;
+
+       tries = MEASURE_CLOCK_RETRIES;
+       do {
+               ktime_get_real_ts64(&ts1);
+               ret = adf_get_fw_timestamp(accel_dev, &timestamp1);
+               if (ret) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to get fw timestamp\n");
+                       return ret;
+               }
+               ktime_get_real_ts64(&ts2);
+               delta_us = timespec_to_us(&ts2) - timespec_to_us(&ts1);
+       } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+       if (!tries) {
+               dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+               return -ETIMEDOUT;
+       }
+
+       fsleep(MEASURE_CLOCK_DELAY_US);
+
+       tries = MEASURE_CLOCK_RETRIES;
+       do {
+               ktime_get_real_ts64(&ts3);
+               if (adf_get_fw_timestamp(accel_dev, &timestamp2)) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "Failed to get fw timestamp\n");
+                       return -EIO;
+               }
+               ktime_get_real_ts64(&ts4);
+               delta_us = timespec_to_us(&ts4) - timespec_to_us(&ts3);
+       } while (delta_us > MEASURE_CLOCK_DELTA_THRESHOLD_US && --tries);
+
+       if (!tries) {
+               dev_err(&GET_DEV(accel_dev), "Excessive clock measure delay\n");
+               return -ETIMEDOUT;
+       }
+
+       delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
+       temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
+       temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
+       /*
+        * Enclose the division to allow the preprocessor to precalculate it,
+        * and avoid promoting r-value to 64-bit before division.
+        */
+       *frequency = temp * (HZ_PER_MHZ / 10);
+
+       return 0;
+}
+
+/**
+ * adf_dev_measure_clock() - measures device clock frequency
+ * @accel_dev: Pointer to acceleration device.
+ * @frequency: Pointer to variable where result will be stored
+ * @min: Minimal allowed frequency value
+ * @max: Maximal allowed frequency value
+ *
+ * If the measurement result will go beyond the min/max thresholds the value
+ * will take the value of the crossed threshold.
+ *
+ * This algorithm compares the device firmware timestamp with the kernel
+ * timestamp. So we can't expect too high accuracy from this measurement.
+ *
+ * Return:
+ * * 0 - measurement succeeded
+ * * -ETIMEDOUT - measurement failed
+ */
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev,
+                         u32 *frequency, u32 min, u32 max)
+{
+       int ret;
+       u32 freq;
+
+       ret = measure_clock(accel_dev, &freq);
+       if (ret)
+               return ret;
+
+       *frequency = clamp(freq, min, max);
+
+       if (*frequency != freq)
+               dev_warn(&GET_DEV(accel_dev),
+                        "Measured clock %d Hz is out of range, assuming %d\n",
+                        freq, *frequency);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_measure_clock);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.h b/drivers/crypto/intel/qat/qat_common/adf_clock.h
new file mode 100644 (file)
index 0000000..e309bc0
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_CLOCK_H
+#define ADF_CLOCK_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+int adf_dev_measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency,
+                         u32 min, u32 max);
+u64 adf_clock_get_current_time(void);
+
+#endif
index b8132eb..673b504 100644 (file)
@@ -58,12 +58,6 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev);
 
 void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data);
 void adf_clean_vf_map(bool);
-
-int adf_ctl_dev_register(void);
-void adf_ctl_dev_unregister(void);
-int adf_processes_dev_register(void);
-void adf_processes_dev_unregister(void);
-
 int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev,
                       struct adf_accel_dev *pf);
 void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev,
@@ -94,7 +88,11 @@ void adf_exit_aer(void);
 int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
 void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
 int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
 int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
 int adf_init_arb(struct adf_accel_dev *accel_dev);
 void adf_exit_arb(struct adf_accel_dev *accel_dev);
 void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -178,8 +176,6 @@ int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
 int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
                    unsigned char ae, unsigned long ctx_mask,
                    unsigned short reg_num, unsigned int regdata);
-int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
-                 unsigned char ae, unsigned short lm_addr, unsigned int value);
 void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
                                unsigned char ae, unsigned char mode);
 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
@@ -193,6 +189,8 @@ int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
 int adf_init_misc_wq(void);
 void adf_exit_misc_wq(void);
 bool adf_misc_wq_queue_work(struct work_struct *work);
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+                                   unsigned long delay);
 #if defined(CONFIG_PCI_IOV)
 int adf_sriov_configure(struct pci_dev *pdev, int numvfs);
 void adf_disable_sriov(struct adf_accel_dev *accel_dev);
index d0a2f89..04845f8 100644 (file)
@@ -6,6 +6,8 @@
 #include "adf_cfg.h"
 #include "adf_common_drv.h"
 #include "adf_dbgfs.h"
+#include "adf_fw_counters.h"
+#include "adf_heartbeat_dbgfs.h"
 
 /**
  * adf_dbgfs_init() - add persistent debugfs entries
@@ -56,6 +58,11 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
 {
        if (!accel_dev->debugfs_dir)
                return;
+
+       if (!accel_dev->is_vf) {
+               adf_fw_counters_dbgfs_add(accel_dev);
+               adf_heartbeat_dbgfs_add(accel_dev);
+       }
 }
 
 /**
@@ -66,4 +73,9 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
 {
        if (!accel_dev->debugfs_dir)
                return;
+
+       if (!accel_dev->is_vf) {
+               adf_heartbeat_dbgfs_rm(accel_dev);
+               adf_fw_counters_dbgfs_rm(accel_dev);
+       }
 }
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
new file mode 100644 (file)
index 0000000..cb6e09e
--- /dev/null
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/types.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_fw_counters.h"
+
+#define ADF_FW_COUNTERS_MAX_PADDING 16
+
+enum adf_fw_counters_types {
+       ADF_FW_REQUESTS,
+       ADF_FW_RESPONSES,
+       ADF_FW_COUNTERS_COUNT
+};
+
+static const char * const adf_fw_counter_names[] = {
+       [ADF_FW_REQUESTS] = "Requests",
+       [ADF_FW_RESPONSES] = "Responses",
+};
+
+static_assert(ARRAY_SIZE(adf_fw_counter_names) == ADF_FW_COUNTERS_COUNT);
+
+struct adf_ae_counters {
+       u16 ae;
+       u64 values[ADF_FW_COUNTERS_COUNT];
+};
+
+struct adf_fw_counters {
+       u16 ae_count;
+       struct adf_ae_counters ae_counters[];
+};
+
+static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
+                                           u64 req_count, u64 resp_count)
+{
+       ae_counters->ae = ae;
+       ae_counters->values[ADF_FW_REQUESTS] = req_count;
+       ae_counters->values[ADF_FW_RESPONSES] = resp_count;
+}
+
+static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev,
+                                           struct adf_fw_counters *fw_counters)
+{
+       struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+       unsigned long ae_mask;
+       unsigned int i;
+       unsigned long ae;
+
+       /* Ignore the admin AEs */
+       ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
+
+       if (hweight_long(ae_mask) > fw_counters->ae_count)
+               return -EINVAL;
+
+       i = 0;
+       for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+               u64 req_count, resp_count;
+               int ret;
+
+               ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count);
+               if (ret)
+                       return ret;
+
+               adf_fw_counters_parse_ae_values(&fw_counters->ae_counters[i++], ae,
+                                               req_count, resp_count);
+       }
+
+       return 0;
+}
+
+static struct adf_fw_counters *adf_fw_counters_allocate(unsigned long ae_count)
+{
+       struct adf_fw_counters *fw_counters;
+
+       if (unlikely(!ae_count))
+               return ERR_PTR(-EINVAL);
+
+       fw_counters = kmalloc(struct_size(fw_counters, ae_counters, ae_count), GFP_KERNEL);
+       if (!fw_counters)
+               return ERR_PTR(-ENOMEM);
+
+       fw_counters->ae_count = ae_count;
+
+       return fw_counters;
+}
+
+/**
+ * adf_fw_counters_get() - Return FW counters for the provided device.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Allocates and returns a table of counters containing execution statistics
+ * for each non-admin AE available through the supplied acceleration device.
+ * The caller becomes the owner of such memory and is responsible for
+ * the deallocation through a call to kfree().
+ *
+ * Returns: a pointer to a dynamically allocated struct adf_fw_counters
+ *          on success, or an ERR_PTR-encoded error value on failure.
+ */
+static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+       struct adf_fw_counters *fw_counters;
+       unsigned long ae_count;
+       int ret;
+
+       if (!adf_dev_started(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
+               return ERR_PTR(-EFAULT);
+       }
+
+       /* Ignore the admin AEs */
+       ae_count = hweight_long(hw_data->ae_mask & ~hw_data->admin_ae_mask);
+
+       fw_counters = adf_fw_counters_allocate(ae_count);
+       if (IS_ERR(fw_counters))
+               return fw_counters;
+
+       ret = adf_fw_counters_load_from_device(accel_dev, fw_counters);
+       if (ret) {
+               kfree(fw_counters);
+               dev_err(&GET_DEV(accel_dev),
+                       "Failed to create QAT fw_counters file table [%d].\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       return fw_counters;
+}
+
+static void *qat_fw_counters_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_fw_counters *fw_counters = sfile->private;
+
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos > fw_counters->ae_count)
+               return NULL;
+
+       return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void *qat_fw_counters_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_fw_counters *fw_counters = sfile->private;
+
+       (*pos)++;
+
+       if (*pos > fw_counters->ae_count)
+               return NULL;
+
+       return &fw_counters->ae_counters[*pos - 1];
+}
+
+static void qat_fw_counters_seq_stop(struct seq_file *sfile, void *v) {}
+
+static int qat_fw_counters_seq_show(struct seq_file *sfile, void *v)
+{
+       int i;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(sfile, "AE ");
+               for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+                       seq_printf(sfile, " %*s", ADF_FW_COUNTERS_MAX_PADDING,
+                                  adf_fw_counter_names[i]);
+       } else {
+               struct adf_ae_counters *ae_counters = (struct adf_ae_counters *)v;
+
+               seq_printf(sfile, "%2d:", ae_counters->ae);
+               for (i = 0; i < ADF_FW_COUNTERS_COUNT; ++i)
+                       seq_printf(sfile, " %*llu", ADF_FW_COUNTERS_MAX_PADDING,
+                                  ae_counters->values[i]);
+       }
+       seq_putc(sfile, '\n');
+
+       return 0;
+}
+
+static const struct seq_operations qat_fw_counters_sops = {
+       .start = qat_fw_counters_seq_start,
+       .next = qat_fw_counters_seq_next,
+       .stop = qat_fw_counters_seq_stop,
+       .show = qat_fw_counters_seq_show,
+};
+
+static int qat_fw_counters_file_open(struct inode *inode, struct file *file)
+{
+       struct adf_accel_dev *accel_dev = inode->i_private;
+       struct seq_file *fw_counters_seq_file;
+       struct adf_fw_counters *fw_counters;
+       int ret;
+
+       fw_counters = adf_fw_counters_get(accel_dev);
+       if (IS_ERR(fw_counters))
+               return PTR_ERR(fw_counters);
+
+       ret = seq_open(file, &qat_fw_counters_sops);
+       if (unlikely(ret)) {
+               kfree(fw_counters);
+               return ret;
+       }
+
+       fw_counters_seq_file = file->private_data;
+       fw_counters_seq_file->private = fw_counters;
+       return ret;
+}
+
+static int qat_fw_counters_file_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+
+       kfree(seq->private);
+       seq->private = NULL;
+
+       return seq_release(inode, file); }
+
+static const struct file_operations qat_fw_counters_fops = {
+       .owner = THIS_MODULE,
+       .open = qat_fw_counters_file_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = qat_fw_counters_file_release,
+};
+
+/**
+ * adf_fw_counters_dbgfs_add() - Create a debugfs file containing FW
+ * execution counters.
+ * @accel_dev:  Pointer to a QAT acceleration device
+ *
+ * Function creates a file to display a table with statistics for the given
+ * QAT acceleration device. The table stores device specific execution values
+ * for each AE, such as the number of requests sent to the FW and responses
+ * received from the FW.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+       accel_dev->fw_cntr_dbgfile = debugfs_create_file("fw_counters", 0400,
+                                                        accel_dev->debugfs_dir,
+                                                        accel_dev,
+                                                        &qat_fw_counters_fops);
+}
+
+/**
+ * adf_fw_counters_dbgfs_rm() - Remove the debugfs file containing FW counters.
+ * @accel_dev:  Pointer to a QAT acceleration device.
+ *
+ * Function removes the file providing the table of statistics for the given
+ * QAT acceleration device.
+ *
+ * Return: void
+ */
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+       debugfs_remove(accel_dev->fw_cntr_dbgfile);
+       accel_dev->fw_cntr_dbgfile = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.h
new file mode 100644 (file)
index 0000000..91b3b6a
--- /dev/null
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_COUNTERS_H
+#define ADF_FW_COUNTERS_H
+
+struct adf_accel_dev;
+
+void adf_fw_counters_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_fw_counters_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif
index eeb30da..c27ff6d 100644 (file)
@@ -7,6 +7,7 @@
 #include "adf_common_drv.h"
 #include "qat_crypto.h"
 #include "qat_compression.h"
+#include "adf_heartbeat.h"
 #include "adf_transport_access_macros.h"
 
 static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)
@@ -195,6 +196,12 @@ int adf_gen2_dev_config(struct adf_accel_dev *accel_dev)
        if (ret)
                goto err;
 
+       ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+       if (ret)
+               goto err;
+
+       adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_DEFAULT_MS);
+
        set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
 
        return ret;
index e4bc075..6bd3410 100644 (file)
@@ -145,6 +145,9 @@ do { \
 #define ADF_GEN2_CERRSSMSH(i)          ((i) * 0x4000 + 0x10)
 #define ADF_GEN2_ERRSSMSH_EN           BIT(3)
 
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
 /* Interrupts */
 #define ADF_GEN2_SMIAPF0_MASK_OFFSET    (0x3A000 + 0x28)
 #define ADF_GEN2_SMIAPF1_MASK_OFFSET    (0x3A000 + 0x30)
index 4fb4b3d..02d7a01 100644 (file)
@@ -136,6 +136,9 @@ do { \
 
 #define ADF_GEN4_VFLNOTIFY     BIT(7)
 
+/* Number of heartbeat counter pairs */
+#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+
 void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
 void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
 int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
index dd11292..c276876 100644 (file)
@@ -35,7 +35,7 @@
 #define ADF_GEN4_PM_MSG_PENDING                        BIT(0)
 #define ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK       GENMASK(28, 1)
 
-#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER                (0x0)
+#define ADF_GEN4_PM_DEFAULT_IDLE_FILTER                (0x6)
 #define ADF_GEN4_PM_MAX_IDLE_FILTER            (0x7)
 #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT       (0x1)
 
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
new file mode 100644 (file)
index 0000000..646c579
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/container_of.h>
+#include <linux/dev_printk.h>
+#include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_timer.h"
+
+#define ADF_GEN4_TIMER_PERIOD_MS 200
+
+/* This periodic update is used to trigger HB, RL & TL fw events */
+static void work_handler(struct work_struct *work)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_timer *timer_ctx;
+       u32 time_periods;
+
+       timer_ctx = container_of(to_delayed_work(work), struct adf_timer, work_ctx);
+       accel_dev = timer_ctx->accel_dev;
+
+       adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+                                      msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+       time_periods = div_u64(ktime_ms_delta(ktime_get_real(), timer_ctx->initial_ktime),
+                              ADF_GEN4_TIMER_PERIOD_MS);
+
+       if (adf_send_admin_tim_sync(accel_dev, time_periods))
+               dev_err(&GET_DEV(accel_dev), "Failed to synchronize qat timer\n");
+}
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_timer *timer_ctx;
+
+       timer_ctx = kzalloc(sizeof(*timer_ctx), GFP_KERNEL);
+       if (!timer_ctx)
+               return -ENOMEM;
+
+       timer_ctx->accel_dev = accel_dev;
+       accel_dev->timer = timer_ctx;
+       timer_ctx->initial_ktime = ktime_get_real();
+
+       INIT_DELAYED_WORK(&timer_ctx->work_ctx, work_handler);
+       adf_misc_wq_queue_delayed_work(&timer_ctx->work_ctx,
+                                      msecs_to_jiffies(ADF_GEN4_TIMER_PERIOD_MS));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_start);
+
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_timer *timer_ctx = accel_dev->timer;
+
+       if (!timer_ctx)
+               return;
+
+       cancel_delayed_work_sync(&timer_ctx->work_ctx);
+
+       kfree(timer_ctx);
+       accel_dev->timer = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_timer_stop);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.h
new file mode 100644 (file)
index 0000000..66a709e
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_GEN4_TIMER_H_
+#define ADF_GEN4_TIMER_H_
+
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+struct adf_accel_dev;
+
+struct adf_timer {
+       struct adf_accel_dev *accel_dev;
+       struct delayed_work work_ctx;
+       ktime_t initial_ktime;
+};
+
+int adf_gen4_timer_start(struct adf_accel_dev *accel_dev);
+void adf_gen4_timer_stop(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_GEN4_TIMER_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
new file mode 100644 (file)
index 0000000..beef9a5
--- /dev/null
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/overflow.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/errno.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "adf_clock.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_internal.h"
+#include "icp_qat_fw_init_admin.h"
+
+#define ADF_HB_EMPTY_SIG 0xA5A5A5A5
+
+/* Heartbeat counter pair - request/response counters read back from the
+ * device DMA region, one pair per AE thread (see check_ae()).
+ */
+struct hb_cnt_pair {
+       __u16 resp_heartbeat_cnt;
+       __u16 req_heartbeat_cnt;
+};
+
+/*
+ * Reject heartbeat polls that arrive faster than the configured HB timer
+ * period; on an accepted poll, record its time for the next check.
+ * Returns 0 on success, -EINVAL when polling is too frequent.
+ */
+static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)
+{
+       u64 curr_time = adf_clock_get_current_time();
+       u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time;
+
+       if (polling_time < accel_dev->heartbeat->hb_timer) {
+               dev_warn(&GET_DEV(accel_dev),
+                        "HB polling too frequent. Configured HB timer %d ms\n",
+                        accel_dev->heartbeat->hb_timer);
+               return -EINVAL;
+       }
+
+       accel_dev->heartbeat->last_hb_check_time = curr_time;
+       return 0;
+}
+
+/**
+ * validate_hb_ctrs_cnt() - checks if the number of heartbeat counters should
+ * be updated by one to support the currently loaded firmware.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Return:
+ * * true - hb_ctrs must be increased by ADF_NUM_PKE_STRAND
+ * * false - no changes needed
+ */
+static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev)
+{
+       const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+       const size_t max_aes = accel_dev->hw_device->num_engines;
+       const size_t hb_struct_size = sizeof(struct hb_cnt_pair);
+       /* size of the extra per-engine PKE strand counter block */
+       const size_t exp_diff_size = array3_size(ADF_NUM_PKE_STRAND, max_aes,
+                                                hb_struct_size);
+       const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+       const size_t stats_size = size_mul(dev_ctrs, hb_struct_size);
+       const u32 exp_diff_cnt = exp_diff_size / sizeof(u32);
+       const u32 stats_el_cnt = stats_size / sizeof(u32);
+       struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+       /* start checking right after the live stats area */
+       const u32 *mem_to_chk = (u32 *)(hb_stats + dev_ctrs);
+       u32 el_diff_cnt = 0;
+       int i;
+
+       /* count how many u32 elements differ from the empty-signature
+        * pattern (stops at the first untouched element)
+        */
+       for (i = 0; i < stats_el_cnt; i++) {
+               if (mem_to_chk[i] == ADF_HB_EMPTY_SIG)
+                       break;
+
+               el_diff_cnt++;
+       }
+
+       /* exact match with the expected overflow size means the firmware
+        * wrote ADF_NUM_PKE_STRAND additional counters per engine
+        */
+       return el_diff_cnt && el_diff_cnt == exp_diff_cnt;
+}
+
+/*
+ * Fill both the live and the last stats areas (2 * stats_size) with
+ * ADF_HB_EMPTY_SIG and arm the counter-count validation performed by
+ * validate_hb_ctrs_cnt() on the next status read.
+ */
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+       struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;
+       const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;
+       const size_t max_aes = accel_dev->hw_device->num_engines;
+       const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+       const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair));
+       const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32);
+
+       /* fill hb stats memory with pattern */
+       memset32((uint32_t *)hb_stats, ADF_HB_EMPTY_SIG, mem_items_to_fill);
+       accel_dev->heartbeat->ctrs_cnt_checked = false;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_check_ctrs);
+
+/*
+ * Resolve the heartbeat period: read the optional ADF_HEARTBEAT_TIMER
+ * config key (falling back to ADF_CFG_HB_TIMER_DEFAULT_MS), validate it
+ * against the minimum, persist it back to the config and convert it to
+ * device clock ticks.
+ * Returns 0 on success, negative errno on invalid timer or conversion
+ * failure.
+ */
+static int get_timer_ticks(struct adf_accel_dev *accel_dev, unsigned int *value)
+{
+       char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+       u32 timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+       int cfg_read_status;
+       u32 ticks;
+       int ret;
+
+       /* missing key or unparsable value silently keeps the default */
+       cfg_read_status = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+                                                 ADF_HEARTBEAT_TIMER, timer_str);
+       if (cfg_read_status == 0) {
+               if (kstrtouint(timer_str, 10, &timer_ms))
+                       dev_dbg(&GET_DEV(accel_dev),
+                               "kstrtouint failed to parse the %s, param value",
+                               ADF_HEARTBEAT_TIMER);
+       }
+
+       if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+               dev_err(&GET_DEV(accel_dev), "Timer cannot be less than %u\n",
+                       ADF_CFG_HB_TIMER_MIN_MS);
+               return -EINVAL;
+       }
+
+       /*
+        * On 4xxx devices adf_timer is responsible for HB updates and
+        * its period is fixed to 200ms
+        */
+       if (accel_dev->timer)
+               timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+       ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+       if (ret)
+               return ret;
+
+       /* store the effective value so userspace can read it back */
+       adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+
+       accel_dev->heartbeat->hb_timer = timer_ms;
+       *value = ticks;
+
+       return 0;
+}
+
+/*
+ * Compare current and previous heartbeat counters for every thread of one
+ * AE. A thread is treated as stalled when its response counter did not
+ * move since the previous poll while it is either the admin thread or
+ * still has requests outstanding (req != resp). Any progress resets that
+ * thread's failure count.
+ * Returns -EIO once a thread stalls for ADF_CFG_HB_COUNT_THRESHOLD
+ * consecutive polls, 0 otherwise.
+ */
+static int check_ae(struct hb_cnt_pair *curr, struct hb_cnt_pair *prev,
+                   u16 *count, const size_t hb_ctrs)
+{
+       size_t thr;
+
+       /* loop through all threads in AE */
+       for (thr = 0; thr < hb_ctrs; thr++) {
+               u16 req = curr[thr].req_heartbeat_cnt;
+               u16 resp = curr[thr].resp_heartbeat_cnt;
+               u16 last = prev[thr].resp_heartbeat_cnt;
+
+               if ((thr == ADF_AE_ADMIN_THREAD || req != resp) && resp == last) {
+                       u16 retry = ++count[thr];
+
+                       if (retry >= ADF_CFG_HB_COUNT_THRESHOLD)
+                               return -EIO;
+
+               } else {
+                       count[thr] = 0;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Evaluate heartbeat counters for all active AEs.
+ * DMA region layout: [live stats][last stats][per-thread fail counters],
+ * each area dev_ctrs entries long.
+ * Returns 0 when all engines made progress, -EIO when an engine is stuck,
+ * -ENOMEM if the snapshot allocation fails.
+ */
+static int adf_hb_get_status(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct hb_cnt_pair *live_stats, *last_stats, *curr_stats;
+       const size_t hb_ctrs = hw_device->num_hb_ctrs;
+       const unsigned long ae_mask = hw_device->ae_mask;
+       const size_t max_aes = hw_device->num_engines;
+       const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
+       const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats));
+       struct hb_cnt_pair *ae_curr_p, *ae_prev_p;
+       u16 *count_fails, *ae_count_p;
+       size_t ae_offset;
+       size_t ae = 0;
+       int ret = 0;
+
+       /* one-shot counter-count fixup armed by adf_heartbeat_check_ctrs() */
+       if (!accel_dev->heartbeat->ctrs_cnt_checked) {
+               if (validate_hb_ctrs_cnt(accel_dev))
+                       hw_device->num_hb_ctrs += ADF_NUM_PKE_STRAND;
+
+               accel_dev->heartbeat->ctrs_cnt_checked = true;
+       }
+
+       live_stats = accel_dev->heartbeat->dma.virt_addr;
+       last_stats = live_stats + dev_ctrs;
+       count_fails = (u16 *)(last_stats + dev_ctrs);
+
+       /* snapshot the live counters so the comparison below and the copy
+        * saved for the next poll see one consistent view
+        */
+       curr_stats = kmemdup(live_stats, stats_size, GFP_KERNEL);
+       if (!curr_stats)
+               return -ENOMEM;
+
+       /* loop through active AEs */
+       for_each_set_bit(ae, &ae_mask, max_aes) {
+               ae_offset = size_mul(ae, hb_ctrs);
+               ae_curr_p = curr_stats + ae_offset;
+               ae_prev_p = last_stats + ae_offset;
+               ae_count_p = count_fails + ae_offset;
+
+               ret = check_ae(ae_curr_p, ae_prev_p, ae_count_p, hb_ctrs);
+               if (ret)
+                       break;
+       }
+
+       /* Copy current stats for the next iteration */
+       memcpy(last_stats, curr_stats, stats_size);
+       kfree(curr_stats);
+
+       return ret;
+}
+
+/*
+ * Report device liveness through *hb_status:
+ * - HB_DEV_UNRESPONSIVE when the device is not started / is restarting,
+ *   or when the counters indicate a hang;
+ * - HB_DEV_UNSUPPORTED when polled faster than the configured HB timer;
+ * - HB_DEV_ALIVE otherwise.
+ * Also maintains the sent/failed query counters exposed via debugfs.
+ */
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+                         enum adf_device_heartbeat_status *hb_status)
+{
+       struct adf_heartbeat *hb;
+
+       if (!adf_dev_started(accel_dev) ||
+           test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
+               *hb_status = HB_DEV_UNRESPONSIVE;
+               return;
+       }
+
+       if (adf_hb_check_polling_freq(accel_dev) == -EINVAL) {
+               *hb_status = HB_DEV_UNSUPPORTED;
+               return;
+       }
+
+       hb = accel_dev->heartbeat;
+       hb->hb_sent_counter++;
+
+       if (adf_hb_get_status(accel_dev)) {
+               dev_err(&GET_DEV(accel_dev),
+                       "Heartbeat ERROR: QAT is not responding.\n");
+               *hb_status = HB_DEV_UNRESPONSIVE;
+               hb->hb_failed_counter++;
+               return;
+       }
+
+       *hb_status = HB_DEV_ALIVE;
+}
+
+/*
+ * Convert a heartbeat period in milliseconds into device clock ticks
+ * using the hw-specific heartbeat clock frequency.
+ * Returns 0 on success, -EINVAL when the device provides no
+ * get_hb_clock callback.
+ */
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+                             u32 *value)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       u32 clk_per_sec;
+
+       /* HB clock may be different than AE clock */
+       if (!hw_data->get_hb_clock)
+               return -EINVAL;
+
+       clk_per_sec = hw_data->get_hb_clock(hw_data);
+       *value = time_ms * (clk_per_sec / MSEC_PER_SEC);
+
+       return 0;
+}
+
+/*
+ * Persist the heartbeat period (in ms) under the ADF_HEARTBEAT_TIMER key
+ * in the device's general config section, as a decimal string.
+ * Returns the adf_cfg_add_key_value_param() result.
+ */
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+                                unsigned int timer_ms)
+{
+       char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       snprintf(timer_str, sizeof(timer_str), "%u", timer_ms);
+       return adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+                                         ADF_HEARTBEAT_TIMER, timer_str,
+                                         ADF_STR);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_save_cfg_param);
+
+/*
+ * Allocate the heartbeat context plus one DMA-coherent page used to hold
+ * the heartbeat counter stats, and attach them to the device.
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ */
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+       struct adf_heartbeat *hb;
+
+       hb = kzalloc(sizeof(*hb), GFP_KERNEL);
+       if (!hb)
+               goto err_ret;
+
+       hb->dma.virt_addr = dma_alloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                              &hb->dma.phy_addr, GFP_KERNEL);
+       if (!hb->dma.virt_addr)
+               goto err_free;
+
+       /*
+        * Default set this flag as true to avoid unnecessary checks,
+        * it will be reset on platforms that need such a check
+        */
+       hb->ctrs_cnt_checked = true;
+       accel_dev->heartbeat = hb;
+
+       return 0;
+
+err_free:
+       kfree(hb);
+err_ret:
+       return -ENOMEM;
+}
+
+/*
+ * Program the heartbeat period into the admin firmware. Requires a prior
+ * successful adf_heartbeat_init(). If the platform provides a
+ * check_hb_ctrs hook, the counter area is re-initialized first.
+ * Returns 0 on success, -EFAULT without a heartbeat instance, or the
+ * tick-resolution/admin-message error otherwise.
+ */
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+       unsigned int timer_ticks;
+       int ret;
+
+       if (!accel_dev->heartbeat) {
+               dev_warn(&GET_DEV(accel_dev), "Heartbeat instance not found!");
+               return -EFAULT;
+       }
+
+       if (accel_dev->hw_device->check_hb_ctrs)
+               accel_dev->hw_device->check_hb_ctrs(accel_dev);
+
+       ret = get_timer_ticks(accel_dev, &timer_ticks);
+       if (ret)
+               return ret;
+
+       ret = adf_send_admin_hb_timer(accel_dev, timer_ticks);
+       if (ret)
+               dev_warn(&GET_DEV(accel_dev), "Heartbeat not supported!");
+
+       return ret;
+}
+
+/*
+ * Release the heartbeat DMA page and context. Safe to call when
+ * adf_heartbeat_init() never ran or failed (NULL heartbeat pointer).
+ */
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+       struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+       if (!hb)
+               return;
+
+       if (hb->dma.virt_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 hb->dma.virt_addr, hb->dma.phy_addr);
+
+       kfree(hb);
+       accel_dev->heartbeat = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h
new file mode 100644 (file)
index 0000000..b22e3cb
--- /dev/null
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_H_
+#define ADF_HEARTBEAT_H_
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+struct dentry;
+
+#define ADF_CFG_HB_TIMER_MIN_MS 200
+#define ADF_CFG_HB_TIMER_DEFAULT_MS 500
+#define ADF_CFG_HB_COUNT_THRESHOLD 3
+
+/* Device liveness as reported by adf_heartbeat_status() */
+enum adf_device_heartbeat_status {
+       HB_DEV_UNRESPONSIVE = 0,
+       HB_DEV_ALIVE,
+       HB_DEV_UNSUPPORTED,
+};
+
+struct adf_heartbeat {
+       unsigned int hb_sent_counter;   /* status queries issued */
+       unsigned int hb_failed_counter; /* queries that reported a hang */
+       unsigned int hb_timer;          /* effective HB period in ms */
+       u64 last_hb_check_time;         /* time of the last accepted poll */
+       bool ctrs_cnt_checked;          /* counter-count validation done */
+       /* DMA region holding the device heartbeat counters */
+       struct hb_dma_addr {
+               dma_addr_t phy_addr;
+               void *virt_addr;
+       } dma;
+       /* debugfs entries managed by adf_heartbeat_dbgfs_add()/_rm() */
+       struct {
+               struct dentry *base_dir;
+               struct dentry *status;
+               struct dentry *cfg;
+               struct dentry *sent;
+               struct dentry *failed;
+       } dbgfs;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_heartbeat_init(struct adf_accel_dev *accel_dev);
+int adf_heartbeat_start(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev);
+
+/* NOTE(review): prototype uses uint32_t while the definition uses u32 -
+ * compatible types, but inconsistent with kernel style; worth unifying.
+ */
+int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
+                             uint32_t *value);
+int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+                                unsigned int timer_ms);
+void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
+                         enum adf_device_heartbeat_status *hb_status);
+void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
+
+/* NOTE(review): no !CONFIG_DEBUG_FS stubs for adf_heartbeat_status() and
+ * adf_heartbeat_ms_to_ticks() - presumably all callers are compiled under
+ * the same config; verify.
+ */
+#else
+static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static inline int adf_heartbeat_start(struct adf_accel_dev *accel_dev)
+{
+       return 0;
+}
+
+static inline void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
+                                              unsigned int timer_ms)
+{
+       return 0;
+}
+
+static inline void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev)
+{
+}
+#endif
+#endif /* ADF_HEARTBEAT_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
new file mode 100644 (file)
index 0000000..803cbfd
--- /dev/null
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/types.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+#include "adf_heartbeat.h"
+#include "adf_heartbeat_dbgfs.h"
+
+#define HB_OK 0
+#define HB_ERROR -1
+#define HB_STATUS_MAX_STRLEN 4
+#define HB_STATS_MAX_STRLEN 16
+
+/*
+ * Read handler for the queries_sent/queries_failed files: prints the
+ * unsigned counter passed via file private data. Single-shot reads
+ * (*ppos > 0 returns EOF).
+ * NOTE(review): len + 1 also copies the terminating NUL to user space -
+ * presumably intentional, confirm.
+ */
+static ssize_t adf_hb_stats_read(struct file *file, char __user *user_buffer,
+                                size_t count, loff_t *ppos)
+{
+       char buf[HB_STATS_MAX_STRLEN];
+       unsigned int *value;
+       int len;
+
+       if (*ppos > 0)
+               return 0;
+
+       value = file->private_data;
+       len = scnprintf(buf, sizeof(buf), "%u\n", *value);
+
+       return simple_read_from_buffer(user_buffer, count, ppos, buf, len + 1);
+}
+
+static const struct file_operations adf_hb_stats_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = adf_hb_stats_read,
+};
+
+/*
+ * Read handler for the status file: runs a heartbeat query and prints
+ * HB_OK (0) when the device is alive, HB_ERROR (-1) otherwise.
+ * Single-shot reads (*ppos > 0 returns EOF).
+ */
+static ssize_t adf_hb_status_read(struct file *file, char __user *user_buf,
+                                 size_t count, loff_t *ppos)
+{
+       enum adf_device_heartbeat_status hb_status;
+       char ret_str[HB_STATUS_MAX_STRLEN];
+       struct adf_accel_dev *accel_dev;
+       int ret_code;
+       size_t len;
+
+       if (*ppos > 0)
+               return 0;
+
+       accel_dev = file->private_data;
+       ret_code = HB_OK;
+
+       adf_heartbeat_status(accel_dev, &hb_status);
+
+       if (hb_status != HB_DEV_ALIVE)
+               ret_code = HB_ERROR;
+
+       len = scnprintf(ret_str, sizeof(ret_str), "%d\n", ret_code);
+
+       return simple_read_from_buffer(user_buf, count, ppos, ret_str, len + 1);
+}
+
+static const struct file_operations adf_hb_status_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = adf_hb_status_read,
+};
+
+/*
+ * Read handler for the config file: prints the currently effective
+ * heartbeat period in ms. Single-shot reads (*ppos > 0 returns EOF).
+ */
+static ssize_t adf_hb_cfg_read(struct file *file, char __user *user_buf,
+                              size_t count, loff_t *ppos)
+{
+       char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       struct adf_accel_dev *accel_dev;
+       unsigned int timer_ms;
+       int len;
+
+       if (*ppos > 0)
+               return 0;
+
+       accel_dev = file->private_data;
+       timer_ms = accel_dev->heartbeat->hb_timer;
+       len = scnprintf(timer_str, sizeof(timer_str), "%u\n", timer_ms);
+
+       return simple_read_from_buffer(user_buf, count, ppos, timer_str,
+                                      len + 1);
+}
+
+/*
+ * Write handler for the config file: parses a decimal period in ms,
+ * validates it against ADF_CFG_HB_TIMER_MIN_MS, persists it to the
+ * device config and reprograms the firmware heartbeat timer.
+ * Returns the number of characters consumed, or a negative errno on
+ * invalid input / admin-message failure.
+ */
+static ssize_t adf_hb_cfg_write(struct file *file, const char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       char input_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { };
+       struct adf_accel_dev *accel_dev;
+       int ret, written_chars;
+       unsigned int timer_ms;
+       u32 ticks;
+
+       accel_dev = file->private_data;
+       timer_ms = ADF_CFG_HB_TIMER_DEFAULT_MS;
+
+       /* last byte left as string termination */
+       if (count > sizeof(input_str) - 1)
+               return -EINVAL;
+
+       written_chars = simple_write_to_buffer(input_str, sizeof(input_str) - 1,
+                                              ppos, user_buf, count);
+       if (written_chars > 0) {
+               ret = kstrtouint(input_str, 10, &timer_ms);
+               if (ret) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "heartbeat_cfg: Invalid value\n");
+                       return ret;
+               }
+
+               if (timer_ms < ADF_CFG_HB_TIMER_MIN_MS) {
+                       dev_err(&GET_DEV(accel_dev),
+                               "heartbeat_cfg: Invalid value\n");
+                       return -EINVAL;
+               }
+
+               /*
+                * On 4xxx devices adf_timer is responsible for HB updates and
+                * its period is fixed to 200ms
+                */
+               if (accel_dev->timer)
+                       timer_ms = ADF_CFG_HB_TIMER_MIN_MS;
+
+               ret = adf_heartbeat_save_cfg_param(accel_dev, timer_ms);
+               if (ret)
+                       return ret;
+
+               ret = adf_heartbeat_ms_to_ticks(accel_dev, timer_ms, &ticks);
+               if (ret)
+                       return ret;
+
+               ret = adf_send_admin_hb_timer(accel_dev, ticks);
+               if (ret)
+                       return ret;
+
+               /* cache the new period only after the firmware accepted it */
+               accel_dev->heartbeat->hb_timer = timer_ms;
+       }
+
+       return written_chars;
+}
+
+static const struct file_operations adf_hb_cfg_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = adf_hb_cfg_read,
+       .write = adf_hb_cfg_write,
+};
+
+/*
+ * Create the "heartbeat" debugfs directory under the device debugfs root
+ * with status/queries_sent/queries_failed (read-only) and config
+ * (read-write) entries. No-op when no heartbeat instance exists.
+ */
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+       struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+       if (!hb)
+               return;
+
+       hb->dbgfs.base_dir = debugfs_create_dir("heartbeat", accel_dev->debugfs_dir);
+       hb->dbgfs.status = debugfs_create_file("status", 0400, hb->dbgfs.base_dir,
+                                              accel_dev, &adf_hb_status_fops);
+       hb->dbgfs.sent = debugfs_create_file("queries_sent", 0400, hb->dbgfs.base_dir,
+                                            &hb->hb_sent_counter, &adf_hb_stats_fops);
+       hb->dbgfs.failed = debugfs_create_file("queries_failed", 0400, hb->dbgfs.base_dir,
+                                              &hb->hb_failed_counter, &adf_hb_stats_fops);
+       hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir,
+                                           accel_dev, &adf_hb_cfg_fops);
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add);
+
+/*
+ * Tear down the heartbeat debugfs entries created by
+ * adf_heartbeat_dbgfs_add(), files first and directory last, clearing
+ * each dentry pointer. No-op when no heartbeat instance exists.
+ */
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+       struct adf_heartbeat *hb = accel_dev->heartbeat;
+
+       if (!hb)
+               return;
+
+       debugfs_remove(hb->dbgfs.status);
+       hb->dbgfs.status = NULL;
+       debugfs_remove(hb->dbgfs.sent);
+       hb->dbgfs.sent = NULL;
+       debugfs_remove(hb->dbgfs.failed);
+       hb->dbgfs.failed = NULL;
+       debugfs_remove(hb->dbgfs.cfg);
+       hb->dbgfs.cfg = NULL;
+       debugfs_remove(hb->dbgfs.base_dir);
+       hb->dbgfs.base_dir = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_rm);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.h
new file mode 100644 (file)
index 0000000..84dd29e
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_HEARTBEAT_DBGFS_H_
+#define ADF_HEARTBEAT_DBGFS_H_
+
+struct adf_accel_dev;
+
+/* Create/remove the "heartbeat" debugfs directory and its entries */
+void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_HEARTBEAT_DBGFS_H_ */
index 826179c..89001fe 100644 (file)
@@ -8,6 +8,7 @@
 #include "adf_cfg.h"
 #include "adf_common_drv.h"
 #include "adf_dbgfs.h"
+#include "adf_heartbeat.h"
 
 static LIST_HEAD(service_table);
 static DEFINE_MUTEX(service_lock);
@@ -129,6 +130,8 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
                        return -EFAULT;
        }
 
+       adf_heartbeat_init(accel_dev);
+
        /*
         * Subservice initialisation is divided into two stages: init and start.
         * This is to facilitate any ordering dependencies between services
@@ -163,6 +166,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;
+       int ret;
 
        set_bit(ADF_STATUS_STARTING, &accel_dev->status);
 
@@ -177,6 +181,14 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
                return -EFAULT;
        }
 
+       if (hw_data->measure_clock) {
+               ret = hw_data->measure_clock(accel_dev);
+               if (ret) {
+                       dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
+                       return ret;
+               }
+       }
+
        /* Set ssm watch dog timer */
        if (hw_data->set_ssm_wdtimer)
                hw_data->set_ssm_wdtimer(accel_dev);
@@ -187,6 +199,16 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
                return -EFAULT;
        }
 
+       if (hw_data->start_timer) {
+               ret = hw_data->start_timer(accel_dev);
+               if (ret) {
+                       dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
+                       return ret;
+               }
+       }
+
+       adf_heartbeat_start(accel_dev);
+
        list_for_each(list_itr, &service_table) {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_START)) {
@@ -235,6 +257,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
  */
 static void adf_dev_stop(struct adf_accel_dev *accel_dev)
 {
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;
        bool wait = false;
@@ -270,6 +293,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
                }
        }
 
+       if (hw_data->stop_timer)
+               hw_data->stop_timer(accel_dev);
+
        if (wait)
                msleep(100);
 
@@ -326,6 +352,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
                        clear_bit(accel_dev->accel_id, service->init_status);
        }
 
+       adf_heartbeat_shutdown(accel_dev);
+
        hw_data->disable_iov(accel_dev);
 
        if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
index ad9e135..2aba194 100644 (file)
@@ -380,3 +380,9 @@ bool adf_misc_wq_queue_work(struct work_struct *work)
 {
        return queue_work(adf_misc_wq, work);
 }
+
+/* Queue delayed work on the driver-wide misc workqueue (adf_misc_wq);
+ * returns false if the work was already pending (queue_delayed_work
+ * semantics).
+ */
+bool adf_misc_wq_queue_delayed_work(struct delayed_work *work,
+                                   unsigned long delay)
+{
+       return queue_delayed_work(adf_misc_wq, work, delay);
+}
index 56cb827..3e968a4 100644 (file)
@@ -16,6 +16,8 @@ enum icp_qat_fw_init_admin_cmd_id {
        ICP_QAT_FW_HEARTBEAT_SYNC = 7,
        ICP_QAT_FW_HEARTBEAT_GET = 8,
        ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+       ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+       ICP_QAT_FW_TIMER_GET = 19,
        ICP_QAT_FW_PM_STATE_CONFIG = 128,
 };
 
@@ -37,6 +39,12 @@ struct icp_qat_fw_init_admin_req {
                        __u16 ibuf_size_in_kb;
                        __u16 resrvd3;
                };
+               struct {
+                       __u32 int_timer_ticks;
+               };
+               struct {
+                       __u32 heartbeat_ticks;
+               };
                __u32 idle_filter;
        };
 
@@ -97,19 +105,6 @@ struct icp_qat_fw_init_admin_resp {
        };
 } __packed;
 
-#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
-       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
-
-#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
-       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
 
-#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
-       QAT_FIELD_GET(flags, \
-                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
-                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
 #endif
index a65059e..0c8883e 100644 (file)
@@ -97,7 +97,10 @@ enum icp_qat_capabilities_mask {
        ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
        ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
        ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
-       /* Bits 18-21 are currently reserved */
+       ICP_ACCEL_CAPABILITIES_SM2 = BIT(18),
+       ICP_ACCEL_CAPABILITIES_SM3 = BIT(19),
+       ICP_ACCEL_CAPABILITIES_SM4 = BIT(20),
+       /* Bit 21 is currently reserved */
        ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY = BIT(22),
        ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
        ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
index 3f1f352..7842a9f 100644 (file)
@@ -234,8 +234,7 @@ static void qat_free_dc_data(struct adf_accel_dev *accel_dev)
 
        dma_unmap_single(dev, dc_data->ovf_buff_p, dc_data->ovf_buff_sz,
                         DMA_FROM_DEVICE);
-       memset(dc_data->ovf_buff, 0, dc_data->ovf_buff_sz);
-       kfree(dc_data->ovf_buff);
+       kfree_sensitive(dc_data->ovf_buff);
        devm_kfree(dev, dc_data);
        accel_dev->dc_data = NULL;
 }
index ce837bc..4bd150d 100644 (file)
@@ -11,7 +11,7 @@
 #include "icp_qat_hal.h"
 #include "icp_qat_fw_loader_handle.h"
 
-#define UWORD_CPYBUF_SIZE 1024
+#define UWORD_CPYBUF_SIZE 1024U
 #define INVLD_UWORD 0xffffffffffull
 #define PID_MINOR_REV 0xf
 #define PID_MAJOR_REV (0xf << 4)
@@ -1986,10 +1986,7 @@ static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
        uw_relative_addr = 0;
        words_num = encap_page->micro_words_num;
        while (words_num) {
-               if (words_num < UWORD_CPYBUF_SIZE)
-                       cpylen = words_num;
-               else
-                       cpylen = UWORD_CPYBUF_SIZE;
+               cpylen = min(words_num, UWORD_CPYBUF_SIZE);
 
                /* load the buffer */
                for (i = 0; i < cpylen; i++)
index 1ebe0b3..09551f9 100644 (file)
@@ -7,6 +7,7 @@
 #include <adf_gen2_hw_data.h>
 #include <adf_gen2_pfvf.h>
 #include "adf_dh895xcc_hw_data.h"
+#include "adf_heartbeat.h"
 #include "icp_qat_hw.h"
 
 #define ADF_DH895XCC_VF_MSK    0xFFFFFFFF
@@ -44,6 +45,14 @@ static u32 get_misc_bar_id(struct adf_hw_device_data *self)
        return ADF_DH895XCC_PMISC_BAR;
 }
 
+/*
+ * Heartbeat clock callback for dh895xcc.
+ * Return: timestamp clock frequency in Hz, i.e. the AE clock divided
+ * by 16.
+ */
+static u32 get_ts_clock(struct adf_hw_device_data *self)
+{
+       /*
+        * Timestamp update interval is 16 AE clock ticks for dh895xcc.
+        */
+       return self->clock_frequency / 16;
+}
+
 static u32 get_etr_bar_id(struct adf_hw_device_data *self)
 {
        return ADF_DH895XCC_ETR_BAR;
@@ -237,6 +246,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
        hw_data->reset_device = adf_reset_sbr;
        hw_data->disable_iov = adf_disable_sriov;
        hw_data->dev_config = adf_gen2_dev_config;
+       hw_data->clock_frequency = ADF_DH895X_AE_FREQ;
+       hw_data->get_hb_clock = get_ts_clock;
+       hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+       hw_data->check_hb_ctrs = adf_heartbeat_check_ctrs;
 
        adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops);
        hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts;
index 7b674bb..cd3a219 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef ADF_DH895x_HW_DATA_H_
 #define ADF_DH895x_HW_DATA_H_
 
+#include <linux/units.h>
+
 /* PCIe configuration space */
 #define ADF_DH895XCC_SRAM_BAR 0
 #define ADF_DH895XCC_PMISC_BAR 1
@@ -30,6 +32,9 @@
 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
 #define ADF_DH895XCC_AE2FUNC_MAP_GRP_B_NUM_REGS 12
 
+/* Clocks frequency */
+#define ADF_DH895X_AE_FREQ (933 * HZ_PER_MHZ)
+
 /* FW names */
 #define ADF_DH895XCC_FW "qat_895xcc.bin"
 #define ADF_DH895XCC_MMP "qat_895xcc_mmp.bin"
index 4f6ca22..d5a32d7 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/cpumask.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
index 9f937bd..c498950 100644 (file)
@@ -7,18 +7,21 @@
  * Copyright (c) 2016 Texas Instruments Incorporated
  */
 
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
 #include <linux/errno.h>
-#include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/omap-dma.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/omap-dma.h>
 #include <linux/pm_runtime.h>
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/skcipher.h>
-#include <crypto/internal/aead.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
 
 #include "omap-crypto.h"
 #include "omap-aes.h"
@@ -212,12 +215,10 @@ static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
        return 0;
 }
 
-static int omap_aes_gcm_prepare_req(struct crypto_engine *engine, void *areq)
+static int omap_aes_gcm_prepare_req(struct aead_request *req,
+                                   struct omap_aes_dev *dd)
 {
-       struct aead_request *req = container_of(areq, struct aead_request,
-                                               base);
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
-       struct omap_aes_dev *dd = rctx->dd;
        struct omap_aes_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        int err;
 
@@ -356,17 +357,21 @@ int omap_aes_4106gcm_setauthsize(struct crypto_aead *parent,
        return crypto_rfc4106_check_authsize(authsize);
 }
 
-static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
+int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
 {
        struct aead_request *req = container_of(areq, struct aead_request,
                                                base);
        struct omap_aes_reqctx *rctx = aead_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;
-       int ret = 0;
+       int ret;
 
        if (!dd)
                return -ENODEV;
 
+       ret = omap_aes_gcm_prepare_req(req, dd);
+       if (ret)
+               return ret;
+
        if (dd->in_sg_len)
                ret = omap_aes_crypt_dma_start(dd);
        else
@@ -377,12 +382,6 @@ static int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq)
 
 int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
 {
-       struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
-
-       ctx->enginectx.op.prepare_request = omap_aes_gcm_prepare_req;
-       ctx->enginectx.op.unprepare_request = NULL;
-       ctx->enginectx.op.do_one_request = omap_aes_gcm_crypt_req;
-
        crypto_aead_set_reqsize(tfm, sizeof(struct omap_aes_reqctx));
 
        return 0;
index 67a99c7..ed83023 100644 (file)
 #define prn(num) pr_debug(#num "=%d\n", num)
 #define prx(num) pr_debug(#num "=%x\n", num)
 
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/engine.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/pm_runtime.h>
+#include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/aes.h>
-#include <crypto/gcm.h>
-#include <crypto/engine.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/internal/aead.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
 
 #include "omap-crypto.h"
 #include "omap-aes.h"
@@ -426,20 +424,15 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
        return 0;
 }
 
-static int omap_aes_prepare_req(struct crypto_engine *engine,
-                               void *areq)
+static int omap_aes_prepare_req(struct skcipher_request *req,
+                               struct omap_aes_dev *dd)
 {
-       struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
        struct omap_aes_ctx *ctx = crypto_skcipher_ctx(
                        crypto_skcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = skcipher_request_ctx(req);
-       struct omap_aes_dev *dd = rctx->dd;
        int ret;
        u16 flags;
 
-       if (!dd)
-               return -ENODEV;
-
        /* assign new request to device */
        dd->req = req;
        dd->total = req->cryptlen;
@@ -491,7 +484,8 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
        if (!dd)
                return -ENODEV;
 
-       return omap_aes_crypt_dma_start(dd);
+       return omap_aes_prepare_req(req, dd) ?:
+              omap_aes_crypt_dma_start(dd);
 }
 
 static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
@@ -629,11 +623,6 @@ static int omap_aes_ctr_decrypt(struct skcipher_request *req)
        return omap_aes_crypt(req, FLAGS_CTR);
 }
 
-static int omap_aes_prepare_req(struct crypto_engine *engine,
-                               void *req);
-static int omap_aes_crypt_req(struct crypto_engine *engine,
-                             void *req);
-
 static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
 {
        const char *name = crypto_tfm_alg_name(&tfm->base);
@@ -649,10 +638,6 @@ static int omap_aes_init_tfm(struct crypto_skcipher *tfm)
        crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_aes_reqctx) +
                                         crypto_skcipher_reqsize(blk));
 
-       ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
-       ctx->enginectx.op.unprepare_request = NULL;
-       ctx->enginectx.op.do_one_request = omap_aes_crypt_req;
-
        return 0;
 }
 
@@ -668,68 +653,77 @@ static void omap_aes_exit_tfm(struct crypto_skcipher *tfm)
 
 /* ********************** ALGS ************************************ */
 
-static struct skcipher_alg algs_ecb_cbc[] = {
+static struct skcipher_engine_alg algs_ecb_cbc[] = {
 {
-       .base.cra_name          = "ecb(aes)",
-       .base.cra_driver_name   = "ecb-aes-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = AES_MIN_KEY_SIZE,
-       .max_keysize            = AES_MAX_KEY_SIZE,
-       .setkey                 = omap_aes_setkey,
-       .encrypt                = omap_aes_ecb_encrypt,
-       .decrypt                = omap_aes_ecb_decrypt,
-       .init                   = omap_aes_init_tfm,
-       .exit                   = omap_aes_exit_tfm,
+       .base = {
+               .base.cra_name          = "ecb(aes)",
+               .base.cra_driver_name   = "ecb-aes-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC |
+                                         CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .setkey                 = omap_aes_setkey,
+               .encrypt                = omap_aes_ecb_encrypt,
+               .decrypt                = omap_aes_ecb_decrypt,
+               .init                   = omap_aes_init_tfm,
+               .exit                   = omap_aes_exit_tfm,
+       },
+       .op.do_one_request = omap_aes_crypt_req,
 },
 {
-       .base.cra_name          = "cbc(aes)",
-       .base.cra_driver_name   = "cbc-aes-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = AES_MIN_KEY_SIZE,
-       .max_keysize            = AES_MAX_KEY_SIZE,
-       .ivsize                 = AES_BLOCK_SIZE,
-       .setkey                 = omap_aes_setkey,
-       .encrypt                = omap_aes_cbc_encrypt,
-       .decrypt                = omap_aes_cbc_decrypt,
-       .init                   = omap_aes_init_tfm,
-       .exit                   = omap_aes_exit_tfm,
+       .base = {
+               .base.cra_name          = "cbc(aes)",
+               .base.cra_driver_name   = "cbc-aes-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC |
+                                         CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+               .setkey                 = omap_aes_setkey,
+               .encrypt                = omap_aes_cbc_encrypt,
+               .decrypt                = omap_aes_cbc_decrypt,
+               .init                   = omap_aes_init_tfm,
+               .exit                   = omap_aes_exit_tfm,
+       },
+       .op.do_one_request = omap_aes_crypt_req,
 }
 };
 
-static struct skcipher_alg algs_ctr[] = {
+static struct skcipher_engine_alg algs_ctr[] = {
 {
-       .base.cra_name          = "ctr(aes)",
-       .base.cra_driver_name   = "ctr-aes-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC |
-                                 CRYPTO_ALG_NEED_FALLBACK,
-       .base.cra_blocksize     = 1,
-       .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = AES_MIN_KEY_SIZE,
-       .max_keysize            = AES_MAX_KEY_SIZE,
-       .ivsize                 = AES_BLOCK_SIZE,
-       .setkey                 = omap_aes_setkey,
-       .encrypt                = omap_aes_ctr_encrypt,
-       .decrypt                = omap_aes_ctr_decrypt,
-       .init                   = omap_aes_init_tfm,
-       .exit                   = omap_aes_exit_tfm,
+       .base = {
+               .base.cra_name          = "ctr(aes)",
+               .base.cra_driver_name   = "ctr-aes-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC |
+                                         CRYPTO_ALG_NEED_FALLBACK,
+               .base.cra_blocksize     = 1,
+               .base.cra_ctxsize       = sizeof(struct omap_aes_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+               .setkey                 = omap_aes_setkey,
+               .encrypt                = omap_aes_ctr_encrypt,
+               .decrypt                = omap_aes_ctr_decrypt,
+               .init                   = omap_aes_init_tfm,
+               .exit                   = omap_aes_exit_tfm,
+       },
+       .op.do_one_request = omap_aes_crypt_req,
 }
 };
 
@@ -740,46 +734,52 @@ static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
        },
 };
 
-static struct aead_alg algs_aead_gcm[] = {
+static struct aead_engine_alg algs_aead_gcm[] = {
 {
        .base = {
-               .cra_name               = "gcm(aes)",
-               .cra_driver_name        = "gcm-aes-omap",
-               .cra_priority           = 300,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
-                                         CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .cra_blocksize          = 1,
-               .cra_ctxsize            = sizeof(struct omap_aes_gcm_ctx),
-               .cra_alignmask          = 0xf,
-               .cra_module             = THIS_MODULE,
+               .base = {
+                       .cra_name               = "gcm(aes)",
+                       .cra_driver_name        = "gcm-aes-omap",
+                       .cra_priority           = 300,
+                       .cra_flags              = CRYPTO_ALG_ASYNC |
+                                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct omap_aes_gcm_ctx),
+                       .cra_alignmask          = 0xf,
+                       .cra_module             = THIS_MODULE,
+               },
+               .init           = omap_aes_gcm_cra_init,
+               .ivsize         = GCM_AES_IV_SIZE,
+               .maxauthsize    = AES_BLOCK_SIZE,
+               .setkey         = omap_aes_gcm_setkey,
+               .setauthsize    = omap_aes_gcm_setauthsize,
+               .encrypt        = omap_aes_gcm_encrypt,
+               .decrypt        = omap_aes_gcm_decrypt,
        },
-       .init           = omap_aes_gcm_cra_init,
-       .ivsize         = GCM_AES_IV_SIZE,
-       .maxauthsize    = AES_BLOCK_SIZE,
-       .setkey         = omap_aes_gcm_setkey,
-       .setauthsize    = omap_aes_gcm_setauthsize,
-       .encrypt        = omap_aes_gcm_encrypt,
-       .decrypt        = omap_aes_gcm_decrypt,
+       .op.do_one_request = omap_aes_gcm_crypt_req,
 },
 {
        .base = {
-               .cra_name               = "rfc4106(gcm(aes))",
-               .cra_driver_name        = "rfc4106-gcm-aes-omap",
-               .cra_priority           = 300,
-               .cra_flags              = CRYPTO_ALG_ASYNC |
-                                         CRYPTO_ALG_KERN_DRIVER_ONLY,
-               .cra_blocksize          = 1,
-               .cra_ctxsize            = sizeof(struct omap_aes_gcm_ctx),
-               .cra_alignmask          = 0xf,
-               .cra_module             = THIS_MODULE,
+               .base = {
+                       .cra_name               = "rfc4106(gcm(aes))",
+                       .cra_driver_name        = "rfc4106-gcm-aes-omap",
+                       .cra_priority           = 300,
+                       .cra_flags              = CRYPTO_ALG_ASYNC |
+                                                 CRYPTO_ALG_KERN_DRIVER_ONLY,
+                       .cra_blocksize          = 1,
+                       .cra_ctxsize            = sizeof(struct omap_aes_gcm_ctx),
+                       .cra_alignmask          = 0xf,
+                       .cra_module             = THIS_MODULE,
+               },
+               .init           = omap_aes_gcm_cra_init,
+               .maxauthsize    = AES_BLOCK_SIZE,
+               .ivsize         = GCM_RFC4106_IV_SIZE,
+               .setkey         = omap_aes_4106gcm_setkey,
+               .setauthsize    = omap_aes_4106gcm_setauthsize,
+               .encrypt        = omap_aes_4106gcm_encrypt,
+               .decrypt        = omap_aes_4106gcm_decrypt,
        },
-       .init           = omap_aes_gcm_cra_init,
-       .maxauthsize    = AES_BLOCK_SIZE,
-       .ivsize         = GCM_RFC4106_IV_SIZE,
-       .setkey         = omap_aes_4106gcm_setkey,
-       .setauthsize    = omap_aes_4106gcm_setauthsize,
-       .encrypt        = omap_aes_4106gcm_encrypt,
-       .decrypt        = omap_aes_4106gcm_decrypt,
+       .op.do_one_request = omap_aes_gcm_crypt_req,
 },
 };
 
@@ -1101,8 +1101,8 @@ static int omap_aes_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
-       struct skcipher_alg *algp;
-       struct aead_alg *aalg;
+       struct skcipher_engine_alg *algp;
+       struct aead_engine_alg *aalg;
        struct resource res;
        int err = -ENOMEM, i, j, irq = -1;
        u32 reg;
@@ -1195,9 +1195,9 @@ static int omap_aes_probe(struct platform_device *pdev)
                        for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                                algp = &dd->pdata->algs_info[i].algs_list[j];
 
-                               pr_debug("reg alg: %s\n", algp->base.cra_name);
+                               pr_debug("reg alg: %s\n", algp->base.base.cra_name);
 
-                               err = crypto_register_skcipher(algp);
+                               err = crypto_engine_register_skcipher(algp);
                                if (err)
                                        goto err_algs;
 
@@ -1211,9 +1211,9 @@ static int omap_aes_probe(struct platform_device *pdev)
                for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
                        aalg = &dd->pdata->aead_algs_info->algs_list[i];
 
-                       pr_debug("reg alg: %s\n", aalg->base.cra_name);
+                       pr_debug("reg alg: %s\n", aalg->base.base.cra_name);
 
-                       err = crypto_register_aead(aalg);
+                       err = crypto_engine_register_aead(aalg);
                        if (err)
                                goto err_aead_algs;
 
@@ -1231,12 +1231,12 @@ static int omap_aes_probe(struct platform_device *pdev)
 err_aead_algs:
        for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
                aalg = &dd->pdata->aead_algs_info->algs_list[i];
-               crypto_unregister_aead(aalg);
+               crypto_engine_unregister_aead(aalg);
        }
 err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
-                       crypto_unregister_skcipher(
+                       crypto_engine_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);
 
 err_engine:
@@ -1258,7 +1258,7 @@ err_data:
 static int omap_aes_remove(struct platform_device *pdev)
 {
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
-       struct aead_alg *aalg;
+       struct aead_engine_alg *aalg;
        int i, j;
 
        spin_lock_bh(&list_lock);
@@ -1267,14 +1267,14 @@ static int omap_aes_remove(struct platform_device *pdev)
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
-                       crypto_unregister_skcipher(
+                       crypto_engine_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);
                        dd->pdata->algs_info[i].registered--;
                }
 
        for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
                aalg = &dd->pdata->aead_algs_info->algs_list[i];
-               crypto_unregister_aead(aalg);
+               crypto_engine_unregister_aead(aalg);
                dd->pdata->aead_algs_info->registered--;
        }
 
index 23d073e..0f35c91 100644 (file)
@@ -10,7 +10,6 @@
 #define __OMAP_AES_H__
 
 #include <crypto/aes.h>
-#include <crypto/engine.h>
 
 #define DST_MAXBURST                   4
 #define DMA_MIN                                (DST_MAXBURST * sizeof(u32))
@@ -93,7 +92,6 @@ struct omap_aes_gcm_result {
 };
 
 struct omap_aes_ctx {
-       struct crypto_engine_ctx enginectx;
        int             keylen;
        u32             key[AES_KEYSIZE_256 / sizeof(u32)];
        u8              nonce[4];
@@ -117,15 +115,15 @@ struct omap_aes_reqctx {
 #define OMAP_AES_CACHE_SIZE    0
 
 struct omap_aes_algs_info {
-       struct skcipher_alg     *algs_list;
-       unsigned int            size;
-       unsigned int            registered;
+       struct skcipher_engine_alg      *algs_list;
+       unsigned int                    size;
+       unsigned int                    registered;
 };
 
 struct omap_aes_aead_algs {
-       struct aead_alg *algs_list;
-       unsigned int    size;
-       unsigned int    registered;
+       struct aead_engine_alg          *algs_list;
+       unsigned int                    size;
+       unsigned int                    registered;
 };
 
 struct omap_aes_pdata {
@@ -218,5 +216,6 @@ int omap_aes_crypt_dma_start(struct omap_aes_dev *dd);
 int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd);
 void omap_aes_gcm_dma_out_callback(void *data);
 void omap_aes_clear_copy_flags(struct omap_aes_dev *dd);
+int omap_aes_gcm_crypt_req(struct crypto_engine *engine, void *areq);
 
 #endif
index f783769..089dd45 100644 (file)
 #define prx(num)  do { } while (0)
 #endif
 
+#include <crypto/engine.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
 #include <linux/pm_runtime.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-#include <linux/io.h>
-#include <linux/crypto.h>
-#include <linux/interrupt.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/des.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/algapi.h>
-#include <crypto/engine.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
 
 #include "omap-crypto.h"
 
@@ -83,7 +79,6 @@
 #define FLAGS_OUT_DATA_ST_SHIFT        10
 
 struct omap_des_ctx {
-       struct crypto_engine_ctx enginectx;
        struct omap_des_dev *dd;
 
        int             keylen;
@@ -99,9 +94,9 @@ struct omap_des_reqctx {
 #define OMAP_DES_CACHE_SIZE    0
 
 struct omap_des_algs_info {
-       struct skcipher_alg     *algs_list;
-       unsigned int            size;
-       unsigned int            registered;
+       struct skcipher_engine_alg      *algs_list;
+       unsigned int                    size;
+       unsigned int                    registered;
 };
 
 struct omap_des_pdata {
@@ -522,20 +517,15 @@ static int omap_des_handle_queue(struct omap_des_dev *dd,
        return 0;
 }
 
-static int omap_des_prepare_req(struct crypto_engine *engine,
-                               void *areq)
+static int omap_des_prepare_req(struct skcipher_request *req,
+                               struct omap_des_dev *dd)
 {
-       struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
        struct omap_des_ctx *ctx = crypto_skcipher_ctx(
                        crypto_skcipher_reqtfm(req));
-       struct omap_des_dev *dd = omap_des_find_dev(ctx);
        struct omap_des_reqctx *rctx;
        int ret;
        u16 flags;
 
-       if (!dd)
-               return -ENODEV;
-
        /* assign new request to device */
        dd->req = req;
        dd->total = req->cryptlen;
@@ -590,7 +580,8 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
        if (!dd)
                return -ENODEV;
 
-       return omap_des_crypt_dma_start(dd);
+       return omap_des_prepare_req(req, dd) ?:
+              omap_des_crypt_dma_start(dd);
 }
 
 static void omap_des_done_task(unsigned long data)
@@ -709,98 +700,99 @@ static int omap_des_cbc_decrypt(struct skcipher_request *req)
        return omap_des_crypt(req, FLAGS_CBC);
 }
 
-static int omap_des_prepare_req(struct crypto_engine *engine,
-                               void *areq);
-static int omap_des_crypt_req(struct crypto_engine *engine,
-                             void *areq);
-
 static int omap_des_init_tfm(struct crypto_skcipher *tfm)
 {
-       struct omap_des_ctx *ctx = crypto_skcipher_ctx(tfm);
-
        pr_debug("enter\n");
 
        crypto_skcipher_set_reqsize(tfm, sizeof(struct omap_des_reqctx));
 
-       ctx->enginectx.op.prepare_request = omap_des_prepare_req;
-       ctx->enginectx.op.unprepare_request = NULL;
-       ctx->enginectx.op.do_one_request = omap_des_crypt_req;
-
        return 0;
 }
 
 /* ********************** ALGS ************************************ */
 
-static struct skcipher_alg algs_ecb_cbc[] = {
+static struct skcipher_engine_alg algs_ecb_cbc[] = {
 {
-       .base.cra_name          = "ecb(des)",
-       .base.cra_driver_name   = "ecb-des-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = DES_KEY_SIZE,
-       .max_keysize            = DES_KEY_SIZE,
-       .setkey                 = omap_des_setkey,
-       .encrypt                = omap_des_ecb_encrypt,
-       .decrypt                = omap_des_ecb_decrypt,
-       .init                   = omap_des_init_tfm,
+       .base = {
+               .base.cra_name          = "ecb(des)",
+               .base.cra_driver_name   = "ecb-des-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = DES_KEY_SIZE,
+               .max_keysize            = DES_KEY_SIZE,
+               .setkey                 = omap_des_setkey,
+               .encrypt                = omap_des_ecb_encrypt,
+               .decrypt                = omap_des_ecb_decrypt,
+               .init                   = omap_des_init_tfm,
+       },
+       .op.do_one_request = omap_des_crypt_req,
 },
 {
-       .base.cra_name          = "cbc(des)",
-       .base.cra_driver_name   = "cbc-des-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = DES_KEY_SIZE,
-       .max_keysize            = DES_KEY_SIZE,
-       .ivsize                 = DES_BLOCK_SIZE,
-       .setkey                 = omap_des_setkey,
-       .encrypt                = omap_des_cbc_encrypt,
-       .decrypt                = omap_des_cbc_decrypt,
-       .init                   = omap_des_init_tfm,
+       .base = {
+               .base.cra_name          = "cbc(des)",
+               .base.cra_driver_name   = "cbc-des-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = DES_KEY_SIZE,
+               .max_keysize            = DES_KEY_SIZE,
+               .ivsize                 = DES_BLOCK_SIZE,
+               .setkey                 = omap_des_setkey,
+               .encrypt                = omap_des_cbc_encrypt,
+               .decrypt                = omap_des_cbc_decrypt,
+               .init                   = omap_des_init_tfm,
+       },
+       .op.do_one_request = omap_des_crypt_req,
 },
 {
-       .base.cra_name          = "ecb(des3_ede)",
-       .base.cra_driver_name   = "ecb-des3-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = DES3_EDE_KEY_SIZE,
-       .max_keysize            = DES3_EDE_KEY_SIZE,
-       .setkey                 = omap_des3_setkey,
-       .encrypt                = omap_des_ecb_encrypt,
-       .decrypt                = omap_des_ecb_decrypt,
-       .init                   = omap_des_init_tfm,
+       .base = {
+               .base.cra_name          = "ecb(des3_ede)",
+               .base.cra_driver_name   = "ecb-des3-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = DES3_EDE_KEY_SIZE,
+               .max_keysize            = DES3_EDE_KEY_SIZE,
+               .setkey                 = omap_des3_setkey,
+               .encrypt                = omap_des_ecb_encrypt,
+               .decrypt                = omap_des_ecb_decrypt,
+               .init                   = omap_des_init_tfm,
+       },
+       .op.do_one_request = omap_des_crypt_req,
 },
 {
-       .base.cra_name          = "cbc(des3_ede)",
-       .base.cra_driver_name   = "cbc-des3-omap",
-       .base.cra_priority      = 300,
-       .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                 CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
-       .base.cra_module        = THIS_MODULE,
-
-       .min_keysize            = DES3_EDE_KEY_SIZE,
-       .max_keysize            = DES3_EDE_KEY_SIZE,
-       .ivsize                 = DES3_EDE_BLOCK_SIZE,
-       .setkey                 = omap_des3_setkey,
-       .encrypt                = omap_des_cbc_encrypt,
-       .decrypt                = omap_des_cbc_decrypt,
-       .init                   = omap_des_init_tfm,
+       .base = {
+               .base.cra_name          = "cbc(des3_ede)",
+               .base.cra_driver_name   = "cbc-des3-omap",
+               .base.cra_priority      = 300,
+               .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                         CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct omap_des_ctx),
+               .base.cra_module        = THIS_MODULE,
+
+               .min_keysize            = DES3_EDE_KEY_SIZE,
+               .max_keysize            = DES3_EDE_KEY_SIZE,
+               .ivsize                 = DES3_EDE_BLOCK_SIZE,
+               .setkey                 = omap_des3_setkey,
+               .encrypt                = omap_des_cbc_encrypt,
+               .decrypt                = omap_des_cbc_decrypt,
+               .init                   = omap_des_init_tfm,
+       },
+       .op.do_one_request = omap_des_crypt_req,
 }
 };
 
@@ -958,7 +950,7 @@ static int omap_des_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct omap_des_dev *dd;
-       struct skcipher_alg *algp;
+       struct skcipher_engine_alg *algp;
        struct resource *res;
        int err = -ENOMEM, i, j, irq = -1;
        u32 reg;
@@ -971,18 +963,12 @@ static int omap_des_probe(struct platform_device *pdev)
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "no MEM resource info\n");
-               goto err_res;
-       }
-
        err = (dev->of_node) ? omap_des_get_of(dd, pdev) :
                               omap_des_get_pdev(dd, pdev);
        if (err)
                goto err_res;
 
-       dd->io_base = devm_ioremap_resource(dev, res);
+       dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(dd->io_base)) {
                err = PTR_ERR(dd->io_base);
                goto err_res;
@@ -1052,9 +1038,9 @@ static int omap_des_probe(struct platform_device *pdev)
                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                        algp = &dd->pdata->algs_info[i].algs_list[j];
 
-                       pr_debug("reg alg: %s\n", algp->base.cra_name);
+                       pr_debug("reg alg: %s\n", algp->base.base.cra_name);
 
-                       err = crypto_register_skcipher(algp);
+                       err = crypto_engine_register_skcipher(algp);
                        if (err)
                                goto err_algs;
 
@@ -1067,7 +1053,7 @@ static int omap_des_probe(struct platform_device *pdev)
 err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
-                       crypto_unregister_skcipher(
+                       crypto_engine_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);
 
 err_engine:
@@ -1097,7 +1083,7 @@ static int omap_des_remove(struct platform_device *pdev)
 
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
-                       crypto_unregister_skcipher(
+                       crypto_engine_unregister_skcipher(
                                        &dd->pdata->algs_info[i].algs_list[j]);
 
        tasklet_kill(&dd->done_task);
index cbeda59..a6b4a0b 100644 (file)
 
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
+#include <crypto/engine.h>
+#include <crypto/hmac.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
 #include <linux/err.h>
 #include <linux/device.h>
-#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
 #include <linux/init.h>
-#include <linux/errno.h>
 #include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
 #include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/pm_runtime.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/delay.h>
-#include <linux/crypto.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
-#include <crypto/internal/hash.h>
-#include <crypto/engine.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 
 #define MD5_DIGEST_SIZE                        16
 
@@ -168,7 +164,6 @@ struct omap_sham_hmac_ctx {
 };
 
 struct omap_sham_ctx {
-       struct crypto_engine_ctx        enginectx;
        unsigned long           flags;
 
        /* fallback stuff */
@@ -180,7 +175,7 @@ struct omap_sham_ctx {
 #define OMAP_SHAM_QUEUE_LENGTH 10
 
 struct omap_sham_algs_info {
-       struct ahash_alg        *algs_list;
+       struct ahash_engine_alg *algs_list;
        unsigned int            size;
        unsigned int            registered;
 };
@@ -1074,6 +1069,10 @@ static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
        dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
                ctx->op, ctx->total, ctx->digcnt, final);
 
+       err = omap_sham_prepare_request(engine, areq);
+       if (err)
+               return err;
+
        err = pm_runtime_resume_and_get(dd->dev);
        if (err < 0) {
                dev_err(dd->dev, "failed to get sync: %d\n", err);
@@ -1349,10 +1348,6 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 
        }
 
-       tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
-       tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
-       tctx->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -1423,15 +1418,15 @@ static int omap_sham_import(struct ahash_request *req, const void *in)
        return 0;
 }
 
-static struct ahash_alg algs_sha1_md5[] = {
+static struct ahash_engine_alg algs_sha1_md5[] = {
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .halg.digestsize        = SHA1_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.halg.digestsize   = SHA1_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "sha1",
                .cra_driver_name        = "omap-sha1",
                .cra_priority           = 400,
@@ -1444,16 +1439,17 @@ static struct ahash_alg algs_sha1_md5[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .halg.digestsize        = MD5_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.halg.digestsize   = MD5_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "md5",
                .cra_driver_name        = "omap-md5",
                .cra_priority           = 400,
@@ -1466,17 +1462,18 @@ static struct ahash_alg algs_sha1_md5[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .setkey         = omap_sham_setkey,
-       .halg.digestsize        = SHA1_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.setkey            = omap_sham_setkey,
+       .base.halg.digestsize   = SHA1_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "hmac(sha1)",
                .cra_driver_name        = "omap-hmac-sha1",
                .cra_priority           = 400,
@@ -1490,17 +1487,18 @@ static struct ahash_alg algs_sha1_md5[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_sha1_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .setkey         = omap_sham_setkey,
-       .halg.digestsize        = MD5_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.setkey            = omap_sham_setkey,
+       .base.halg.digestsize   = MD5_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "hmac(md5)",
                .cra_driver_name        = "omap-hmac-md5",
                .cra_priority           = 400,
@@ -1514,20 +1512,21 @@ static struct ahash_alg algs_sha1_md5[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_md5_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 }
 };
 
 /* OMAP4 has some algs in addition to what OMAP2 has */
-static struct ahash_alg algs_sha224_sha256[] = {
-{
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .halg.digestsize        = SHA224_DIGEST_SIZE,
-       .halg.base      = {
+static struct ahash_engine_alg algs_sha224_sha256[] = {
+{
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.halg.digestsize   = SHA224_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "sha224",
                .cra_driver_name        = "omap-sha224",
                .cra_priority           = 400,
@@ -1540,16 +1539,17 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .halg.digestsize        = SHA256_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.halg.digestsize   = SHA256_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "sha256",
                .cra_driver_name        = "omap-sha256",
                .cra_priority           = 400,
@@ -1562,17 +1562,18 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .setkey         = omap_sham_setkey,
-       .halg.digestsize        = SHA224_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.setkey            = omap_sham_setkey,
+       .base.halg.digestsize   = SHA224_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "hmac(sha224)",
                .cra_driver_name        = "omap-hmac-sha224",
                .cra_priority           = 400,
@@ -1586,17 +1587,18 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_sha224_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .setkey         = omap_sham_setkey,
-       .halg.digestsize        = SHA256_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.setkey            = omap_sham_setkey,
+       .base.halg.digestsize   = SHA256_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "hmac(sha256)",
                .cra_driver_name        = "omap-hmac-sha256",
                .cra_priority           = 400,
@@ -1610,19 +1612,20 @@ static struct ahash_alg algs_sha224_sha256[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_sha256_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 };
 
-static struct ahash_alg algs_sha384_sha512[] = {
+static struct ahash_engine_alg algs_sha384_sha512[] = {
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .halg.digestsize        = SHA384_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.halg.digestsize   = SHA384_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "sha384",
                .cra_driver_name        = "omap-sha384",
                .cra_priority           = 400,
@@ -1635,16 +1638,17 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .halg.digestsize        = SHA512_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.halg.digestsize   = SHA512_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "sha512",
                .cra_driver_name        = "omap-sha512",
                .cra_priority           = 400,
@@ -1657,17 +1661,18 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .setkey         = omap_sham_setkey,
-       .halg.digestsize        = SHA384_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.setkey            = omap_sham_setkey,
+       .base.halg.digestsize   = SHA384_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "hmac(sha384)",
                .cra_driver_name        = "omap-hmac-sha384",
                .cra_priority           = 400,
@@ -1681,17 +1686,18 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_sha384_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 {
-       .init           = omap_sham_init,
-       .update         = omap_sham_update,
-       .final          = omap_sham_final,
-       .finup          = omap_sham_finup,
-       .digest         = omap_sham_digest,
-       .setkey         = omap_sham_setkey,
-       .halg.digestsize        = SHA512_DIGEST_SIZE,
-       .halg.base      = {
+       .base.init              = omap_sham_init,
+       .base.update            = omap_sham_update,
+       .base.final             = omap_sham_final,
+       .base.finup             = omap_sham_finup,
+       .base.digest            = omap_sham_digest,
+       .base.setkey            = omap_sham_setkey,
+       .base.halg.digestsize   = SHA512_DIGEST_SIZE,
+       .base.halg.base = {
                .cra_name               = "hmac(sha512)",
                .cra_driver_name        = "omap-hmac-sha512",
                .cra_priority           = 400,
@@ -1705,7 +1711,8 @@ static struct ahash_alg algs_sha384_sha512[] = {
                .cra_module             = THIS_MODULE,
                .cra_init               = omap_sham_cra_sha512_init,
                .cra_exit               = omap_sham_cra_exit,
-       }
+       },
+       .op.do_one_request = omap_sham_hash_one_req,
 },
 };
 
@@ -2146,14 +2153,16 @@ static int omap_sham_probe(struct platform_device *pdev)
                        break;
 
                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
+                       struct ahash_engine_alg *ealg;
                        struct ahash_alg *alg;
 
-                       alg = &dd->pdata->algs_info[i].algs_list[j];
+                       ealg = &dd->pdata->algs_info[i].algs_list[j];
+                       alg = &ealg->base;
                        alg->export = omap_sham_export;
                        alg->import = omap_sham_import;
                        alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
                                              BUFLEN;
-                       err = crypto_register_ahash(alg);
+                       err = crypto_engine_register_ahash(ealg);
                        if (err)
                                goto err_algs;
 
@@ -2172,7 +2181,7 @@ static int omap_sham_probe(struct platform_device *pdev)
 err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
-                       crypto_unregister_ahash(
+                       crypto_engine_unregister_ahash(
                                        &dd->pdata->algs_info[i].algs_list[j]);
 err_engine_start:
        crypto_engine_exit(dd->engine);
@@ -2203,7 +2212,7 @@ static int omap_sham_remove(struct platform_device *pdev)
        spin_unlock_bh(&sham.lock);
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
-                       crypto_unregister_ahash(
+                       crypto_engine_unregister_ahash(
                                        &dd->pdata->algs_info[i].algs_list[j]);
                        dd->pdata->algs_info[i].registered--;
                }
index 72dd1a4..825a729 100644 (file)
@@ -173,13 +173,9 @@ static int qcom_rng_probe(struct platform_device *pdev)
        if (IS_ERR(rng->base))
                return PTR_ERR(rng->base);
 
-       /* ACPI systems have clk already on, so skip clk_get */
-       if (!has_acpi_companion(&pdev->dev)) {
-               rng->clk = devm_clk_get(&pdev->dev, "core");
-               if (IS_ERR(rng->clk))
-                       return PTR_ERR(rng->clk);
-       }
-
+       rng->clk = devm_clk_get_optional(&pdev->dev, "core");
+       if (IS_ERR(rng->clk))
+               return PTR_ERR(rng->clk);
 
        rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
 
index 9f6ba77..77d5705 100644 (file)
  */
 
 #include "rk3288_crypto.h"
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/clk.h>
 #include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/reset.h>
+#include <linux/spinlock.h>
 
 static struct rockchip_ip rocklist = {
        .dev_list = LIST_HEAD_INIT(rocklist.dev_list),
@@ -184,7 +191,6 @@ static struct rk_crypto_tmp *rk_cipher_algs[] = {
        &rk_ahash_md5,
 };
 
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
 static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
 {
        struct rk_crypto_info *dd;
@@ -204,8 +210,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
                switch (rk_cipher_algs[i]->type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name,
-                                  rk_cipher_algs[i]->alg.skcipher.base.cra_name,
+                                  rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name,
+                                  rk_cipher_algs[i]->alg.skcipher.base.base.cra_name,
                                   rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
                        seq_printf(seq, "\tfallback due to length: %lu\n",
                                   rk_cipher_algs[i]->stat_fb_len);
@@ -216,8 +222,8 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
-                                  rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name,
-                                  rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
+                                  rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name,
+                                  rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name,
                                   rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb);
                        break;
                }
@@ -226,17 +232,20 @@ static int rk_crypto_debugfs_show(struct seq_file *seq, void *v)
 }
 
 DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs);
-#endif
 
 static void register_debugfs(struct rk_crypto_info *crypto_info)
 {
-#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+       struct dentry *dbgfs_dir __maybe_unused;
+       struct dentry *dbgfs_stats __maybe_unused;
+
        /* Ignore error of debugfs */
-       rocklist.dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
-       rocklist.dbgfs_stats = debugfs_create_file("stats", 0444,
-                                                  rocklist.dbgfs_dir,
-                                                  &rocklist,
-                                                  &rk_crypto_debugfs_fops);
+       dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL);
+       dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, &rocklist,
+                                         &rk_crypto_debugfs_fops);
+
+#ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG
+       rocklist.dbgfs_dir = dbgfs_dir;
+       rocklist.dbgfs_stats = dbgfs_stats;
 #endif
 }
 
@@ -250,15 +259,15 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
                switch (rk_cipher_algs[i]->type) {
                case CRYPTO_ALG_TYPE_SKCIPHER:
                        dev_info(crypto_info->dev, "Register %s as %s\n",
-                                rk_cipher_algs[i]->alg.skcipher.base.cra_name,
-                                rk_cipher_algs[i]->alg.skcipher.base.cra_driver_name);
-                       err = crypto_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+                                rk_cipher_algs[i]->alg.skcipher.base.base.cra_name,
+                                rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name);
+                       err = crypto_engine_register_skcipher(&rk_cipher_algs[i]->alg.skcipher);
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        dev_info(crypto_info->dev, "Register %s as %s\n",
-                                rk_cipher_algs[i]->alg.hash.halg.base.cra_name,
-                                rk_cipher_algs[i]->alg.hash.halg.base.cra_driver_name);
-                       err = crypto_register_ahash(&rk_cipher_algs[i]->alg.hash);
+                                rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name,
+                                rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name);
+                       err = crypto_engine_register_ahash(&rk_cipher_algs[i]->alg.hash);
                        break;
                default:
                        dev_err(crypto_info->dev, "unknown algorithm\n");
@@ -271,9 +280,9 @@ static int rk_crypto_register(struct rk_crypto_info *crypto_info)
 err_cipher_algs:
        for (k = 0; k < i; k++) {
                if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
-                       crypto_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
+                       crypto_engine_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher);
                else
-                       crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+                       crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
        }
        return err;
 }
@@ -284,9 +293,9 @@ static void rk_crypto_unregister(void)
 
        for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
                if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER)
-                       crypto_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
+                       crypto_engine_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher);
                else
-                       crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
+                       crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
        }
 }
 
index b269525..3aa03cb 100644 (file)
@@ -3,21 +3,18 @@
 #define __RK3288_CRYPTO_H__
 
 #include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/algapi.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/pm_runtime.h>
-#include <linux/scatterlist.h>
 #include <crypto/engine.h>
+#include <crypto/internal/des.h>
 #include <crypto/internal/hash.h>
 #include <crypto/internal/skcipher.h>
-
 #include <crypto/md5.h>
 #include <crypto/sha1.h>
 #include <crypto/sha2.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
 
 #define _SBF(v, f)                     ((v) << (f))
 
@@ -231,7 +228,6 @@ struct rk_crypto_info {
 
 /* the private variable of hash */
 struct rk_ahash_ctx {
-       struct crypto_engine_ctx enginectx;
        /* for fallback */
        struct crypto_ahash             *fallback_tfm;
 };
@@ -246,7 +242,6 @@ struct rk_ahash_rctx {
 
 /* the private variable of cipher */
 struct rk_cipher_ctx {
-       struct crypto_engine_ctx enginectx;
        unsigned int                    keylen;
        u8                              key[AES_MAX_KEY_SIZE];
        u8                              iv[AES_BLOCK_SIZE];
@@ -264,8 +259,8 @@ struct rk_crypto_tmp {
        u32 type;
        struct rk_crypto_info           *dev;
        union {
-               struct skcipher_alg     skcipher;
-               struct ahash_alg        hash;
+               struct skcipher_engine_alg skcipher;
+               struct ahash_engine_alg hash;
        } alg;
        unsigned long stat_req;
        unsigned long stat_fb;
index a78ff3d..8c14318 100644 (file)
@@ -8,9 +8,15 @@
  *
  * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
  */
-#include <linux/device.h>
+
 #include <asm/unaligned.h>
+#include <crypto/internal/hash.h>
+#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
 #include "rk3288_crypto.h"
 
 /*
@@ -40,8 +46,8 @@ static int rk_ahash_digest_fb(struct ahash_request *areq)
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
 
        algt->stat_fb++;
 
@@ -240,14 +246,13 @@ static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
        return 0;
 }
 
-static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
+static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
 {
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
        struct rk_crypto_info *rkc = rctx->dev;
 
        dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
-       return 0;
 }
 
 static int rk_hash_run(struct crypto_engine *engine, void *breq)
@@ -255,11 +260,11 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
        struct ahash_request *areq = container_of(breq, struct ahash_request, base);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
        struct scatterlist *sg = areq->src;
        struct rk_crypto_info *rkc = rctx->dev;
-       int err = 0;
+       int err;
        int i;
        u32 v;
 
@@ -267,6 +272,10 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
        if (err)
                return err;
 
+       err = rk_hash_prepare(engine, breq);
+       if (err)
+               goto theend;
+
        rctx->mode = 0;
 
        algt->stat_req++;
@@ -327,15 +336,17 @@ theend:
        crypto_finalize_hash_request(engine, breq, err);
        local_bh_enable();
 
+       rk_hash_unprepare(engine, breq);
+
        return 0;
 }
 
-static int rk_cra_hash_init(struct crypto_tfm *tfm)
+static int rk_hash_init_tfm(struct crypto_ahash *tfm)
 {
-       struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
-       const char *alg_name = crypto_tfm_alg_name(tfm);
-       struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
+       struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+       const char *alg_name = crypto_ahash_alg_name(tfm);
+       struct ahash_alg *alg = crypto_ahash_alg(tfm);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base);
 
        /* for fallback */
        tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
@@ -345,27 +356,23 @@ static int rk_cra_hash_init(struct crypto_tfm *tfm)
                return PTR_ERR(tctx->fallback_tfm);
        }
 
-       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+       crypto_ahash_set_reqsize(tfm,
                                 sizeof(struct rk_ahash_rctx) +
                                 crypto_ahash_reqsize(tctx->fallback_tfm));
 
-       tctx->enginectx.op.do_one_request = rk_hash_run;
-       tctx->enginectx.op.prepare_request = rk_hash_prepare;
-       tctx->enginectx.op.unprepare_request = rk_hash_unprepare;
-
        return 0;
 }
 
-static void rk_cra_hash_exit(struct crypto_tfm *tfm)
+static void rk_hash_exit_tfm(struct crypto_ahash *tfm)
 {
-       struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
+       struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
 
        crypto_free_ahash(tctx->fallback_tfm);
 }
 
 struct rk_crypto_tmp rk_ahash_sha1 = {
        .type = CRYPTO_ALG_TYPE_AHASH,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
@@ -373,6 +380,8 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
+               .init_tfm = rk_hash_init_tfm,
+               .exit_tfm = rk_hash_exit_tfm,
                .halg = {
                         .digestsize = SHA1_DIGEST_SIZE,
                         .statesize = sizeof(struct sha1_state),
@@ -385,17 +394,18 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
                                  .cra_blocksize = SHA1_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_alignmask = 3,
-                                 .cra_init = rk_cra_hash_init,
-                                 .cra_exit = rk_cra_hash_exit,
                                  .cra_module = THIS_MODULE,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = rk_hash_run,
+       },
 };
 
 struct rk_crypto_tmp rk_ahash_sha256 = {
        .type = CRYPTO_ALG_TYPE_AHASH,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
@@ -403,6 +413,8 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
+               .init_tfm = rk_hash_init_tfm,
+               .exit_tfm = rk_hash_exit_tfm,
                .halg = {
                         .digestsize = SHA256_DIGEST_SIZE,
                         .statesize = sizeof(struct sha256_state),
@@ -415,17 +427,18 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
                                  .cra_blocksize = SHA256_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_alignmask = 3,
-                                 .cra_init = rk_cra_hash_init,
-                                 .cra_exit = rk_cra_hash_exit,
                                  .cra_module = THIS_MODULE,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = rk_hash_run,
+       },
 };
 
 struct rk_crypto_tmp rk_ahash_md5 = {
        .type = CRYPTO_ALG_TYPE_AHASH,
-       .alg.hash = {
+       .alg.hash.base = {
                .init = rk_ahash_init,
                .update = rk_ahash_update,
                .final = rk_ahash_final,
@@ -433,6 +446,8 @@ struct rk_crypto_tmp rk_ahash_md5 = {
                .export = rk_ahash_export,
                .import = rk_ahash_import,
                .digest = rk_ahash_digest,
+               .init_tfm = rk_hash_init_tfm,
+               .exit_tfm = rk_hash_exit_tfm,
                .halg = {
                         .digestsize = MD5_DIGEST_SIZE,
                         .statesize = sizeof(struct md5_state),
@@ -445,10 +460,11 @@ struct rk_crypto_tmp rk_ahash_md5 = {
                                  .cra_blocksize = SHA1_BLOCK_SIZE,
                                  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
                                  .cra_alignmask = 3,
-                                 .cra_init = rk_cra_hash_init,
-                                 .cra_exit = rk_cra_hash_exit,
                                  .cra_module = THIS_MODULE,
                        }
                }
-       }
+       },
+       .alg.hash.op = {
+               .do_one_request = rk_hash_run,
+       },
 };
index 5906945..da95747 100644 (file)
@@ -8,8 +8,14 @@
  *
  * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
  */
-#include <linux/device.h>
+
+#include <crypto/engine.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
 #include "rk3288_crypto.h"
 
 #define RK_CRYPTO_DEC                  BIT(0)
@@ -18,7 +24,7 @@ static int rk_cipher_need_fallback(struct skcipher_request *req)
 {
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
        struct scatterlist *sgs, *sgd;
        unsigned int stodo, dtodo, len;
        unsigned int bs = crypto_skcipher_blocksize(tfm);
@@ -65,7 +71,7 @@ static int rk_cipher_fallback(struct skcipher_request *areq)
        struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
        struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
        int err;
 
        algt->stat_fb++;
@@ -305,7 +311,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
        unsigned int len = areq->cryptlen;
        unsigned int todo;
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
        struct rk_crypto_info *rkc = rctx->dev;
 
        err = pm_runtime_resume_and_get(rkc->dev);
@@ -430,7 +436,7 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
        struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
-       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+       struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
 
        ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback_tfm)) {
@@ -442,8 +448,6 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
        tfm->reqsize = sizeof(struct rk_cipher_rctx) +
                crypto_skcipher_reqsize(ctx->fallback_tfm);
 
-       ctx->enginectx.op.do_one_request = rk_cipher_run;
-
        return 0;
 }
 
@@ -457,7 +461,7 @@ static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
 
 struct rk_crypto_tmp rk_ecb_aes_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base.cra_name          = "ecb(aes)",
                .base.cra_driver_name   = "ecb-aes-rk",
                .base.cra_priority      = 300,
@@ -474,12 +478,15 @@ struct rk_crypto_tmp rk_ecb_aes_alg = {
                .setkey                 = rk_aes_setkey,
                .encrypt                = rk_aes_ecb_encrypt,
                .decrypt                = rk_aes_ecb_decrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = rk_cipher_run,
+       },
 };
 
 struct rk_crypto_tmp rk_cbc_aes_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "cbc-aes-rk",
                .base.cra_priority      = 300,
@@ -497,12 +504,15 @@ struct rk_crypto_tmp rk_cbc_aes_alg = {
                .setkey                 = rk_aes_setkey,
                .encrypt                = rk_aes_cbc_encrypt,
                .decrypt                = rk_aes_cbc_decrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = rk_cipher_run,
+       },
 };
 
 struct rk_crypto_tmp rk_ecb_des_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base.cra_name          = "ecb(des)",
                .base.cra_driver_name   = "ecb-des-rk",
                .base.cra_priority      = 300,
@@ -519,12 +529,15 @@ struct rk_crypto_tmp rk_ecb_des_alg = {
                .setkey                 = rk_des_setkey,
                .encrypt                = rk_des_ecb_encrypt,
                .decrypt                = rk_des_ecb_decrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = rk_cipher_run,
+       },
 };
 
 struct rk_crypto_tmp rk_cbc_des_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base.cra_name          = "cbc(des)",
                .base.cra_driver_name   = "cbc-des-rk",
                .base.cra_priority      = 300,
@@ -542,12 +555,15 @@ struct rk_crypto_tmp rk_cbc_des_alg = {
                .setkey                 = rk_des_setkey,
                .encrypt                = rk_des_cbc_encrypt,
                .decrypt                = rk_des_cbc_decrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = rk_cipher_run,
+       },
 };
 
 struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base.cra_name          = "ecb(des3_ede)",
                .base.cra_driver_name   = "ecb-des3-ede-rk",
                .base.cra_priority      = 300,
@@ -564,12 +580,15 @@ struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
                .setkey                 = rk_tdes_setkey,
                .encrypt                = rk_des3_ede_ecb_encrypt,
                .decrypt                = rk_des3_ede_ecb_decrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = rk_cipher_run,
+       },
 };
 
 struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
-       .alg.skcipher = {
+       .alg.skcipher.base = {
                .base.cra_name          = "cbc(des3_ede)",
                .base.cra_driver_name   = "cbc-des3-ede-rk",
                .base.cra_priority      = 300,
@@ -587,5 +606,8 @@ struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
                .setkey                 = rk_tdes_setkey,
                .encrypt                = rk_des3_ede_cbc_encrypt,
                .decrypt                = rk_des3_ede_cbc_decrypt,
-       }
+       },
+       .alg.skcipher.op = {
+               .do_one_request = rk_cipher_run,
+       },
 };
index 1c4d5fb..fe8cf9b 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/scatterlist.h>
 
index df5f9d6..6238d34 100644 (file)
@@ -15,7 +15,8 @@
 #include <linux/dmapool.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 
index 4c799df..62d9352 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
 
index df745fc..2cb1925 100644 (file)
@@ -12,6 +12,8 @@ config CRYPTO_DEV_JH7110
        select CRYPTO_SHA512
        select CRYPTO_SM3_GENERIC
        select CRYPTO_RSA
+       select CRYPTO_AES
+       select CRYPTO_CCM
        help
          Support for StarFive JH7110 crypto hardware acceleration engine.
          This module provides acceleration for public key algo,
index 98b01d2..8c137af 100644 (file)
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-$(CONFIG_CRYPTO_DEV_JH7110) += jh7110-crypto.o
-jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o
+jh7110-crypto-objs := jh7110-cryp.o jh7110-hash.o jh7110-rsa.o jh7110-aes.o
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
new file mode 100644 (file)
index 0000000..9378e66
--- /dev/null
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * StarFive AES acceleration driver
+ *
+ * Copyright (c) 2022 StarFive Technology
+ */
+
+#include <crypto/engine.h>
+#include <crypto/gcm.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
+#include "jh7110-cryp.h"
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define STARFIVE_AES_REGS_OFFSET       0x100
+#define STARFIVE_AES_AESDIO0R          (STARFIVE_AES_REGS_OFFSET + 0x0)
+#define STARFIVE_AES_KEY0              (STARFIVE_AES_REGS_OFFSET + 0x4)
+#define STARFIVE_AES_KEY1              (STARFIVE_AES_REGS_OFFSET + 0x8)
+#define STARFIVE_AES_KEY2              (STARFIVE_AES_REGS_OFFSET + 0xC)
+#define STARFIVE_AES_KEY3              (STARFIVE_AES_REGS_OFFSET + 0x10)
+#define STARFIVE_AES_KEY4              (STARFIVE_AES_REGS_OFFSET + 0x14)
+#define STARFIVE_AES_KEY5              (STARFIVE_AES_REGS_OFFSET + 0x18)
+#define STARFIVE_AES_KEY6              (STARFIVE_AES_REGS_OFFSET + 0x1C)
+#define STARFIVE_AES_KEY7              (STARFIVE_AES_REGS_OFFSET + 0x20)
+#define STARFIVE_AES_CSR               (STARFIVE_AES_REGS_OFFSET + 0x24)
+#define STARFIVE_AES_IV0               (STARFIVE_AES_REGS_OFFSET + 0x28)
+#define STARFIVE_AES_IV1               (STARFIVE_AES_REGS_OFFSET + 0x2C)
+#define STARFIVE_AES_IV2               (STARFIVE_AES_REGS_OFFSET + 0x30)
+#define STARFIVE_AES_IV3               (STARFIVE_AES_REGS_OFFSET + 0x34)
+#define STARFIVE_AES_NONCE0            (STARFIVE_AES_REGS_OFFSET + 0x3C)
+#define STARFIVE_AES_NONCE1            (STARFIVE_AES_REGS_OFFSET + 0x40)
+#define STARFIVE_AES_NONCE2            (STARFIVE_AES_REGS_OFFSET + 0x44)
+#define STARFIVE_AES_NONCE3            (STARFIVE_AES_REGS_OFFSET + 0x48)
+#define STARFIVE_AES_ALEN0             (STARFIVE_AES_REGS_OFFSET + 0x4C)
+#define STARFIVE_AES_ALEN1             (STARFIVE_AES_REGS_OFFSET + 0x50)
+#define STARFIVE_AES_MLEN0             (STARFIVE_AES_REGS_OFFSET + 0x54)
+#define STARFIVE_AES_MLEN1             (STARFIVE_AES_REGS_OFFSET + 0x58)
+#define STARFIVE_AES_IVLEN             (STARFIVE_AES_REGS_OFFSET + 0x5C)
+
+#define FLG_MODE_MASK                  GENMASK(2, 0)
+#define FLG_ENCRYPT                    BIT(4)
+
+/* Misc */
+#define CCM_B0_ADATA                   0x40
+#define AES_BLOCK_32                   (AES_BLOCK_SIZE / sizeof(u32))
+
+static inline int starfive_aes_wait_busy(struct starfive_cryp_dev *cryp)
+{
+       u32 status;
+
+       return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+                                         !(status & STARFIVE_AES_BUSY), 10, 100000);
+}
+
+static inline int starfive_aes_wait_keydone(struct starfive_cryp_dev *cryp)
+{
+       u32 status;
+
+       return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+                                         (status & STARFIVE_AES_KEY_DONE), 10, 100000);
+}
+
+static inline int starfive_aes_wait_gcmdone(struct starfive_cryp_dev *cryp)
+{
+       u32 status;
+
+       return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status,
+                                         (status & STARFIVE_AES_GCM_DONE), 10, 100000);
+}
+
+static inline int is_gcm(struct starfive_cryp_dev *cryp)
+{
+       return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM;
+}
+
+static inline int is_encrypt(struct starfive_cryp_dev *cryp)
+{
+       return cryp->flags & FLG_ENCRYPT;
+}
+
+static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mode)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       unsigned int value;
+
+       switch (hw_mode) {
+       case STARFIVE_AES_MODE_GCM:
+               value = readl(ctx->cryp->base + STARFIVE_AES_CSR);
+               value |= STARFIVE_AES_GCM_START;
+               writel(value, cryp->base + STARFIVE_AES_CSR);
+               starfive_aes_wait_gcmdone(cryp);
+               break;
+       case STARFIVE_AES_MODE_CCM:
+               value = readl(ctx->cryp->base + STARFIVE_AES_CSR);
+               value |= STARFIVE_AES_CCM_START;
+               writel(value, cryp->base + STARFIVE_AES_CSR);
+               break;
+       }
+}
+
+static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+
+       if (is_gcm(cryp))
+               writel(GCM_AES_IV_SIZE, cryp->base + STARFIVE_AES_IVLEN);
+       else
+               writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN);
+}
+
+static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+
+       writel(upper_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN0);
+       writel(lower_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN1);
+}
+
+static inline void starfive_aes_set_mlen(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+
+       writel(upper_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN0);
+       writel(lower_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN1);
+}
+
+static inline int starfive_aes_ccm_check_iv(const u8 *iv)
+{
+       /* 2 <= L <= 8, so 1 <= L' <= 7. */
+       if (iv[0] < 1 || iv[0] > 7)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int starfive_aes_write_iv(struct starfive_cryp_ctx *ctx, u32 *iv)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+
+       writel(iv[0], cryp->base + STARFIVE_AES_IV0);
+       writel(iv[1], cryp->base + STARFIVE_AES_IV1);
+       writel(iv[2], cryp->base + STARFIVE_AES_IV2);
+
+       if (is_gcm(cryp)) {
+               if (starfive_aes_wait_gcmdone(cryp))
+                       return -ETIMEDOUT;
+
+               return 0;
+       }
+
+       writel(iv[3], cryp->base + STARFIVE_AES_IV3);
+
+       return 0;
+}
+
+static inline void starfive_aes_get_iv(struct starfive_cryp_dev *cryp, u32 *iv)
+{
+       iv[0] = readl(cryp->base + STARFIVE_AES_IV0);
+       iv[1] = readl(cryp->base + STARFIVE_AES_IV1);
+       iv[2] = readl(cryp->base + STARFIVE_AES_IV2);
+       iv[3] = readl(cryp->base + STARFIVE_AES_IV3);
+}
+
+static inline void starfive_aes_write_nonce(struct starfive_cryp_ctx *ctx, u32 *nonce)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+
+       writel(nonce[0], cryp->base + STARFIVE_AES_NONCE0);
+       writel(nonce[1], cryp->base + STARFIVE_AES_NONCE1);
+       writel(nonce[2], cryp->base + STARFIVE_AES_NONCE2);
+       writel(nonce[3], cryp->base + STARFIVE_AES_NONCE3);
+}
+
+static int starfive_aes_write_key(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       u32 *key = (u32 *)ctx->key;
+
+       if (ctx->keylen >= AES_KEYSIZE_128) {
+               writel(key[0], cryp->base + STARFIVE_AES_KEY0);
+               writel(key[1], cryp->base + STARFIVE_AES_KEY1);
+               writel(key[2], cryp->base + STARFIVE_AES_KEY2);
+               writel(key[3], cryp->base + STARFIVE_AES_KEY3);
+       }
+
+       if (ctx->keylen >= AES_KEYSIZE_192) {
+               writel(key[4], cryp->base + STARFIVE_AES_KEY4);
+               writel(key[5], cryp->base + STARFIVE_AES_KEY5);
+       }
+
+       if (ctx->keylen >= AES_KEYSIZE_256) {
+               writel(key[6], cryp->base + STARFIVE_AES_KEY6);
+               writel(key[7], cryp->base + STARFIVE_AES_KEY7);
+       }
+
+       if (starfive_aes_wait_keydone(cryp))
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static int starfive_aes_ccm_init(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE];
+       unsigned int textlen;
+
+       memcpy(iv, cryp->req.areq->iv, AES_BLOCK_SIZE);
+       memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1);
+
+       /* Build B0 */
+       memcpy(b0, iv, AES_BLOCK_SIZE);
+
+       b0[0] |= (8 * ((cryp->authsize - 2) / 2));
+
+       if (cryp->assoclen)
+               b0[0] |= CCM_B0_ADATA;
+
+       textlen = cryp->total_in;
+
+       b0[AES_BLOCK_SIZE - 2] = textlen >> 8;
+       b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF;
+
+       starfive_aes_write_nonce(ctx, (u32 *)b0);
+
+       return 0;
+}
+
+static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       u32 hw_mode;
+
+       /* reset */
+       rctx->csr.aes.v = 0;
+       rctx->csr.aes.aesrst = 1;
+       writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR);
+
+       /* csr setup */
+       hw_mode = cryp->flags & FLG_MODE_MASK;
+
+       rctx->csr.aes.v = 0;
+
+       switch (ctx->keylen) {
+       case AES_KEYSIZE_128:
+               rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_128;
+               break;
+       case AES_KEYSIZE_192:
+               rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_192;
+               break;
+       case AES_KEYSIZE_256:
+               rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_256;
+               break;
+       }
+
+       rctx->csr.aes.mode  = hw_mode;
+       rctx->csr.aes.cmode = !is_encrypt(cryp);
+       rctx->csr.aes.ie = 1;
+
+       if (hw_mode == STARFIVE_AES_MODE_CFB ||
+           hw_mode == STARFIVE_AES_MODE_OFB)
+               rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128;
+       else
+               rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
+
+       if (cryp->side_chan) {
+               rctx->csr.aes.delay_aes = 1;
+               rctx->csr.aes.vaes_start = 1;
+       }
+
+       writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR);
+
+       cryp->err = starfive_aes_write_key(ctx);
+       if (cryp->err)
+               return cryp->err;
+
+       switch (hw_mode) {
+       case STARFIVE_AES_MODE_GCM:
+               starfive_aes_set_alen(ctx);
+               starfive_aes_set_mlen(ctx);
+               starfive_aes_set_ivlen(ctx);
+               starfive_aes_aead_hw_start(ctx, hw_mode);
+               starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv);
+               break;
+       case STARFIVE_AES_MODE_CCM:
+               starfive_aes_set_alen(ctx);
+               starfive_aes_set_mlen(ctx);
+               starfive_aes_ccm_init(ctx);
+               starfive_aes_aead_hw_start(ctx, hw_mode);
+               break;
+       case STARFIVE_AES_MODE_OFB:
+       case STARFIVE_AES_MODE_CFB:
+       case STARFIVE_AES_MODE_CBC:
+       case STARFIVE_AES_MODE_CTR:
+               starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv);
+               break;
+       default:
+               break;
+       }
+
+       return cryp->err;
+}
+
+static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp)
+{
+       int i, start_addr;
+
+       if (starfive_aes_wait_busy(cryp))
+               return dev_err_probe(cryp->dev, -ETIMEDOUT,
+                                    "Timeout waiting for tag generation.");
+
+       start_addr = STARFIVE_AES_NONCE0;
+
+       if (is_gcm(cryp))
+               for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4)
+                       cryp->tag_out[i] = readl(cryp->base + start_addr);
+       else
+               for (i = 0; i < AES_BLOCK_32; i++)
+                       cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+
+       if (is_encrypt(cryp)) {
+               scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1);
+       } else {
+               scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0);
+
+               if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize))
+                       return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n");
+       }
+
+       return 0;
+}
+
+static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp)
+{
+       union starfive_aes_csr csr;
+       int err = cryp->err;
+
+       if (!err && cryp->authsize)
+               err = starfive_aes_read_authtag(cryp);
+
+       if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC ||
+                    (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR))
+               starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv);
+
+       /* reset irq flags*/
+       csr.v = 0;
+       csr.aesrst = 1;
+       writel(csr.v, cryp->base + STARFIVE_AES_CSR);
+
+       if (cryp->authsize)
+               crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err);
+       else
+               crypto_finalize_skcipher_request(cryp->engine, cryp->req.sreq,
+                                                err);
+}
+
+void starfive_aes_done_task(unsigned long param)
+{
+       struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param;
+       u32 block[AES_BLOCK_32];
+       u32 stat;
+       int i;
+
+       for (i = 0; i < AES_BLOCK_32; i++)
+               block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R);
+
+       scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE,
+                                                            cryp->total_out), 1);
+
+       cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out);
+
+       if (!cryp->total_out) {
+               starfive_aes_finish_req(cryp);
+               return;
+       }
+
+       memset(block, 0, AES_BLOCK_SIZE);
+       scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+                                                           cryp->total_in), 0);
+       cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+       for (i = 0; i < AES_BLOCK_32; i++)
+               writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+       stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+       stat &= ~STARFIVE_IE_MASK_AES_DONE;
+       writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+}
+
+static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+       u32 *buffer;
+       int total_len, loop;
+
+       total_len = ALIGN(cryp->assoclen, AES_BLOCK_SIZE) / sizeof(unsigned int);
+       buffer = (u32 *)rctx->adata;
+
+       for (loop = 0; loop < total_len; loop += 4) {
+               writel(*buffer, cryp->base + STARFIVE_AES_NONCE0);
+               buffer++;
+               writel(*buffer, cryp->base + STARFIVE_AES_NONCE1);
+               buffer++;
+               writel(*buffer, cryp->base + STARFIVE_AES_NONCE2);
+               buffer++;
+               writel(*buffer, cryp->base + STARFIVE_AES_NONCE3);
+               buffer++;
+       }
+
+       if (starfive_aes_wait_gcmdone(cryp))
+               return dev_err_probe(cryp->dev, -ETIMEDOUT,
+                                    "Timeout processing gcm aad block");
+
+       return 0;
+}
+
+static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx)
+{
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+       u32 *buffer;
+       u8 *ci;
+       int total_len, loop;
+
+       total_len = cryp->assoclen;
+
+       ci = rctx->adata;
+       writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R);
+       ci++;
+       writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R);
+       ci++;
+       total_len -= 2;
+       buffer = (u32 *)ci;
+
+       for (loop = 0; loop < 3; loop++, buffer++)
+               writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R);
+
+       total_len -= 12;
+
+       while (total_len > 0) {
+               for (loop = 0; loop < AES_BLOCK_32; loop++, buffer++)
+                       writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R);
+
+               total_len -= AES_BLOCK_SIZE;
+       }
+
+       if (starfive_aes_wait_busy(cryp))
+               return dev_err_probe(cryp->dev, -ETIMEDOUT,
+                                    "Timeout processing ccm aad block");
+
+       return 0;
+}
+
+static int starfive_aes_prepare_req(struct skcipher_request *req,
+                                   struct aead_request *areq)
+{
+       struct starfive_cryp_ctx *ctx;
+       struct starfive_cryp_request_ctx *rctx;
+       struct starfive_cryp_dev *cryp;
+
+       if (!req && !areq)
+               return -EINVAL;
+
+       ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) :
+                   crypto_aead_ctx(crypto_aead_reqtfm(areq));
+
+       cryp = ctx->cryp;
+       rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
+
+       if (req) {
+               cryp->req.sreq = req;
+               cryp->total_in = req->cryptlen;
+               cryp->total_out = req->cryptlen;
+               cryp->assoclen = 0;
+               cryp->authsize = 0;
+       } else {
+               cryp->req.areq = areq;
+               cryp->assoclen = areq->assoclen;
+               cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq));
+               if (is_encrypt(cryp)) {
+                       cryp->total_in = areq->cryptlen;
+                       cryp->total_out = areq->cryptlen;
+               } else {
+                       cryp->total_in = areq->cryptlen - cryp->authsize;
+                       cryp->total_out = cryp->total_in;
+               }
+       }
+
+       rctx->in_sg = req ? req->src : areq->src;
+       scatterwalk_start(&cryp->in_walk, rctx->in_sg);
+
+       rctx->out_sg = req ? req->dst : areq->dst;
+       scatterwalk_start(&cryp->out_walk, rctx->out_sg);
+
+       if (cryp->assoclen) {
+               rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL);
+               if (!rctx->adata)
+                       return dev_err_probe(cryp->dev, -ENOMEM,
+                                            "Failed to alloc memory for adata");
+
+               scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0);
+               scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2);
+       }
+
+       ctx->rctx = rctx;
+
+       return starfive_aes_hw_init(ctx);
+}
+
+/*
+ * crypto_engine .do_one_request callback for the skcipher algorithms.
+ *
+ * Prepares the transfer state, kick-starts the hardware by writing the
+ * first plain/ciphertext block to the AES data I/O register, then
+ * unmasks the AES-done interrupt so the irq tasklet can stream the
+ * remaining blocks.  Returns 0 on success or a negative errno from
+ * request preparation.
+ */
+static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq)
+{
+       struct skcipher_request *req =
+               container_of(areq, struct skcipher_request, base);
+       struct starfive_cryp_ctx *ctx =
+               crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       u32 block[AES_BLOCK_32];
+       u32 stat;
+       int err;
+       int i;
+
+       err = starfive_aes_prepare_req(req, NULL);
+       if (err)
+               return err;
+
+       /*
+        * Write first plain/ciphertext block to start the module
+        * then let irq tasklet handle the rest of the data blocks.
+        */
+       scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+                                                           cryp->total_in), 0);
+       cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+       for (i = 0; i < AES_BLOCK_32; i++)
+               writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+       /* Clearing the mask bit unmasks (enables) the AES-done interrupt. */
+       stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+       stat &= ~STARFIVE_IE_MASK_AES_DONE;
+       writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+       return 0;
+}
+
+/*
+ * skcipher ->init hook: bind the tfm to a StarFive crypto device and
+ * reserve per-request context space (driver rctx plus a nested
+ * skcipher_request).  Returns -ENODEV when no device is available.
+ */
+static int starfive_aes_init_tfm(struct crypto_skcipher *tfm)
+{
+       struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       ctx->cryp = starfive_cryp_find_dev(ctx);
+       if (!ctx->cryp)
+               return -ENODEV;
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) +
+                                   sizeof(struct skcipher_request));
+
+       return 0;
+}
+
+/*
+ * crypto_engine .do_one_request callback for the AEAD algorithms.
+ *
+ * Prepares the request, feeds any associated data to the CCM/GCM logic,
+ * then writes the first text block and unmasks the AES-done interrupt so
+ * the irq tasklet can stream the remaining blocks.  A request with no
+ * payload (assoc-data only) is completed immediately.
+ */
+static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+       struct aead_request *req =
+               container_of(areq, struct aead_request, base);
+       struct starfive_cryp_ctx *ctx =
+               crypto_aead_ctx(crypto_aead_reqtfm(req));
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       struct starfive_cryp_request_ctx *rctx;
+       u32 block[AES_BLOCK_32];
+       u32 stat;
+       int err;
+       int i;
+
+       err = starfive_aes_prepare_req(NULL, req);
+       if (err)
+               return err;
+
+       /*
+        * ctx->rctx is (re)assigned for this request inside
+        * starfive_aes_prepare_req(); reading it before that call (as the
+        * previous code did) yields the prior request's context — NULL on
+        * the first request — and a stale adata pointer for the kfree()
+        * below.
+        */
+       rctx = ctx->rctx;
+
+       if (!cryp->assoclen)
+               goto write_text;
+
+       if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM)
+               cryp->err = starfive_aes_ccm_write_adata(ctx);
+       else
+               cryp->err = starfive_aes_gcm_write_adata(ctx);
+
+       /* adata was kmalloc'ed in starfive_aes_prepare_req(); done with it. */
+       kfree(rctx->adata);
+
+       if (cryp->err)
+               return cryp->err;
+
+write_text:
+       if (!cryp->total_in)
+               goto finish_req;
+
+       /*
+        * Write first plain/ciphertext block to start the module
+        * then let irq tasklet handle the rest of the data blocks.
+        */
+       scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE,
+                                                           cryp->total_in), 0);
+       cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in);
+
+       for (i = 0; i < AES_BLOCK_32; i++)
+               writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R);
+
+       /* Unmask the AES-done interrupt so the tasklet gets scheduled. */
+       stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
+       stat &= ~STARFIVE_IE_MASK_AES_DONE;
+       writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
+
+       return 0;
+
+finish_req:
+       starfive_aes_finish_req(cryp);
+       return 0;
+}
+
+/*
+ * AEAD ->init hook: bind the tfm to a StarFive crypto device, allocate a
+ * software fallback when the algorithm is flagged CRYPTO_ALG_NEED_FALLBACK
+ * (the CCM template here), and reserve per-request context space.
+ */
+static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm)
+{
+       struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_tfm *aead = crypto_aead_tfm(tfm);
+       struct crypto_alg *alg = aead->__crt_alg;
+       struct starfive_cryp_dev *cryp;
+
+       ctx->cryp = starfive_cryp_find_dev(ctx);
+       if (!ctx->cryp)
+               return -ENODEV;
+
+       /*
+        * Read the device pointer only after starfive_cryp_find_dev() has
+        * populated ctx->cryp.  The previous code captured ctx->cryp at
+        * declaration time, when the freshly allocated (zeroed) tfm context
+        * still held NULL, so the dev_err_probe() path below dereferenced a
+        * NULL pointer on fallback-allocation failure.
+        */
+       cryp = ctx->cryp;
+
+       if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
+               ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0,
+                                                 CRYPTO_ALG_NEED_FALLBACK);
+               if (IS_ERR(ctx->aead_fbk))
+                       return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk),
+                                            "%s() failed to allocate fallback for %s\n",
+                                            __func__, alg->cra_name);
+       }
+
+       /*
+        * NOTE(review): the request context doubles as the fallback subreq in
+        * starfive_aes_aead_crypt(), but crypto_aead_reqsize(ctx->aead_fbk)
+        * is not accounted for here — confirm the fallback's reqsize fits.
+        */
+       crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) +
+                               sizeof(struct aead_request));
+
+       return 0;
+}
+
+/* AEAD ->exit hook: release the software fallback (NULL-safe free). */
+static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm)
+{
+       struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+       crypto_free_aead(ctx->aead_fbk);
+}
+
+/*
+ * Common skcipher entry point: record the mode/direction flags and queue
+ * the request on the crypto engine.  ECB and CBC operate on whole cipher
+ * blocks, so non-block-aligned lengths are rejected up front; the
+ * stream-like modes (CTR/CFB/OFB) take any length.
+ */
+static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+       unsigned int blocksize_align = crypto_skcipher_blocksize(tfm) - 1;
+
+       cryp->flags = flags;
+
+       if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_ECB ||
+           (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC)
+               if (req->cryptlen & blocksize_align)
+                       return -EINVAL;
+
+       return crypto_transfer_skcipher_request_to_engine(cryp->engine, req);
+}
+
+/*
+ * Common AEAD entry point: record the mode/direction flags and queue the
+ * request on the crypto engine.  Decryption on an algorithm that carries
+ * a fallback (CCM) is delegated entirely to the software implementation,
+ * since the hardware cannot verify the tag on non-blocksize-aligned text.
+ */
+static int starfive_aes_aead_crypt(struct aead_request *req, unsigned long flags)
+{
+       struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+       struct starfive_cryp_dev *cryp = ctx->cryp;
+
+       cryp->flags = flags;
+
+       /*
+        * HW engine could not perform CCM tag verification on
+        * non-blocksize aligned text, use fallback algo instead
+        */
+       if (ctx->aead_fbk && !is_encrypt(cryp)) {
+               /*
+                * The subreq lives in this request's context area; assumes
+                * the reqsize set at init covers the fallback's needs —
+                * TODO confirm against starfive_aes_aead_init_tfm().
+                */
+               struct aead_request *subreq = aead_request_ctx(req);
+
+               aead_request_set_tfm(subreq, ctx->aead_fbk);
+               aead_request_set_callback(subreq, req->base.flags,
+                                         req->base.complete, req->base.data);
+               aead_request_set_crypt(subreq, req->src,
+                                      req->dst, req->cryptlen, req->iv);
+               aead_request_set_ad(subreq, req->assoclen);
+
+               return crypto_aead_decrypt(subreq);
+       }
+
+       return crypto_transfer_aead_request_to_engine(cryp->engine, req);
+}
+
+/*
+ * skcipher ->setkey hook: accept only the standard AES key sizes
+ * (128/192/256 bit) and cache the key in the tfm context for later
+ * programming into the hardware.
+ */
+static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       if (!key || !keylen)
+               return -EINVAL;
+
+       if (keylen != AES_KEYSIZE_128 &&
+           keylen != AES_KEYSIZE_192 &&
+           keylen != AES_KEYSIZE_256)
+               return -EINVAL;
+
+       memcpy(ctx->key, key, keylen);
+       ctx->keylen = keylen;
+
+       return 0;
+}
+
+/*
+ * AEAD ->setkey hook: same key-size validation and caching as the
+ * skcipher variant, additionally propagating the key to the software
+ * fallback when one exists so both paths stay in sync.
+ */
+static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                                   unsigned int keylen)
+{
+       struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+       if (!key || !keylen)
+               return -EINVAL;
+
+       if (keylen != AES_KEYSIZE_128 &&
+           keylen != AES_KEYSIZE_192 &&
+           keylen != AES_KEYSIZE_256)
+               return -EINVAL;
+
+       memcpy(ctx->key, key, keylen);
+       ctx->keylen = keylen;
+
+       if (ctx->aead_fbk)
+               return crypto_aead_setkey(ctx->aead_fbk, key, keylen);
+
+       return 0;
+}
+
+/* GCM ->setauthsize hook: defer to the generic GCM tag-length check. */
+static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm,
+                                       unsigned int authsize)
+{
+       return crypto_gcm_check_authsize(authsize);
+}
+
+/*
+ * CCM ->setauthsize hook: allow only the even tag lengths CCM permits
+ * (4..16, RFC 3610), then mirror the setting onto the fallback tfm.
+ * Assumes aead_fbk is always present for CCM (the algorithm is declared
+ * with CRYPTO_ALG_NEED_FALLBACK) — verify before reuse elsewhere.
+ */
+static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm,
+                                       unsigned int authsize)
+{
+       struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm);
+
+       switch (authsize) {
+       case 4:
+       case 6:
+       case 8:
+       case 10:
+       case 12:
+       case 14:
+       case 16:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return crypto_aead_setauthsize(ctx->aead_fbk, authsize);
+}
+
+/*
+ * Thin per-mode request wrappers: each selects the block-cipher mode
+ * (and the FLG_ENCRYPT direction bit) before queueing the request.  The
+ * CCM entry points additionally validate the IV first.
+ */
+static int starfive_aes_ecb_encrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ecb_decrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB);
+}
+
+static int starfive_aes_cbc_encrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC | FLG_ENCRYPT);
+}
+
+static int starfive_aes_cbc_decrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC);
+}
+
+static int starfive_aes_cfb_encrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_cfb_decrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB);
+}
+
+static int starfive_aes_ofb_encrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ofb_decrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB);
+}
+
+static int starfive_aes_ctr_encrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ctr_decrypt(struct skcipher_request *req)
+{
+       return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR);
+}
+
+static int starfive_aes_gcm_encrypt(struct aead_request *req)
+{
+       return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM | FLG_ENCRYPT);
+}
+
+static int starfive_aes_gcm_decrypt(struct aead_request *req)
+{
+       return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM);
+}
+
+static int starfive_aes_ccm_encrypt(struct aead_request *req)
+{
+       int ret;
+
+       ret = starfive_aes_ccm_check_iv(req->iv);
+       if (ret)
+               return ret;
+
+       return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM | FLG_ENCRYPT);
+}
+
+static int starfive_aes_ccm_decrypt(struct aead_request *req)
+{
+       int ret;
+
+       ret = starfive_aes_ccm_check_iv(req->iv);
+       if (ret)
+               return ret;
+
+       return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM);
+}
+
+/*
+ * AES skcipher algorithms exposed through the crypto engine.  ECB/CBC
+ * advertise the full AES block size; CTR/CFB/OFB are stream-like and use
+ * cra_blocksize = 1.  All share starfive_aes_do_one_req as the engine op.
+ */
+static struct skcipher_engine_alg skcipher_algs[] = {
+{
+       .base.init                      = starfive_aes_init_tfm,
+       .base.setkey                    = starfive_aes_setkey,
+       .base.encrypt                   = starfive_aes_ecb_encrypt,
+       .base.decrypt                   = starfive_aes_ecb_decrypt,
+       .base.min_keysize               = AES_MIN_KEY_SIZE,
+       .base.max_keysize               = AES_MAX_KEY_SIZE,
+       .base.base = {
+               .cra_name               = "ecb(aes)",
+               .cra_driver_name        = "starfive-ecb-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = AES_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_do_one_req,
+       },
+}, {
+       .base.init                      = starfive_aes_init_tfm,
+       .base.setkey                    = starfive_aes_setkey,
+       .base.encrypt                   = starfive_aes_cbc_encrypt,
+       .base.decrypt                   = starfive_aes_cbc_decrypt,
+       .base.min_keysize               = AES_MIN_KEY_SIZE,
+       .base.max_keysize               = AES_MAX_KEY_SIZE,
+       .base.ivsize                    = AES_BLOCK_SIZE,
+       .base.base = {
+               .cra_name               = "cbc(aes)",
+               .cra_driver_name        = "starfive-cbc-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = AES_BLOCK_SIZE,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_do_one_req,
+       },
+}, {
+       .base.init                      = starfive_aes_init_tfm,
+       .base.setkey                    = starfive_aes_setkey,
+       .base.encrypt                   = starfive_aes_ctr_encrypt,
+       .base.decrypt                   = starfive_aes_ctr_decrypt,
+       .base.min_keysize               = AES_MIN_KEY_SIZE,
+       .base.max_keysize               = AES_MAX_KEY_SIZE,
+       .base.ivsize                    = AES_BLOCK_SIZE,
+       .base.base = {
+               .cra_name               = "ctr(aes)",
+               .cra_driver_name        = "starfive-ctr-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_do_one_req,
+       },
+}, {
+       .base.init                      = starfive_aes_init_tfm,
+       .base.setkey                    = starfive_aes_setkey,
+       .base.encrypt                   = starfive_aes_cfb_encrypt,
+       .base.decrypt                   = starfive_aes_cfb_decrypt,
+       .base.min_keysize               = AES_MIN_KEY_SIZE,
+       .base.max_keysize               = AES_MAX_KEY_SIZE,
+       .base.ivsize                    = AES_BLOCK_SIZE,
+       .base.base = {
+               .cra_name               = "cfb(aes)",
+               .cra_driver_name        = "starfive-cfb-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_do_one_req,
+       },
+}, {
+       .base.init                      = starfive_aes_init_tfm,
+       .base.setkey                    = starfive_aes_setkey,
+       .base.encrypt                   = starfive_aes_ofb_encrypt,
+       .base.decrypt                   = starfive_aes_ofb_decrypt,
+       .base.min_keysize               = AES_MIN_KEY_SIZE,
+       .base.max_keysize               = AES_MAX_KEY_SIZE,
+       .base.ivsize                    = AES_BLOCK_SIZE,
+       .base.base = {
+               .cra_name               = "ofb(aes)",
+               .cra_driver_name        = "starfive-ofb-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_do_one_req,
+       },
+},
+};
+
+/*
+ * AES AEAD algorithms exposed through the crypto engine.  Only CCM is
+ * flagged CRYPTO_ALG_NEED_FALLBACK: its decrypt path is delegated to a
+ * software implementation (see starfive_aes_aead_crypt).
+ */
+static struct aead_engine_alg aead_algs[] = {
+{
+       .base.setkey                    = starfive_aes_aead_setkey,
+       .base.setauthsize               = starfive_aes_gcm_setauthsize,
+       .base.encrypt                   = starfive_aes_gcm_encrypt,
+       .base.decrypt                   = starfive_aes_gcm_decrypt,
+       .base.init                      = starfive_aes_aead_init_tfm,
+       .base.exit                      = starfive_aes_aead_exit_tfm,
+       .base.ivsize                    = GCM_AES_IV_SIZE,
+       .base.maxauthsize               = AES_BLOCK_SIZE,
+       .base.base = {
+               .cra_name               = "gcm(aes)",
+               .cra_driver_name        = "starfive-gcm-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_aead_do_one_req,
+       },
+}, {
+       .base.setkey                    = starfive_aes_aead_setkey,
+       .base.setauthsize               = starfive_aes_ccm_setauthsize,
+       .base.encrypt                   = starfive_aes_ccm_encrypt,
+       .base.decrypt                   = starfive_aes_ccm_decrypt,
+       .base.init                      = starfive_aes_aead_init_tfm,
+       .base.exit                      = starfive_aes_aead_exit_tfm,
+       .base.ivsize                    = AES_BLOCK_SIZE,
+       .base.maxauthsize               = AES_BLOCK_SIZE,
+       .base.base = {
+               .cra_name               = "ccm(aes)",
+               .cra_driver_name        = "starfive-ccm-aes",
+               .cra_priority           = 200,
+               .cra_flags              = CRYPTO_ALG_ASYNC |
+                                         CRYPTO_ALG_NEED_FALLBACK,
+               .cra_blocksize          = 1,
+               .cra_ctxsize            = sizeof(struct starfive_cryp_ctx),
+               .cra_alignmask          = 0xf,
+               .cra_module             = THIS_MODULE,
+       },
+       .op = {
+               .do_one_request = starfive_aes_aead_do_one_req,
+       },
+},
+};
+
+/*
+ * Register the AES skcipher and AEAD algorithms with the crypto engine.
+ * On AEAD registration failure the already-registered skciphers are
+ * rolled back, so the caller sees all-or-nothing behaviour.
+ */
+int starfive_aes_register_algs(void)
+{
+       int ret;
+
+       ret = crypto_engine_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+       if (ret)
+               return ret;
+
+       ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+       if (ret)
+               crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+
+       return ret;
+}
+
+/* Unregister every algorithm registered by starfive_aes_register_algs(). */
+void starfive_aes_unregister_algs(void)
+{
+       crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+       crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs));
+}
index cc43556..08e974e 100644 (file)
@@ -7,17 +7,20 @@
  *
  */
 
+#include <crypto/engine.h>
+#include "jh7110-cryp.h"
 #include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-
-#include "jh7110-cryp.h"
+#include <linux/spinlock.h>
 
 #define DRIVER_NAME             "jh7110-crypto"
 
@@ -51,6 +54,13 @@ struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx)
        return cryp;
 }
 
+/* Module parameter: non-zero enables the AES side-channel mitigation
+ * (copied into cryp->side_chan at probe time); costs throughput. */
+static u16 side_chan;
+module_param(side_chan, ushort, 0);
+MODULE_PARM_DESC(side_chan, "Enable side channel mitigation for AES module.\n"
+                           "Enabling this feature will reduce speed performance.\n"
+                           " 0 - Disabled\n"
+                           " other - Enabled");
+
 static int starfive_dma_init(struct starfive_cryp_dev *cryp)
 {
        dma_cap_mask_t mask;
@@ -82,20 +92,26 @@ static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp)
 static irqreturn_t starfive_cryp_irq(int irq, void *priv)
 {
        u32 status;
+       u32 mask;
        struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv;
 
+       mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
        status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET);
+       if (status & STARFIVE_IE_FLAG_AES_DONE) {
+               mask |= STARFIVE_IE_MASK_AES_DONE;
+               writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
+               tasklet_schedule(&cryp->aes_done);
+       }
+
        if (status & STARFIVE_IE_FLAG_HASH_DONE) {
-               status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
-               status |= STARFIVE_IE_MASK_HASH_DONE;
-               writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET);
+               mask |= STARFIVE_IE_MASK_HASH_DONE;
+               writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
                tasklet_schedule(&cryp->hash_done);
        }
 
        if (status & STARFIVE_IE_FLAG_PKA_DONE) {
-               status = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
-               status |= STARFIVE_IE_MASK_PKA_DONE;
-               writel(status, cryp->base + STARFIVE_IE_MASK_OFFSET);
+               mask |= STARFIVE_IE_MASK_PKA_DONE;
+               writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
                complete(&cryp->pka_done);
        }
 
@@ -121,10 +137,12 @@ static int starfive_cryp_probe(struct platform_device *pdev)
                return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base),
                                     "Error remapping memory for platform device\n");
 
+       tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp);
        tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp);
 
        cryp->phys_base = res->start;
        cryp->dma_maxburst = 32;
+       cryp->side_chan = side_chan;
 
        cryp->hclk = devm_clk_get(&pdev->dev, "hclk");
        if (IS_ERR(cryp->hclk))
@@ -180,6 +198,10 @@ static int starfive_cryp_probe(struct platform_device *pdev)
        if (ret)
                goto err_engine_start;
 
+       ret = starfive_aes_register_algs();
+       if (ret)
+               goto err_algs_aes;
+
        ret = starfive_hash_register_algs();
        if (ret)
                goto err_algs_hash;
@@ -193,6 +215,8 @@ static int starfive_cryp_probe(struct platform_device *pdev)
 err_algs_rsa:
        starfive_hash_unregister_algs();
 err_algs_hash:
+       starfive_aes_unregister_algs();
+err_algs_aes:
        crypto_engine_stop(cryp->engine);
 err_engine_start:
        crypto_engine_exit(cryp->engine);
@@ -207,18 +231,21 @@ err_dma_init:
        clk_disable_unprepare(cryp->ahb);
        reset_control_assert(cryp->rst);
 
+       tasklet_kill(&cryp->aes_done);
        tasklet_kill(&cryp->hash_done);
 err_probe_defer:
        return ret;
 }
 
-static int starfive_cryp_remove(struct platform_device *pdev)
+static void starfive_cryp_remove(struct platform_device *pdev)
 {
        struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev);
 
+       starfive_aes_unregister_algs();
        starfive_hash_unregister_algs();
        starfive_rsa_unregister_algs();
 
+       tasklet_kill(&cryp->aes_done);
        tasklet_kill(&cryp->hash_done);
 
        crypto_engine_stop(cryp->engine);
@@ -233,8 +260,6 @@ static int starfive_cryp_remove(struct platform_device *pdev)
        clk_disable_unprepare(cryp->hclk);
        clk_disable_unprepare(cryp->ahb);
        reset_control_assert(cryp->rst);
-
-       return 0;
 }
 
 static const struct of_device_id starfive_dt_ids[] __maybe_unused = {
@@ -245,7 +270,7 @@ MODULE_DEVICE_TABLE(of, starfive_dt_ids);
 
 static struct platform_driver starfive_cryp_driver = {
        .probe  = starfive_cryp_probe,
-       .remove = starfive_cryp_remove,
+       .remove_new = starfive_cryp_remove,
        .driver = {
                .name           = DRIVER_NAME,
                .of_match_table = starfive_dt_ids,
index 0cdcffc..fe011d5 100644 (file)
@@ -2,13 +2,15 @@
 #ifndef __STARFIVE_STR_H__
 #define __STARFIVE_STR_H__
 
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha2.h>
+#include <crypto/sm3.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-
-#include <crypto/engine.h>
-#include <crypto/sha2.h>
-#include <crypto/sm3.h>
+#include <linux/interrupt.h>
 
 #define STARFIVE_ALG_CR_OFFSET                 0x0
 #define STARFIVE_ALG_FIFO_OFFSET               0x4
 #define STARFIVE_DMA_IN_LEN_OFFSET             0x10
 #define STARFIVE_DMA_OUT_LEN_OFFSET            0x14
 
+#define STARFIVE_IE_MASK_AES_DONE              0x1
 #define STARFIVE_IE_MASK_HASH_DONE             0x4
 #define STARFIVE_IE_MASK_PKA_DONE              0x8
+#define STARFIVE_IE_FLAG_AES_DONE              0x1
 #define STARFIVE_IE_FLAG_HASH_DONE             0x4
 #define STARFIVE_IE_FLAG_PKA_DONE              0x8
 
 #define STARFIVE_MSG_BUFFER_SIZE               SZ_16K
 #define MAX_KEY_SIZE                           SHA512_BLOCK_SIZE
+#define STARFIVE_AES_IV_LEN                    AES_BLOCK_SIZE
+#define STARFIVE_AES_CTR_LEN                   AES_BLOCK_SIZE
+
+/* Bit layout of the AES control/status register; the interleaved #defines
+ * give mask/shift constants for the fields they precede. */
+union starfive_aes_csr {
+       u32 v;
+       struct {
+               u32 cmode                       :1;
+#define STARFIVE_AES_KEYMODE_128               0x0
+#define STARFIVE_AES_KEYMODE_192               0x1
+#define STARFIVE_AES_KEYMODE_256               0x2
+               u32 keymode                     :2;
+#define STARFIVE_AES_BUSY                      BIT(3)
+               u32 busy                        :1;
+               u32 done                        :1;
+#define STARFIVE_AES_KEY_DONE                  BIT(5)
+               u32 krdy                        :1;
+               u32 aesrst                      :1;
+               u32 ie                          :1;
+#define STARFIVE_AES_CCM_START                 BIT(8)
+               u32 ccm_start                   :1;
+#define STARFIVE_AES_MODE_ECB                  0x0
+#define STARFIVE_AES_MODE_CBC                  0x1
+#define STARFIVE_AES_MODE_CFB                  0x2
+#define STARFIVE_AES_MODE_OFB                  0x3
+#define STARFIVE_AES_MODE_CTR                  0x4
+#define STARFIVE_AES_MODE_CCM                  0x5
+#define STARFIVE_AES_MODE_GCM                  0x6
+               u32 mode                        :3;
+#define STARFIVE_AES_GCM_START                 BIT(12)
+               u32 gcm_start                   :1;
+#define STARFIVE_AES_GCM_DONE                  BIT(13)
+               u32 gcm_done                    :1;
+               u32 delay_aes                   :1;
+               u32 vaes_start                  :1;
+               u32 rsvd_0                      :8;
+#define STARFIVE_AES_MODE_XFB_1                        0x0
+#define STARFIVE_AES_MODE_XFB_128              0x5
+               u32 stmode                      :3;
+               u32 rsvd_1                      :5;
+       };
+};
 
 union starfive_hash_csr {
        u32 v;
@@ -105,7 +150,6 @@ union starfive_alg_cr {
 };
 
 struct starfive_cryp_ctx {
-       struct crypto_engine_ctx                enginectx;
        struct starfive_cryp_dev                *cryp;
        struct starfive_cryp_request_ctx        *rctx;
 
@@ -116,6 +160,7 @@ struct starfive_cryp_ctx {
        struct starfive_rsa_key                 rsa_key;
        struct crypto_akcipher                  *akcipher_fbk;
        struct crypto_ahash                     *ahash_fbk;
+       struct crypto_aead                      *aead_fbk;
 };
 
 struct starfive_cryp_dev {
@@ -133,13 +178,26 @@ struct starfive_cryp_dev {
        struct dma_chan                         *rx;
        struct dma_slave_config                 cfg_in;
        struct dma_slave_config                 cfg_out;
+       struct scatter_walk                     in_walk;
+       struct scatter_walk                     out_walk;
        struct crypto_engine                    *engine;
+       struct tasklet_struct                   aes_done;
        struct tasklet_struct                   hash_done;
        struct completion                       pka_done;
+       size_t                                  assoclen;
+       size_t                                  total_in;
+       size_t                                  total_out;
+       u32                                     tag_in[4];
+       u32                                     tag_out[4];
+       unsigned int                            authsize;
+       unsigned long                           flags;
        int                                     err;
+       bool                                    side_chan;
        union starfive_alg_cr                   alg_cr;
        union {
                struct ahash_request            *hreq;
+               struct aead_request             *areq;
+               struct skcipher_request         *sreq;
        } req;
 };
 
@@ -147,6 +205,7 @@ struct starfive_cryp_request_ctx {
        union {
                union starfive_hash_csr         hash;
                union starfive_pka_cacr         pka;
+               union starfive_aes_csr          aes;
        } csr;
 
        struct scatterlist                      *in_sg;
@@ -157,6 +216,7 @@ struct starfive_cryp_request_ctx {
        unsigned int                            blksize;
        unsigned int                            digsize;
        unsigned long                           in_sg_len;
+       unsigned char                           *adata;
        u8 rsa_data[] __aligned(sizeof(u32));
 };
 
@@ -168,5 +228,9 @@ void starfive_hash_unregister_algs(void);
 int starfive_rsa_register_algs(void);
 void starfive_rsa_unregister_algs(void);
 
+int starfive_aes_register_algs(void);
+void starfive_aes_unregister_algs(void);
+
 void starfive_hash_done_task(unsigned long param);
+void starfive_aes_done_task(unsigned long param);
 #endif
index 5064150..cc76501 100644 (file)
@@ -6,25 +6,20 @@
  *
  */
 
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include "jh7110-cryp.h"
+#include <linux/amba/pl080.h>
 #include <linux/clk.h>
-#include <linux/crypto.h>
 #include <linux/dma-direct.h>
 #include <linux/interrupt.h>
-#include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-#include <linux/amba/pl080.h>
-
-#include <crypto/hash.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/hash.h>
-
-#include "jh7110-cryp.h"
 
 #define STARFIVE_HASH_REGS_OFFSET      0x300
 #define STARFIVE_HASH_SHACSR           (STARFIVE_HASH_REGS_OFFSET + 0x0)
@@ -433,10 +428,6 @@ static int starfive_hash_init_tfm(struct crypto_ahash *hash,
        ctx->keylen = 0;
        ctx->hash_mode = mode;
 
-       ctx->enginectx.op.do_one_request = starfive_hash_one_request;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -445,11 +436,6 @@ static void starfive_hash_exit_tfm(struct crypto_ahash *hash)
        struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash);
 
        crypto_free_ahash(ctx->ahash_fbk);
-
-       ctx->ahash_fbk = NULL;
-       ctx->enginectx.op.do_one_request = NULL;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
 }
 
 static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx,
@@ -619,18 +605,18 @@ static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash)
                                      STARFIVE_HASH_SM3);
 }
 
-static struct ahash_alg algs_sha2_sm3[] = {
+static struct ahash_engine_alg algs_sha2_sm3[] = {
 {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_sha224_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_sha224_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.halg = {
                .digestsize = SHA224_DIGEST_SIZE,
                .statesize  = sizeof(struct sha256_state),
                .base = {
@@ -645,19 +631,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_hmac_sha224_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .setkey   = starfive_hash_setkey,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_hmac_sha224_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.setkey   = starfive_hash_setkey,
+       .base.halg = {
                .digestsize = SHA224_DIGEST_SIZE,
                .statesize  = sizeof(struct sha256_state),
                .base = {
@@ -672,18 +661,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_sha256_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_sha256_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize  = sizeof(struct sha256_state),
                .base = {
@@ -698,19 +690,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_hmac_sha256_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .setkey   = starfive_hash_setkey,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_hmac_sha256_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.setkey   = starfive_hash_setkey,
+       .base.halg = {
                .digestsize = SHA256_DIGEST_SIZE,
                .statesize  = sizeof(struct sha256_state),
                .base = {
@@ -725,18 +720,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_sha384_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_sha384_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.halg = {
                .digestsize = SHA384_DIGEST_SIZE,
                .statesize  = sizeof(struct sha512_state),
                .base = {
@@ -751,19 +749,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_hmac_sha384_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .setkey   = starfive_hash_setkey,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_hmac_sha384_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.setkey   = starfive_hash_setkey,
+       .base.halg = {
                .digestsize = SHA384_DIGEST_SIZE,
                .statesize  = sizeof(struct sha512_state),
                .base = {
@@ -778,18 +779,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_sha512_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_sha512_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.halg = {
                .digestsize = SHA512_DIGEST_SIZE,
                .statesize  = sizeof(struct sha512_state),
                .base = {
@@ -804,19 +808,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_hmac_sha512_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .setkey   = starfive_hash_setkey,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_hmac_sha512_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.setkey   = starfive_hash_setkey,
+       .base.halg = {
                .digestsize = SHA512_DIGEST_SIZE,
                .statesize  = sizeof(struct sha512_state),
                .base = {
@@ -831,18 +838,21 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_sm3_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .halg = {
+       .base.init     = starfive_hash_init,
+       .base.update   = starfive_hash_update,
+       .base.final    = starfive_hash_final,
+       .base.finup    = starfive_hash_finup,
+       .base.digest   = starfive_hash_digest,
+       .base.export   = starfive_hash_export,
+       .base.import   = starfive_hash_import,
+       .base.init_tfm = starfive_sm3_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.halg = {
                .digestsize = SM3_DIGEST_SIZE,
                .statesize  = sizeof(struct sm3_state),
                .base = {
@@ -857,19 +867,22 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 }, {
-       .init     = starfive_hash_init,
-       .update   = starfive_hash_update,
-       .final    = starfive_hash_final,
-       .finup    = starfive_hash_finup,
-       .digest   = starfive_hash_digest,
-       .export   = starfive_hash_export,
-       .import   = starfive_hash_import,
-       .init_tfm = starfive_hmac_sm3_init_tfm,
-       .exit_tfm = starfive_hash_exit_tfm,
-       .setkey   = starfive_hash_setkey,
-       .halg = {
+       .base.init        = starfive_hash_init,
+       .base.update      = starfive_hash_update,
+       .base.final       = starfive_hash_final,
+       .base.finup       = starfive_hash_finup,
+       .base.digest      = starfive_hash_digest,
+       .base.export      = starfive_hash_export,
+       .base.import      = starfive_hash_import,
+       .base.init_tfm = starfive_hmac_sm3_init_tfm,
+       .base.exit_tfm = starfive_hash_exit_tfm,
+       .base.setkey      = starfive_hash_setkey,
+       .base.halg = {
                .digestsize = SM3_DIGEST_SIZE,
                .statesize  = sizeof(struct sm3_state),
                .base = {
@@ -884,16 +897,19 @@ static struct ahash_alg algs_sha2_sm3[] = {
                        .cra_alignmask          = 3,
                        .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .op = {
+               .do_one_request = starfive_hash_one_request,
+       },
 },
 };
 
 int starfive_hash_register_algs(void)
 {
-       return crypto_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+       return crypto_engine_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
 }
 
 void starfive_hash_unregister_algs(void)
 {
-       crypto_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
+       crypto_engine_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3));
 }
index 4fc581e..49dfd16 100644 (file)
@@ -16,6 +16,8 @@ config CRYPTO_DEV_STM32_HASH
        select CRYPTO_MD5
        select CRYPTO_SHA1
        select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select CRYPTO_SHA3
        select CRYPTO_ENGINE
        help
          This enables support for the HASH hw accelerator which can be found
index 6b8d731..f095f00 100644 (file)
@@ -5,22 +5,24 @@
  * Ux500 support taken from snippets in the old Ux500 cryp driver
  */
 
+#include <crypto/aes.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/des.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
-#include <linux/interrupt.h>
+#include <linux/err.h>
 #include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-
-#include <crypto/aes.h>
-#include <crypto/internal/des.h>
-#include <crypto/engine.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
 
 #define DRIVER_NAME             "stm32-cryp"
 
@@ -156,7 +158,6 @@ struct stm32_cryp_caps {
 };
 
 struct stm32_cryp_ctx {
-       struct crypto_engine_ctx enginectx;
        struct stm32_cryp       *cryp;
        int                     keylen;
        __be32                  key[AES_KEYSIZE_256 / sizeof(u32)];
@@ -825,35 +826,20 @@ static int stm32_cryp_cpu_start(struct stm32_cryp *cryp)
 }
 
 static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq);
-static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
-                                        void *areq);
 
 static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm)
 {
-       struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm);
-
        crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
 
-       ctx->enginectx.op.do_one_request = stm32_cryp_cipher_one_req;
-       ctx->enginectx.op.prepare_request = stm32_cryp_prepare_cipher_req;
-       ctx->enginectx.op.unprepare_request = NULL;
        return 0;
 }
 
 static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
-static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine,
-                                      void *areq);
 
 static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
 {
-       struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm);
-
        tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
 
-       ctx->enginectx.op.do_one_request = stm32_cryp_aead_one_req;
-       ctx->enginectx.op.prepare_request = stm32_cryp_prepare_aead_req;
-       ctx->enginectx.op.unprepare_request = NULL;
-
        return 0;
 }
 
@@ -1180,9 +1166,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
 
        cryp = ctx->cryp;
 
-       if (!cryp)
-               return -ENODEV;
-
        rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq);
        rctx->mode &= FLG_MODE_MASK;
 
@@ -1248,16 +1231,6 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req,
        return ret;
 }
 
-static int stm32_cryp_prepare_cipher_req(struct crypto_engine *engine,
-                                        void *areq)
-{
-       struct skcipher_request *req = container_of(areq,
-                                                     struct skcipher_request,
-                                                     base);
-
-       return stm32_cryp_prepare_req(req, NULL);
-}
-
 static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
 {
        struct skcipher_request *req = container_of(areq,
@@ -1270,15 +1243,8 @@ static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq)
        if (!cryp)
                return -ENODEV;
 
-       return stm32_cryp_cpu_start(cryp);
-}
-
-static int stm32_cryp_prepare_aead_req(struct crypto_engine *engine, void *areq)
-{
-       struct aead_request *req = container_of(areq, struct aead_request,
-                                               base);
-
-       return stm32_cryp_prepare_req(NULL, req);
+       return stm32_cryp_prepare_req(req, NULL) ?:
+              stm32_cryp_cpu_start(cryp);
 }
 
 static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
@@ -1287,10 +1253,15 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq)
                                                base);
        struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        struct stm32_cryp *cryp = ctx->cryp;
+       int err;
 
        if (!cryp)
                return -ENODEV;
 
+       err = stm32_cryp_prepare_req(NULL, req);
+       if (err)
+               return err;
+
        if (unlikely(!cryp->payload_in && !cryp->header_in)) {
                /* No input data to process: get tag and finish */
                stm32_cryp_finish_req(cryp, 0);
@@ -1709,143 +1680,178 @@ static irqreturn_t stm32_cryp_irq(int irq, void *arg)
        return IRQ_WAKE_THREAD;
 }
 
-static struct skcipher_alg crypto_algs[] = {
+static struct skcipher_engine_alg crypto_algs[] = {
 {
-       .base.cra_name          = "ecb(aes)",
-       .base.cra_driver_name   = "stm32-ecb-aes",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = AES_MIN_KEY_SIZE,
-       .max_keysize            = AES_MAX_KEY_SIZE,
-       .setkey                 = stm32_cryp_aes_setkey,
-       .encrypt                = stm32_cryp_aes_ecb_encrypt,
-       .decrypt                = stm32_cryp_aes_ecb_decrypt,
+       .base = {
+               .base.cra_name          = "ecb(aes)",
+               .base.cra_driver_name   = "stm32-ecb-aes",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .setkey                 = stm32_cryp_aes_setkey,
+               .encrypt                = stm32_cryp_aes_ecb_encrypt,
+               .decrypt                = stm32_cryp_aes_ecb_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 {
-       .base.cra_name          = "cbc(aes)",
-       .base.cra_driver_name   = "stm32-cbc-aes",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = AES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = AES_MIN_KEY_SIZE,
-       .max_keysize            = AES_MAX_KEY_SIZE,
-       .ivsize                 = AES_BLOCK_SIZE,
-       .setkey                 = stm32_cryp_aes_setkey,
-       .encrypt                = stm32_cryp_aes_cbc_encrypt,
-       .decrypt                = stm32_cryp_aes_cbc_decrypt,
+       .base = {
+               .base.cra_name          = "cbc(aes)",
+               .base.cra_driver_name   = "stm32-cbc-aes",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+               .setkey                 = stm32_cryp_aes_setkey,
+               .encrypt                = stm32_cryp_aes_cbc_encrypt,
+               .decrypt                = stm32_cryp_aes_cbc_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 {
-       .base.cra_name          = "ctr(aes)",
-       .base.cra_driver_name   = "stm32-ctr-aes",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = 1,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = AES_MIN_KEY_SIZE,
-       .max_keysize            = AES_MAX_KEY_SIZE,
-       .ivsize                 = AES_BLOCK_SIZE,
-       .setkey                 = stm32_cryp_aes_setkey,
-       .encrypt                = stm32_cryp_aes_ctr_encrypt,
-       .decrypt                = stm32_cryp_aes_ctr_decrypt,
+       .base = {
+               .base.cra_name          = "ctr(aes)",
+               .base.cra_driver_name   = "stm32-ctr-aes",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = 1,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+               .setkey                 = stm32_cryp_aes_setkey,
+               .encrypt                = stm32_cryp_aes_ctr_encrypt,
+               .decrypt                = stm32_cryp_aes_ctr_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 {
-       .base.cra_name          = "ecb(des)",
-       .base.cra_driver_name   = "stm32-ecb-des",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = DES_BLOCK_SIZE,
-       .max_keysize            = DES_BLOCK_SIZE,
-       .setkey                 = stm32_cryp_des_setkey,
-       .encrypt                = stm32_cryp_des_ecb_encrypt,
-       .decrypt                = stm32_cryp_des_ecb_decrypt,
+       .base = {
+               .base.cra_name          = "ecb(des)",
+               .base.cra_driver_name   = "stm32-ecb-des",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = DES_BLOCK_SIZE,
+               .max_keysize            = DES_BLOCK_SIZE,
+               .setkey                 = stm32_cryp_des_setkey,
+               .encrypt                = stm32_cryp_des_ecb_encrypt,
+               .decrypt                = stm32_cryp_des_ecb_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 {
-       .base.cra_name          = "cbc(des)",
-       .base.cra_driver_name   = "stm32-cbc-des",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = DES_BLOCK_SIZE,
-       .max_keysize            = DES_BLOCK_SIZE,
-       .ivsize                 = DES_BLOCK_SIZE,
-       .setkey                 = stm32_cryp_des_setkey,
-       .encrypt                = stm32_cryp_des_cbc_encrypt,
-       .decrypt                = stm32_cryp_des_cbc_decrypt,
+       .base = {
+               .base.cra_name          = "cbc(des)",
+               .base.cra_driver_name   = "stm32-cbc-des",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = DES_BLOCK_SIZE,
+               .max_keysize            = DES_BLOCK_SIZE,
+               .ivsize                 = DES_BLOCK_SIZE,
+               .setkey                 = stm32_cryp_des_setkey,
+               .encrypt                = stm32_cryp_des_cbc_encrypt,
+               .decrypt                = stm32_cryp_des_cbc_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 {
-       .base.cra_name          = "ecb(des3_ede)",
-       .base.cra_driver_name   = "stm32-ecb-des3",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = 3 * DES_BLOCK_SIZE,
-       .max_keysize            = 3 * DES_BLOCK_SIZE,
-       .setkey                 = stm32_cryp_tdes_setkey,
-       .encrypt                = stm32_cryp_tdes_ecb_encrypt,
-       .decrypt                = stm32_cryp_tdes_ecb_decrypt,
+       .base = {
+               .base.cra_name          = "ecb(des3_ede)",
+               .base.cra_driver_name   = "stm32-ecb-des3",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = 3 * DES_BLOCK_SIZE,
+               .max_keysize            = 3 * DES_BLOCK_SIZE,
+               .setkey                 = stm32_cryp_tdes_setkey,
+               .encrypt                = stm32_cryp_tdes_ecb_encrypt,
+               .decrypt                = stm32_cryp_tdes_ecb_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 {
-       .base.cra_name          = "cbc(des3_ede)",
-       .base.cra_driver_name   = "stm32-cbc-des3",
-       .base.cra_priority      = 200,
-       .base.cra_flags         = CRYPTO_ALG_ASYNC,
-       .base.cra_blocksize     = DES_BLOCK_SIZE,
-       .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
-       .base.cra_alignmask     = 0,
-       .base.cra_module        = THIS_MODULE,
-
-       .init                   = stm32_cryp_init_tfm,
-       .min_keysize            = 3 * DES_BLOCK_SIZE,
-       .max_keysize            = 3 * DES_BLOCK_SIZE,
-       .ivsize                 = DES_BLOCK_SIZE,
-       .setkey                 = stm32_cryp_tdes_setkey,
-       .encrypt                = stm32_cryp_tdes_cbc_encrypt,
-       .decrypt                = stm32_cryp_tdes_cbc_decrypt,
+       .base = {
+               .base.cra_name          = "cbc(des3_ede)",
+               .base.cra_driver_name   = "stm32-cbc-des3",
+               .base.cra_priority      = 200,
+               .base.cra_flags         = CRYPTO_ALG_ASYNC,
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .base.cra_ctxsize       = sizeof(struct stm32_cryp_ctx),
+               .base.cra_alignmask     = 0,
+               .base.cra_module        = THIS_MODULE,
+
+               .init                   = stm32_cryp_init_tfm,
+               .min_keysize            = 3 * DES_BLOCK_SIZE,
+               .max_keysize            = 3 * DES_BLOCK_SIZE,
+               .ivsize                 = DES_BLOCK_SIZE,
+               .setkey                 = stm32_cryp_tdes_setkey,
+               .encrypt                = stm32_cryp_tdes_cbc_encrypt,
+               .decrypt                = stm32_cryp_tdes_cbc_decrypt,
+       },
+       .op = {
+               .do_one_request = stm32_cryp_cipher_one_req,
+       },
 },
 };
 
-static struct aead_alg aead_algs[] = {
+static struct aead_engine_alg aead_algs[] = {
 {
-       .setkey         = stm32_cryp_aes_aead_setkey,
-       .setauthsize    = stm32_cryp_aes_gcm_setauthsize,
-       .encrypt        = stm32_cryp_aes_gcm_encrypt,
-       .decrypt        = stm32_cryp_aes_gcm_decrypt,
-       .init           = stm32_cryp_aes_aead_init,
-       .ivsize         = 12,
-       .maxauthsize    = AES_BLOCK_SIZE,
+       .base.setkey            = stm32_cryp_aes_aead_setkey,
+       .base.setauthsize       = stm32_cryp_aes_gcm_setauthsize,
+       .base.encrypt           = stm32_cryp_aes_gcm_encrypt,
+       .base.decrypt           = stm32_cryp_aes_gcm_decrypt,
+       .base.init              = stm32_cryp_aes_aead_init,
+       .base.ivsize            = 12,
+       .base.maxauthsize       = AES_BLOCK_SIZE,
 
-       .base = {
+       .base.base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "stm32-gcm-aes",
                .cra_priority           = 200,
@@ -1855,17 +1861,20 @@ static struct aead_alg aead_algs[] = {
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
        },
+       .op = {
+               .do_one_request = stm32_cryp_aead_one_req,
+       },
 },
 {
-       .setkey         = stm32_cryp_aes_aead_setkey,
-       .setauthsize    = stm32_cryp_aes_ccm_setauthsize,
-       .encrypt        = stm32_cryp_aes_ccm_encrypt,
-       .decrypt        = stm32_cryp_aes_ccm_decrypt,
-       .init           = stm32_cryp_aes_aead_init,
-       .ivsize         = AES_BLOCK_SIZE,
-       .maxauthsize    = AES_BLOCK_SIZE,
+       .base.setkey            = stm32_cryp_aes_aead_setkey,
+       .base.setauthsize       = stm32_cryp_aes_ccm_setauthsize,
+       .base.encrypt           = stm32_cryp_aes_ccm_encrypt,
+       .base.decrypt           = stm32_cryp_aes_ccm_decrypt,
+       .base.init              = stm32_cryp_aes_aead_init,
+       .base.ivsize            = AES_BLOCK_SIZE,
+       .base.maxauthsize       = AES_BLOCK_SIZE,
 
-       .base = {
+       .base.base = {
                .cra_name               = "ccm(aes)",
                .cra_driver_name        = "stm32-ccm-aes",
                .cra_priority           = 200,
@@ -1875,6 +1884,9 @@ static struct aead_alg aead_algs[] = {
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
        },
+       .op = {
+               .do_one_request = stm32_cryp_aead_one_req,
+       },
 },
 };
 
@@ -2036,14 +2048,14 @@ static int stm32_cryp_probe(struct platform_device *pdev)
                goto err_engine2;
        }
 
-       ret = crypto_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+       ret = crypto_engine_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
        if (ret) {
                dev_err(dev, "Could not register algs\n");
                goto err_algs;
        }
 
        if (cryp->caps->aeads_support) {
-               ret = crypto_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+               ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs));
                if (ret)
                        goto err_aead_algs;
        }
@@ -2055,7 +2067,7 @@ static int stm32_cryp_probe(struct platform_device *pdev)
        return 0;
 
 err_aead_algs:
-       crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+       crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
 err_algs:
 err_engine2:
        crypto_engine_exit(cryp->engine);
@@ -2085,8 +2097,8 @@ static int stm32_cryp_remove(struct platform_device *pdev)
                return ret;
 
        if (cryp->caps->aeads_support)
-               crypto_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
-       crypto_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
+               crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
+       crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs));
 
        crypto_engine_exit(cryp->engine);
 
index f0df323..2b2382d 100644 (file)
@@ -6,27 +6,26 @@
  * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
  */
 
+#include <crypto/engine.h>
+#include <crypto/internal/hash.h>
+#include <crypto/md5.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <crypto/sha3.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>
-#include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
-
-#include <crypto/engine.h>
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/internal/hash.h>
+#include <linux/string.h>
 
 #define HASH_CR                                0x00
 #define HASH_DIN                       0x04
 #define HASH_CR_DMAE                   BIT(3)
 #define HASH_CR_DATATYPE_POS           4
 #define HASH_CR_MODE                   BIT(6)
+#define HASH_CR_ALGO_POS               7
 #define HASH_CR_MDMAT                  BIT(13)
 #define HASH_CR_DMAA                   BIT(14)
 #define HASH_CR_LKEY                   BIT(16)
 
-#define HASH_CR_ALGO_SHA1              0x0
-#define HASH_CR_ALGO_MD5               0x80
-#define HASH_CR_ALGO_SHA224            0x40000
-#define HASH_CR_ALGO_SHA256            0x40080
-
-#define HASH_CR_UX500_EMPTYMSG         BIT(20)
-#define HASH_CR_UX500_ALGO_SHA1                BIT(7)
-#define HASH_CR_UX500_ALGO_SHA256      0x0
-
 /* Interrupt */
 #define HASH_DINIE                     BIT(0)
 #define HASH_DCIE                      BIT(1)
@@ -66,9 +57,6 @@
 #define HASH_MASK_CALC_COMPLETION      BIT(0)
 #define HASH_MASK_DATA_INPUT           BIT(1)
 
-/* Context swap register */
-#define HASH_CSR_REGISTER_NUMBER       54
-
 /* Status Flags */
 #define HASH_SR_DATA_INPUT_READY       BIT(0)
 #define HASH_SR_OUTPUT_READY           BIT(1)
 #define HASH_STR_NBLW_MASK             GENMASK(4, 0)
 #define HASH_STR_DCAL                  BIT(8)
 
+/* HWCFGR Register */
+#define HASH_HWCFG_DMA_MASK            GENMASK(3, 0)
+
+/* Context swap register */
+#define HASH_CSR_NB_SHA256_HMAC                54
+#define HASH_CSR_NB_SHA256             38
+#define HASH_CSR_NB_SHA512_HMAC                103
+#define HASH_CSR_NB_SHA512             91
+#define HASH_CSR_NB_SHA3_HMAC          88
+#define HASH_CSR_NB_SHA3               72
+#define HASH_CSR_NB_MAX                        HASH_CSR_NB_SHA512_HMAC
+
 #define HASH_FLAGS_INIT                        BIT(0)
 #define HASH_FLAGS_OUTPUT_READY                BIT(1)
 #define HASH_FLAGS_CPU                 BIT(2)
-#define HASH_FLAGS_DMA_READY           BIT(3)
-#define HASH_FLAGS_DMA_ACTIVE          BIT(4)
-#define HASH_FLAGS_HMAC_INIT           BIT(5)
-#define HASH_FLAGS_HMAC_FINAL          BIT(6)
-#define HASH_FLAGS_HMAC_KEY            BIT(7)
-
+#define HASH_FLAGS_DMA_ACTIVE          BIT(3)
+#define HASH_FLAGS_HMAC_INIT           BIT(4)
+#define HASH_FLAGS_HMAC_FINAL          BIT(5)
+#define HASH_FLAGS_HMAC_KEY            BIT(6)
+#define HASH_FLAGS_SHA3_MODE           BIT(7)
 #define HASH_FLAGS_FINAL               BIT(15)
 #define HASH_FLAGS_FINUP               BIT(16)
-#define HASH_FLAGS_ALGO_MASK           GENMASK(21, 18)
-#define HASH_FLAGS_MD5                 BIT(18)
-#define HASH_FLAGS_SHA1                        BIT(19)
-#define HASH_FLAGS_SHA224              BIT(20)
-#define HASH_FLAGS_SHA256              BIT(21)
+#define HASH_FLAGS_ALGO_MASK           GENMASK(20, 17)
+#define HASH_FLAGS_ALGO_SHIFT          17
+#define HASH_FLAGS_ERRORS              BIT(21)
 #define HASH_FLAGS_EMPTY               BIT(22)
 #define HASH_FLAGS_HMAC                        BIT(23)
 
 #define HASH_OP_UPDATE                 1
 #define HASH_OP_FINAL                  2
 
+#define HASH_BURST_LEVEL               4
+
 enum stm32_hash_data_format {
        HASH_DATA_32_BITS               = 0x0,
        HASH_DATA_16_BITS               = 0x1,
@@ -108,16 +107,30 @@ enum stm32_hash_data_format {
        HASH_DATA_1_BIT                 = 0x3
 };
 
-#define HASH_BUFLEN                    256
-#define HASH_LONG_KEY                  64
-#define HASH_MAX_KEY_SIZE              (SHA256_BLOCK_SIZE * 8)
-#define HASH_QUEUE_LENGTH              16
-#define HASH_DMA_THRESHOLD             50
+#define HASH_BUFLEN                    (SHA3_224_BLOCK_SIZE + 4)
+#define HASH_MAX_KEY_SIZE              (SHA512_BLOCK_SIZE * 8)
+
+enum stm32_hash_algo {
+       HASH_SHA1                       = 0,
+       HASH_MD5                        = 1,
+       HASH_SHA224                     = 2,
+       HASH_SHA256                     = 3,
+       HASH_SHA3_224                   = 4,
+       HASH_SHA3_256                   = 5,
+       HASH_SHA3_384                   = 6,
+       HASH_SHA3_512                   = 7,
+       HASH_SHA384                     = 12,
+       HASH_SHA512                     = 15,
+};
+
+enum ux500_hash_algo {
+       HASH_SHA256_UX500               = 0,
+       HASH_SHA1_UX500                 = 1,
+};
 
 #define HASH_AUTOSUSPEND_DELAY         50
 
 struct stm32_hash_ctx {
-       struct crypto_engine_ctx enginectx;
        struct stm32_hash_dev   *hdev;
        struct crypto_shash     *xtfm;
        unsigned long           flags;
@@ -130,19 +143,19 @@ struct stm32_hash_state {
        u32                     flags;
 
        u16                     bufcnt;
-       u16                     buflen;
+       u16                     blocklen;
 
        u8 buffer[HASH_BUFLEN] __aligned(4);
 
        /* hash state */
-       u32                     hw_context[3 + HASH_CSR_REGISTER_NUMBER];
+       u32                     hw_context[3 + HASH_CSR_NB_MAX];
 };
 
 struct stm32_hash_request_ctx {
        struct stm32_hash_dev   *hdev;
        unsigned long           op;
 
-       u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
+       u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
        size_t                  digcnt;
 
        /* DMA */
@@ -161,17 +174,18 @@ struct stm32_hash_request_ctx {
 };
 
 struct stm32_hash_algs_info {
-       struct ahash_alg        *algs_list;
+       struct ahash_engine_alg *algs_list;
        size_t                  size;
 };
 
 struct stm32_hash_pdata {
-       struct stm32_hash_algs_info     *algs_info;
-       size_t                          algs_info_size;
-       bool                            has_sr;
-       bool                            has_mdmat;
-       bool                            broken_emptymsg;
-       bool                            ux500;
+       const int                               alg_shift;
+       const struct stm32_hash_algs_info       *algs_info;
+       size_t                                  algs_info_size;
+       bool                                    has_sr;
+       bool                                    has_mdmat;
+       bool                                    broken_emptymsg;
+       bool                                    ux500;
 };
 
 struct stm32_hash_dev {
@@ -182,7 +196,6 @@ struct stm32_hash_dev {
        void __iomem            *io_base;
        phys_addr_t             phys_base;
        u32                     dma_mode;
-       u32                     dma_maxburst;
        bool                    polled;
 
        struct ahash_request    *req;
@@ -269,37 +282,25 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
        return 0;
 }
 
-static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct stm32_hash_state *state = &rctx->state;
+       u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
 
        u32 reg = HASH_CR_INIT;
 
        if (!(hdev->flags & HASH_FLAGS_INIT)) {
-               switch (state->flags & HASH_FLAGS_ALGO_MASK) {
-               case HASH_FLAGS_MD5:
-                       reg |= HASH_CR_ALGO_MD5;
-                       break;
-               case HASH_FLAGS_SHA1:
-                       if (hdev->pdata->ux500)
-                               reg |= HASH_CR_UX500_ALGO_SHA1;
+               if (hdev->pdata->ux500) {
+                       reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
+               } else {
+                       if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
+                               reg |= ((alg & BIT(1)) << 17) |
+                                      ((alg & BIT(0)) << HASH_CR_ALGO_POS);
                        else
-                               reg |= HASH_CR_ALGO_SHA1;
-                       break;
-               case HASH_FLAGS_SHA224:
-                       reg |= HASH_CR_ALGO_SHA224;
-                       break;
-               case HASH_FLAGS_SHA256:
-                       if (hdev->pdata->ux500)
-                               reg |= HASH_CR_UX500_ALGO_SHA256;
-                       else
-                               reg |= HASH_CR_ALGO_SHA256;
-                       break;
-               default:
-                       reg |= HASH_CR_ALGO_MD5;
+                               reg |= alg << hdev->pdata->alg_shift;
                }
 
                reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
@@ -307,7 +308,7 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
                if (state->flags & HASH_FLAGS_HMAC) {
                        hdev->flags |= HASH_FLAGS_HMAC;
                        reg |= HASH_CR_MODE;
-                       if (ctx->keylen > HASH_LONG_KEY)
+                       if (ctx->keylen > crypto_ahash_blocksize(tfm))
                                reg |= HASH_CR_LKEY;
                }
 
@@ -318,6 +319,12 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
 
                hdev->flags |= HASH_FLAGS_INIT;
 
+               /*
+                * After first block + 1 words are fill up,
+                * we only need to fill 1 block to start partial computation
+                */
+               rctx->state.blocklen -= sizeof(u32);
+
                dev_dbg(hdev->dev, "Write Control %x\n", reg);
        }
 }
@@ -327,9 +334,9 @@ static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
        struct stm32_hash_state *state = &rctx->state;
        size_t count;
 
-       while ((state->bufcnt < state->buflen) && rctx->total) {
+       while ((state->bufcnt < state->blocklen) && rctx->total) {
                count = min(rctx->sg->length - rctx->offset, rctx->total);
-               count = min_t(size_t, count, state->buflen - state->bufcnt);
+               count = min_t(size_t, count, state->blocklen - state->bufcnt);
 
                if (count <= 0) {
                        if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
@@ -384,7 +391,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 
        hdev->flags |= HASH_FLAGS_CPU;
 
-       stm32_hash_write_ctrl(hdev, length);
+       stm32_hash_write_ctrl(hdev);
 
        if (stm32_hash_wait_busy(hdev))
                return -ETIMEDOUT;
@@ -419,20 +426,59 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
        return 0;
 }
 
+static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
+{
+       struct stm32_hash_state *state = &rctx->state;
+
+       switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
+               HASH_FLAGS_ALGO_SHIFT) {
+       case HASH_MD5:
+       case HASH_SHA1:
+       case HASH_SHA224:
+       case HASH_SHA256:
+               if (state->flags & HASH_FLAGS_HMAC)
+                       return HASH_CSR_NB_SHA256_HMAC;
+               else
+                       return HASH_CSR_NB_SHA256;
+               break;
+
+       case HASH_SHA384:
+       case HASH_SHA512:
+               if (state->flags & HASH_FLAGS_HMAC)
+                       return HASH_CSR_NB_SHA512_HMAC;
+               else
+                       return HASH_CSR_NB_SHA512;
+               break;
+
+       case HASH_SHA3_224:
+       case HASH_SHA3_256:
+       case HASH_SHA3_384:
+       case HASH_SHA3_512:
+               if (state->flags & HASH_FLAGS_HMAC)
+                       return HASH_CSR_NB_SHA3_HMAC;
+               else
+                       return HASH_CSR_NB_SHA3;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+}
+
 static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        struct stm32_hash_state *state = &rctx->state;
        u32 *preg = state->hw_context;
        int bufcnt, err = 0, final;
-       int i;
+       int i, swap_reg;
 
        dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
 
        final = state->flags & HASH_FLAGS_FINAL;
 
-       while ((rctx->total >= state->buflen) ||
-              (state->bufcnt + rctx->total >= state->buflen)) {
+       while ((rctx->total >= state->blocklen) ||
+              (state->bufcnt + rctx->total >= state->blocklen)) {
                stm32_hash_append_sg(rctx);
                bufcnt = state->bufcnt;
                state->bufcnt = 0;
@@ -455,11 +501,13 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
        if (stm32_hash_wait_busy(hdev))
                return -ETIMEDOUT;
 
+       swap_reg = hash_swap_reg(rctx);
+
        if (!hdev->pdata->ux500)
                *preg++ = stm32_hash_read(hdev, HASH_IMR);
        *preg++ = stm32_hash_read(hdev, HASH_STR);
        *preg++ = stm32_hash_read(hdev, HASH_CR);
-       for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+       for (i = 0; i < swap_reg; i++)
                *preg++ = stm32_hash_read(hdev, HASH_CSR(i));
 
        state->flags |= HASH_FLAGS_INIT;
@@ -492,7 +540,7 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
 
        reg = stm32_hash_read(hdev, HASH_CR);
 
-       if (!hdev->pdata->has_mdmat) {
+       if (hdev->pdata->has_mdmat) {
                if (mdma)
                        reg |= HASH_CR_MDMAT;
                else
@@ -533,8 +581,6 @@ static void stm32_hash_dma_callback(void *param)
        struct stm32_hash_dev *hdev = param;
 
        complete(&hdev->dma_completion);
-
-       hdev->flags |= HASH_FLAGS_DMA_READY;
 }
 
 static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
@@ -544,7 +590,7 @@ static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        int err;
 
-       if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
+       if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) {
                err = stm32_hash_write_key(hdev);
                if (stm32_hash_wait_busy(hdev))
                        return -ETIMEDOUT;
@@ -579,8 +625,8 @@ static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
        dma_conf.direction = DMA_MEM_TO_DEV;
        dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
        dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-       dma_conf.src_maxburst = hdev->dma_maxburst;
-       dma_conf.dst_maxburst = hdev->dma_maxburst;
+       dma_conf.src_maxburst = HASH_BURST_LEVEL;
+       dma_conf.dst_maxburst = HASH_BURST_LEVEL;
        dma_conf.device_fc = false;
 
        chan = dma_request_chan(hdev->dev, "in");
@@ -607,18 +653,18 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
        u32 *buffer = (void *)rctx->state.buffer;
        struct scatterlist sg[1], *tsg;
-       int err = 0, len = 0, reg, ncp = 0;
-       unsigned int i;
+       int err = 0, reg, ncp = 0;
+       unsigned int i, len = 0, bufcnt = 0;
+       bool is_last = false;
 
        rctx->sg = hdev->req->src;
        rctx->total = hdev->req->nbytes;
 
        rctx->nents = sg_nents(rctx->sg);
-
        if (rctx->nents < 0)
                return -EINVAL;
 
-       stm32_hash_write_ctrl(hdev, rctx->total);
+       stm32_hash_write_ctrl(hdev);
 
        if (hdev->flags & HASH_FLAGS_HMAC) {
                err = stm32_hash_hmac_dma_send(hdev);
@@ -627,10 +673,12 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
        }
 
        for_each_sg(rctx->sg, tsg, rctx->nents, i) {
+               sg[0] = *tsg;
                len = sg->length;
 
-               sg[0] = *tsg;
-               if (sg_is_last(sg)) {
+               if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
+                       sg->length = rctx->total - bufcnt;
+                       is_last = true;
                        if (hdev->dma_mode == 1) {
                                len = (ALIGN(sg->length, 16) - 16);
 
@@ -656,13 +704,15 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
                        return -ENOMEM;
                }
 
-               err = stm32_hash_xmit_dma(hdev, sg, len,
-                                         !sg_is_last(sg));
+               err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
 
+               bufcnt += sg[0].length;
                dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 
                if (err == -ENOMEM)
                        return err;
+               if (is_last)
+                       break;
        }
 
        if (hdev->dma_mode == 1) {
@@ -718,11 +768,12 @@ static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
 static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
 {
        struct scatterlist *sg;
+       struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
        struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
        int i;
 
-       if (req->nbytes <= HASH_DMA_THRESHOLD)
+       if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen)
                return false;
 
        if (sg_nents(req->src) > 1) {
@@ -748,31 +799,64 @@ static int stm32_hash_init(struct ahash_request *req)
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
        struct stm32_hash_state *state = &rctx->state;
+       bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
 
        rctx->hdev = hdev;
 
        state->flags = HASH_FLAGS_CPU;
 
+       if (sha3_mode)
+               state->flags |= HASH_FLAGS_SHA3_MODE;
+
        rctx->digcnt = crypto_ahash_digestsize(tfm);
        switch (rctx->digcnt) {
        case MD5_DIGEST_SIZE:
-               state->flags |= HASH_FLAGS_MD5;
+               state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
                break;
        case SHA1_DIGEST_SIZE:
-               state->flags |= HASH_FLAGS_SHA1;
+               if (hdev->pdata->ux500)
+                       state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
+               else
+                       state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
                break;
        case SHA224_DIGEST_SIZE:
-               state->flags |= HASH_FLAGS_SHA224;
+               if (sha3_mode)
+                       state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
+               else
+                       state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
                break;
        case SHA256_DIGEST_SIZE:
-               state->flags |= HASH_FLAGS_SHA256;
+               if (sha3_mode) {
+                       state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
+               } else {
+                       if (hdev->pdata->ux500)
+                               state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
+                       else
+                               state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
+               }
+               break;
+       case SHA384_DIGEST_SIZE:
+               if (sha3_mode)
+                       state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
+               else
+                       state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
+               break;
+       case SHA512_DIGEST_SIZE:
+               if (sha3_mode)
+                       state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
+               else
+                       state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
                break;
        default:
                return -EINVAL;
        }
 
        rctx->state.bufcnt = 0;
-       rctx->state.buflen = HASH_BUFLEN;
+       rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
+       if (rctx->state.blocklen > HASH_BUFLEN) {
+               dev_err(hdev->dev, "Error, block too large");
+               return -EINVAL;
+       }
        rctx->total = 0;
        rctx->offset = 0;
        rctx->data_type = HASH_DATA_8_BITS;
@@ -842,6 +926,7 @@ static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
 
 static void stm32_hash_copy_hash(struct ahash_request *req)
 {
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_state *state = &rctx->state;
        struct stm32_hash_dev *hdev = rctx->hdev;
@@ -851,22 +936,7 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
        if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
                return stm32_hash_emptymsg_fallback(req);
 
-       switch (state->flags & HASH_FLAGS_ALGO_MASK) {
-       case HASH_FLAGS_MD5:
-               hashsize = MD5_DIGEST_SIZE;
-               break;
-       case HASH_FLAGS_SHA1:
-               hashsize = SHA1_DIGEST_SIZE;
-               break;
-       case HASH_FLAGS_SHA224:
-               hashsize = SHA224_DIGEST_SIZE;
-               break;
-       case HASH_FLAGS_SHA256:
-               hashsize = SHA256_DIGEST_SIZE;
-               break;
-       default:
-               return;
-       }
+       hashsize = crypto_ahash_digestsize(tfm);
 
        for (i = 0; i < hashsize / sizeof(u32); i++) {
                if (hdev->pdata->ux500)
@@ -881,6 +951,11 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
 static int stm32_hash_finish(struct ahash_request *req)
 {
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+       u32 reg;
+
+       reg = stm32_hash_read(rctx->hdev, HASH_SR);
+       reg &= ~HASH_SR_OUTPUT_READY;
+       stm32_hash_write(rctx->hdev, HASH_SR, reg);
 
        if (!req->result)
                return -EINVAL;
@@ -920,6 +995,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
        struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
        struct stm32_hash_state *state = &rctx->state;
+       int swap_reg;
        int err = 0;
 
        if (!hdev)
@@ -932,6 +1008,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
 
        hdev->req = req;
        hdev->flags = 0;
+       swap_reg = hash_swap_reg(rctx);
 
        if (state->flags & HASH_FLAGS_INIT) {
                u32 *preg = rctx->state.hw_context;
@@ -945,7 +1022,7 @@ static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
                reg = *preg++ | HASH_CR_INIT;
                stm32_hash_write(hdev, HASH_CR, reg);
 
-               for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
+               for (i = 0; i < swap_reg; i++)
                        stm32_hash_write(hdev, HASH_CSR(i), *preg++);
 
                hdev->flags |= HASH_FLAGS_INIT;
@@ -1000,7 +1077,7 @@ static int stm32_hash_update(struct ahash_request *req)
        rctx->sg = req->src;
        rctx->offset = 0;
 
-       if ((state->bufcnt + rctx->total < state->buflen)) {
+       if ((state->bufcnt + rctx->total < state->blocklen)) {
                stm32_hash_append_sg(rctx);
                return 0;
        }
@@ -1102,8 +1179,7 @@ static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
        return 0;
 }
 
-static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
-                                   const char *algs_hmac_name)
+static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
 {
        struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
@@ -1112,38 +1188,33 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
 
        ctx->keylen = 0;
 
-       if (algs_hmac_name)
-               ctx->flags |= HASH_FLAGS_HMAC;
-
-       ctx->enginectx.op.do_one_request = stm32_hash_one_request;
+       if (algs_flags)
+               ctx->flags |= algs_flags;
 
        return stm32_hash_init_fallback(tfm);
 }
 
 static int stm32_hash_cra_init(struct crypto_tfm *tfm)
 {
-       return stm32_hash_cra_init_algs(tfm, NULL);
+       return stm32_hash_cra_init_algs(tfm, 0);
 }
 
-static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
 {
-       return stm32_hash_cra_init_algs(tfm, "md5");
+       return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
 }
 
-static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
 {
-       return stm32_hash_cra_init_algs(tfm, "sha1");
+       return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
 }
 
-static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
+static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
 {
-       return stm32_hash_cra_init_algs(tfm, "sha224");
+       return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
+                                       HASH_FLAGS_HMAC);
 }
 
-static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
-{
-       return stm32_hash_cra_init_algs(tfm, "sha256");
-}
 
 static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
 {
@@ -1162,11 +1233,9 @@ static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
                        hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
                        goto finish;
                }
-       } else if (HASH_FLAGS_DMA_READY & hdev->flags) {
-               if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
-                       hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
-                               goto finish;
-               }
+       } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
+               hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
+                       goto finish;
        }
 
        return IRQ_HANDLED;
@@ -1185,8 +1254,6 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
 
        reg = stm32_hash_read(hdev, HASH_SR);
        if (reg & HASH_SR_OUTPUT_READY) {
-               reg &= ~HASH_SR_OUTPUT_READY;
-               stm32_hash_write(hdev, HASH_SR, reg);
                hdev->flags |= HASH_FLAGS_OUTPUT_READY;
                /* Disable IT*/
                stm32_hash_write(hdev, HASH_IMR, 0);
@@ -1196,16 +1263,16 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-static struct ahash_alg algs_md5[] = {
+static struct ahash_engine_alg algs_md5[] = {
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1221,18 +1288,21 @@ static struct ahash_alg algs_md5[] = {
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .setkey = stm32_hash_setkey,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
                        .digestsize = MD5_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1244,24 +1314,27 @@ static struct ahash_alg algs_md5[] = {
                                .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct stm32_hash_ctx),
                                .cra_alignmask = 3,
-                               .cra_init = stm32_hash_cra_md5_init,
+                               .cra_init = stm32_hash_cra_hmac_init,
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
-       },
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       }
 };
 
-static struct ahash_alg algs_sha1[] = {
+static struct ahash_engine_alg algs_sha1[] = {
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1277,18 +1350,21 @@ static struct ahash_alg algs_sha1[] = {
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .setkey = stm32_hash_setkey,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1300,24 +1376,27 @@ static struct ahash_alg algs_sha1[] = {
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct stm32_hash_ctx),
                                .cra_alignmask = 3,
-                               .cra_init = stm32_hash_cra_sha1_init,
+                               .cra_init = stm32_hash_cra_hmac_init,
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
 };
 
-static struct ahash_alg algs_sha224[] = {
+static struct ahash_engine_alg algs_sha224[] = {
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1333,18 +1412,21 @@ static struct ahash_alg algs_sha224[] = {
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .setkey = stm32_hash_setkey,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.setkey = stm32_hash_setkey,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1356,24 +1438,27 @@ static struct ahash_alg algs_sha224[] = {
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct stm32_hash_ctx),
                                .cra_alignmask = 3,
-                               .cra_init = stm32_hash_cra_sha224_init,
+                               .cra_init = stm32_hash_cra_hmac_init,
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
 };
 
-static struct ahash_alg algs_sha256[] = {
+static struct ahash_engine_alg algs_sha256[] = {
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1389,18 +1474,21 @@ static struct ahash_alg algs_sha256[] = {
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
        {
-               .init = stm32_hash_init,
-               .update = stm32_hash_update,
-               .final = stm32_hash_final,
-               .finup = stm32_hash_finup,
-               .digest = stm32_hash_digest,
-               .export = stm32_hash_export,
-               .import = stm32_hash_import,
-               .setkey = stm32_hash_setkey,
-               .halg = {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct stm32_hash_state),
                        .base = {
@@ -1412,14 +1500,377 @@ static struct ahash_alg algs_sha256[] = {
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct stm32_hash_ctx),
                                .cra_alignmask = 3,
-                               .cra_init = stm32_hash_cra_sha256_init,
+                               .cra_init = stm32_hash_cra_hmac_init,
                                .cra_exit = stm32_hash_cra_exit,
                                .cra_module = THIS_MODULE,
                        }
-               }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
        },
 };
 
+static struct ahash_engine_alg algs_sha384_sha512[] = {
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA384_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "sha384",
+                               .cra_driver_name = "stm32-sha384",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA384_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.setkey = stm32_hash_setkey,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA384_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "hmac(sha384)",
+                               .cra_driver_name = "stm32-hmac-sha384",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA384_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_hmac_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA512_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "sha512",
+                               .cra_driver_name = "stm32-sha512",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA512_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
+                       .digestsize = SHA512_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "hmac(sha512)",
+                               .cra_driver_name = "stm32-hmac-sha512",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA512_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_hmac_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+};
+
+static struct ahash_engine_alg algs_sha3[] = {
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA3_224_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "sha3-224",
+                               .cra_driver_name = "stm32-sha3-224",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_224_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
+                       .digestsize = SHA3_224_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "hmac(sha3-224)",
+                               .cra_driver_name = "stm32-hmac-sha3-224",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_224_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_hmac_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA3_256_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "sha3-256",
+                               .cra_driver_name = "stm32-sha3-256",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_256_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
+                       .digestsize = SHA3_256_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "hmac(sha3-256)",
+                               .cra_driver_name = "stm32-hmac-sha3-256",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_256_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_hmac_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA3_384_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "sha3-384",
+                               .cra_driver_name = "stm32-sha3-384",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_384_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
+                       .digestsize = SHA3_384_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "hmac(sha3-384)",
+                               .cra_driver_name = "stm32-hmac-sha3-384",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_384_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_hmac_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.halg = {
+                       .digestsize = SHA3_512_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "sha3-512",
+                               .cra_driver_name = "stm32-sha3-512",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_512_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       },
+       {
+               .base.init = stm32_hash_init,
+               .base.update = stm32_hash_update,
+               .base.final = stm32_hash_final,
+               .base.finup = stm32_hash_finup,
+               .base.digest = stm32_hash_digest,
+               .base.export = stm32_hash_export,
+               .base.import = stm32_hash_import,
+               .base.setkey = stm32_hash_setkey,
+               .base.halg = {
+                       .digestsize = SHA3_512_DIGEST_SIZE,
+                       .statesize = sizeof(struct stm32_hash_state),
+                       .base = {
+                               .cra_name = "hmac(sha3-512)",
+                               .cra_driver_name = "stm32-hmac-sha3-512",
+                               .cra_priority = 200,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                       CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA3_512_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct stm32_hash_ctx),
+                               .cra_alignmask = 3,
+                               .cra_init = stm32_hash_cra_sha3_hmac_init,
+                               .cra_exit = stm32_hash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       }
+               },
+               .op = {
+                       .do_one_request = stm32_hash_one_request,
+               },
+       }
+};
+
 static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
 {
        unsigned int i, j;
@@ -1427,7 +1878,7 @@ static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
 
        for (i = 0; i < hdev->pdata->algs_info_size; i++) {
                for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
-                       err = crypto_register_ahash(
+                       err = crypto_engine_register_ahash(
                                &hdev->pdata->algs_info[i].algs_list[j]);
                        if (err)
                                goto err_algs;
@@ -1439,7 +1890,7 @@ err_algs:
        dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
        for (; i--; ) {
                for (; j--;)
-                       crypto_unregister_ahash(
+                       crypto_engine_unregister_ahash(
                                &hdev->pdata->algs_info[i].algs_list[j]);
        }
 
@@ -1452,7 +1903,7 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
 
        for (i = 0; i < hdev->pdata->algs_info_size; i++) {
                for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
-                       crypto_unregister_ahash(
+                       crypto_engine_unregister_ahash(
                                &hdev->pdata->algs_info[i].algs_list[j]);
        }
 
@@ -1471,6 +1922,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
 };
 
 static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
+       .alg_shift      = 7,
        .algs_info      = stm32_hash_algs_info_ux500,
        .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
        .broken_emptymsg = true,
@@ -1489,6 +1941,7 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
 };
 
 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
+       .alg_shift      = 7,
        .algs_info      = stm32_hash_algs_info_stm32f4,
        .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
        .has_sr         = true,
@@ -1515,25 +1968,49 @@ static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
 };
 
 static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
+       .alg_shift      = 7,
        .algs_info      = stm32_hash_algs_info_stm32f7,
        .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
        .has_sr         = true,
        .has_mdmat      = true,
 };
 
-static const struct of_device_id stm32_hash_of_match[] = {
+static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
        {
-               .compatible = "stericsson,ux500-hash",
-               .data = &stm32_hash_pdata_ux500,
+               .algs_list      = algs_sha1,
+               .size           = ARRAY_SIZE(algs_sha1),
+       },
+       {
+               .algs_list      = algs_sha224,
+               .size           = ARRAY_SIZE(algs_sha224),
+       },
+       {
+               .algs_list      = algs_sha256,
+               .size           = ARRAY_SIZE(algs_sha256),
        },
        {
-               .compatible = "st,stm32f456-hash",
-               .data = &stm32_hash_pdata_stm32f4,
+               .algs_list      = algs_sha384_sha512,
+               .size           = ARRAY_SIZE(algs_sha384_sha512),
        },
        {
-               .compatible = "st,stm32f756-hash",
-               .data = &stm32_hash_pdata_stm32f7,
+               .algs_list      = algs_sha3,
+               .size           = ARRAY_SIZE(algs_sha3),
        },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
+       .alg_shift      = 17,
+       .algs_info      = stm32_hash_algs_info_stm32mp13,
+       .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
+       .has_sr         = true,
+       .has_mdmat      = true,
+};
+
+static const struct of_device_id stm32_hash_of_match[] = {
+       { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
+       { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
+       { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
+       { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
        {},
 };
 
@@ -1548,12 +2025,6 @@ static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
                return -EINVAL;
        }
 
-       if (of_property_read_u32(dev->of_node, "dma-maxburst",
-                                &hdev->dma_maxburst)) {
-               dev_info(dev, "dma-maxburst not specified, using 0\n");
-               hdev->dma_maxburst = 0;
-       }
-
        return 0;
 }
 
@@ -1663,7 +2134,7 @@ static int stm32_hash_probe(struct platform_device *pdev)
                /* FIXME: implement DMA mode for Ux500 */
                hdev->dma_mode = 0;
        else
-               hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+               hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;
 
        /* Register algos */
        ret = stm32_hash_register_algs(hdev);
@@ -1696,18 +2167,12 @@ err_reset:
        return ret;
 }
 
-static int stm32_hash_remove(struct platform_device *pdev)
+static void stm32_hash_remove(struct platform_device *pdev)
 {
-       struct stm32_hash_dev *hdev;
+       struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
        int ret;
 
-       hdev = platform_get_drvdata(pdev);
-       if (!hdev)
-               return -ENODEV;
-
-       ret = pm_runtime_resume_and_get(hdev->dev);
-       if (ret < 0)
-               return ret;
+       ret = pm_runtime_get_sync(hdev->dev);
 
        stm32_hash_unregister_algs(hdev);
 
@@ -1723,9 +2188,8 @@ static int stm32_hash_remove(struct platform_device *pdev)
        pm_runtime_disable(hdev->dev);
        pm_runtime_put_noidle(hdev->dev);
 
-       clk_disable_unprepare(hdev->clk);
-
-       return 0;
+       if (ret >= 0)
+               clk_disable_unprepare(hdev->clk);
 }
 
 #ifdef CONFIG_PM
@@ -1762,7 +2226,7 @@ static const struct dev_pm_ops stm32_hash_pm_ops = {
 
 static struct platform_driver stm32_hash_driver = {
        .probe          = stm32_hash_probe,
-       .remove         = stm32_hash_remove,
+       .remove_new     = stm32_hash_remove,
        .driver         = {
                .name   = "stm32-hash",
                .pm = &stm32_hash_pm_ops,
@@ -1772,6 +2236,6 @@ static struct platform_driver stm32_hash_driver = {
 
 module_platform_driver(stm32_hash_driver);
 
-MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
+MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
 MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
 MODULE_LICENSE("GPL v2");
index bb27f01..4ca4fbd 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/interrupt.h>
 #include <linux/crypto.h>
 #include <linux/hw_random.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
 #include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
index 6963344..2621ff8 100644 (file)
@@ -7,15 +7,16 @@
   * Copyright 2022 Bytedance CO., LTD.
   */
 
-#include <linux/mpi.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
 #include <crypto/internal/akcipher.h>
 #include <crypto/internal/rsa.h>
-#include <linux/err.h>
 #include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/mpi.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include <uapi/linux/virtio_crypto.h>
 #include "virtio_crypto_common.h"
 
@@ -24,7 +25,6 @@ struct virtio_crypto_rsa_ctx {
 };
 
 struct virtio_crypto_akcipher_ctx {
-       struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_akcipher *tfm;
        bool session_valid;
@@ -47,7 +47,7 @@ struct virtio_crypto_akcipher_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
-       struct akcipher_alg algo;
+       struct akcipher_engine_alg algo;
 };
 
 static DEFINE_MUTEX(algs_lock);
@@ -475,9 +475,6 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm)
        struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm);
 
        ctx->tfm = tfm;
-       ctx->enginectx.op.do_one_request = virtio_crypto_rsa_do_req;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
 
        akcipher_set_reqsize(tfm,
                             sizeof(struct virtio_crypto_akcipher_request));
@@ -500,7 +497,7 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
        {
                .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
                .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
-               .algo = {
+               .algo.base = {
                        .encrypt = virtio_crypto_rsa_encrypt,
                        .decrypt = virtio_crypto_rsa_decrypt,
                        .set_pub_key = virtio_crypto_rsa_raw_set_pub_key,
@@ -516,11 +513,14 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
                                .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
                        },
                },
+               .algo.op = {
+                       .do_one_request = virtio_crypto_rsa_do_req,
+               },
        },
        {
                .algonum = VIRTIO_CRYPTO_AKCIPHER_RSA,
                .service = VIRTIO_CRYPTO_SERVICE_AKCIPHER,
-               .algo = {
+               .algo.base = {
                        .encrypt = virtio_crypto_rsa_encrypt,
                        .decrypt = virtio_crypto_rsa_decrypt,
                        .sign = virtio_crypto_rsa_sign,
@@ -538,6 +538,9 @@ static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = {
                                .cra_ctxsize = sizeof(struct virtio_crypto_akcipher_ctx),
                        },
                },
+               .algo.op = {
+                       .do_one_request = virtio_crypto_rsa_do_req,
+               },
        },
 };
 
@@ -556,14 +559,14 @@ int virtio_crypto_akcipher_algs_register(struct virtio_crypto *vcrypto)
                        continue;
 
                if (virtio_crypto_akcipher_algs[i].active_devs == 0) {
-                       ret = crypto_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+                       ret = crypto_engine_register_akcipher(&virtio_crypto_akcipher_algs[i].algo);
                        if (ret)
                                goto unlock;
                }
 
                virtio_crypto_akcipher_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered akcipher algo %s\n",
-                        virtio_crypto_akcipher_algs[i].algo.base.cra_name);
+                        virtio_crypto_akcipher_algs[i].algo.base.base.cra_name);
        }
 
 unlock:
@@ -586,7 +589,7 @@ void virtio_crypto_akcipher_algs_unregister(struct virtio_crypto *vcrypto)
                        continue;
 
                if (virtio_crypto_akcipher_algs[i].active_devs == 1)
-                       crypto_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
+                       crypto_engine_unregister_akcipher(&virtio_crypto_akcipher_algs[i].algo);
 
                virtio_crypto_akcipher_algs[i].active_devs--;
        }
index e587628..23c41d8 100644 (file)
@@ -6,19 +6,16 @@
   * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
   */
 
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
+#include <crypto/engine.h>
 #include <crypto/internal/skcipher.h>
-#include <linux/err.h>
 #include <crypto/scatterwalk.h>
-#include <linux/atomic.h>
-
+#include <linux/err.h>
+#include <linux/scatterlist.h>
 #include <uapi/linux/virtio_crypto.h>
 #include "virtio_crypto_common.h"
 
 
 struct virtio_crypto_skcipher_ctx {
-       struct crypto_engine_ctx enginectx;
        struct virtio_crypto *vcrypto;
        struct crypto_skcipher *tfm;
 
@@ -42,7 +39,7 @@ struct virtio_crypto_algo {
        uint32_t algonum;
        uint32_t service;
        unsigned int active_devs;
-       struct skcipher_alg algo;
+       struct skcipher_engine_alg algo;
 };
 
 /*
@@ -523,9 +520,6 @@ static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
        crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
        ctx->tfm = tfm;
 
-       ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
-       ctx->enginectx.op.prepare_request = NULL;
-       ctx->enginectx.op.unprepare_request = NULL;
        return 0;
 }
 
@@ -580,7 +574,7 @@ static void virtio_crypto_skcipher_finalize_req(
 static struct virtio_crypto_algo virtio_crypto_algs[] = { {
        .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
        .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
-       .algo = {
+       .algo.base = {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "virtio_crypto_aes_cbc",
                .base.cra_priority      = 150,
@@ -598,6 +592,9 @@ static struct virtio_crypto_algo virtio_crypto_algs[] = { {
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        },
+       .algo.op = {
+               .do_one_request = virtio_crypto_skcipher_crypt_req,
+       },
 } };
 
 int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
@@ -616,14 +613,14 @@ int virtio_crypto_skcipher_algs_register(struct virtio_crypto *vcrypto)
                        continue;
 
                if (virtio_crypto_algs[i].active_devs == 0) {
-                       ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
+                       ret = crypto_engine_register_skcipher(&virtio_crypto_algs[i].algo);
                        if (ret)
                                goto unlock;
                }
 
                virtio_crypto_algs[i].active_devs++;
                dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
-                        virtio_crypto_algs[i].algo.base.cra_name);
+                        virtio_crypto_algs[i].algo.base.base.cra_name);
        }
 
 unlock:
@@ -647,7 +644,7 @@ void virtio_crypto_skcipher_algs_unregister(struct virtio_crypto *vcrypto)
                        continue;
 
                if (virtio_crypto_algs[i].active_devs == 1)
-                       crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);
+                       crypto_engine_unregister_skcipher(&virtio_crypto_algs[i].algo);
 
                virtio_crypto_algs[i].active_devs--;
        }
index bf1f421..ce33557 100644 (file)
@@ -9,13 +9,14 @@
 #include <crypto/gcm.h>
 #include <crypto/internal/aead.h>
 #include <crypto/scatterwalk.h>
-
 #include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
-
-#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/string.h>
 
 #define ZYNQMP_DMA_BIT_MASK    32U
 
@@ -43,7 +44,7 @@ enum zynqmp_aead_keysrc {
 
 struct zynqmp_aead_drv_ctx {
        union {
-               struct aead_alg aead;
+               struct aead_engine_alg aead;
        } alg;
        struct device *dev;
        struct crypto_engine *engine;
@@ -60,7 +61,6 @@ struct zynqmp_aead_hw_req {
 };
 
 struct zynqmp_aead_tfm_ctx {
-       struct crypto_engine_ctx engine_ctx;
        struct device *dev;
        u8 key[ZYNQMP_AES_KEY_SIZE];
        u8 *iv;
@@ -286,7 +286,7 @@ static int zynqmp_aes_aead_encrypt(struct aead_request *req)
        struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
 
        rq_ctx->op = ZYNQMP_AES_ENCRYPT;
-       drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+       drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
 
        return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
 }
@@ -299,7 +299,7 @@ static int zynqmp_aes_aead_decrypt(struct aead_request *req)
        struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
 
        rq_ctx->op = ZYNQMP_AES_DECRYPT;
-       drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+       drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
 
        return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
 }
@@ -312,20 +312,16 @@ static int zynqmp_aes_aead_init(struct crypto_aead *aead)
        struct zynqmp_aead_drv_ctx *drv_ctx;
        struct aead_alg *alg = crypto_aead_alg(aead);
 
-       drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead);
+       drv_ctx = container_of(alg, struct zynqmp_aead_drv_ctx, alg.aead.base);
        tfm_ctx->dev = drv_ctx->dev;
 
-       tfm_ctx->engine_ctx.op.do_one_request = zynqmp_handle_aes_req;
-       tfm_ctx->engine_ctx.op.prepare_request = NULL;
-       tfm_ctx->engine_ctx.op.unprepare_request = NULL;
-
-       tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.cra_name,
+       tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->alg.aead.base.base.cra_name,
                                                0,
                                                CRYPTO_ALG_NEED_FALLBACK);
 
        if (IS_ERR(tfm_ctx->fbk_cipher)) {
                pr_err("%s() Error: failed to allocate fallback for %s\n",
-                      __func__, drv_ctx->alg.aead.base.cra_name);
+                      __func__, drv_ctx->alg.aead.base.base.cra_name);
                return PTR_ERR(tfm_ctx->fbk_cipher);
        }
 
@@ -350,7 +346,7 @@ static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
 }
 
 static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
-       .alg.aead = {
+       .alg.aead.base = {
                .setkey         = zynqmp_aes_aead_setkey,
                .setauthsize    = zynqmp_aes_aead_setauthsize,
                .encrypt        = zynqmp_aes_aead_encrypt,
@@ -372,7 +368,10 @@ static struct zynqmp_aead_drv_ctx aes_drv_ctx = {
                .cra_ctxsize            = sizeof(struct zynqmp_aead_tfm_ctx),
                .cra_module             = THIS_MODULE,
                }
-       }
+       },
+       .alg.aead.op = {
+               .do_one_request = zynqmp_handle_aes_req,
+       },
 };
 
 static int zynqmp_aes_aead_probe(struct platform_device *pdev)
@@ -405,7 +404,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
                goto err_engine;
        }
 
-       err = crypto_register_aead(&aes_drv_ctx.alg.aead);
+       err = crypto_engine_register_aead(&aes_drv_ctx.alg.aead);
        if (err < 0) {
                dev_err(dev, "Failed to register AEAD alg.\n");
                goto err_aead;
@@ -413,7 +412,7 @@ static int zynqmp_aes_aead_probe(struct platform_device *pdev)
        return 0;
 
 err_aead:
-       crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+       crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
 
 err_engine:
        if (aes_drv_ctx.engine)
@@ -425,7 +424,7 @@ err_engine:
 static int zynqmp_aes_aead_remove(struct platform_device *pdev)
 {
        crypto_engine_exit(aes_drv_ctx.engine);
-       crypto_unregister_aead(&aes_drv_ctx.alg.aead);
+       crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
 
        return 0;
 }
index 43ff170..426bf1a 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #define ZYNQMP_DMA_BIT_MASK            32U
index 6156161..ca86f4c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/cache.h>
 #include <linux/crypto.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 /*
  * Maximum values for blocksize and alignmask, used to allocate
@@ -82,6 +83,8 @@ struct crypto_instance {
                struct crypto_spawn *spawns;
        };
 
+       struct work_struct free_work;
+
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
index 2038764..2835069 100644 (file)
@@ -7,91 +7,47 @@
 #ifndef _CRYPTO_ENGINE_H
 #define _CRYPTO_ENGINE_H
 
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/kthread.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <crypto/algapi.h>
 #include <crypto/aead.h>
 #include <crypto/akcipher.h>
 #include <crypto/hash.h>
-#include <crypto/skcipher.h>
 #include <crypto/kpp.h>
+#include <crypto/skcipher.h>
+#include <linux/types.h>
 
+struct crypto_engine;
 struct device;
 
-#define ENGINE_NAME_LEN        30
-/*
- * struct crypto_engine - crypto hardware engine
- * @name: the engine name
- * @idling: the engine is entering idle state
- * @busy: request pump is busy
- * @running: the engine is on working
- * @retry_support: indication that the hardware allows re-execution
- * of a failed backlog request
- * crypto-engine, in head position to keep order
- * @list: link with the global crypto engine list
- * @queue_lock: spinlock to synchronise access to request queue
- * @queue: the crypto queue of the engine
- * @rt: whether this queue is set to run as a realtime task
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
- * @kworker: kthread worker struct for request pump
- * @pump_requests: work struct for scheduling work to the request pump
- * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
- */
-struct crypto_engine {
-       char                    name[ENGINE_NAME_LEN];
-       bool                    idling;
-       bool                    busy;
-       bool                    running;
-
-       bool                    retry_support;
-
-       struct list_head        list;
-       spinlock_t              queue_lock;
-       struct crypto_queue     queue;
-       struct device           *dev;
-
-       bool                    rt;
-
-       int (*prepare_crypt_hardware)(struct crypto_engine *engine);
-       int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
-       int (*do_batch_requests)(struct crypto_engine *engine);
-
-
-       struct kthread_worker           *kworker;
-       struct kthread_work             pump_requests;
-
-       void                            *priv_data;
-       struct crypto_async_request     *cur_req;
-};
-
 /*
  * struct crypto_engine_op - crypto hardware engine operations
- * @prepare_request: do some preparation if needed before handling the current request
- * @unprepare_request: undo any work done by prepare_request()
  * @do_one_request: do encryption for current request
  */
 struct crypto_engine_op {
-       int (*prepare_request)(struct crypto_engine *engine,
-                              void *areq);
-       int (*unprepare_request)(struct crypto_engine *engine,
-                                void *areq);
        int (*do_one_request)(struct crypto_engine *engine,
                              void *areq);
 };
 
-struct crypto_engine_ctx {
+struct aead_engine_alg {
+       struct aead_alg base;
+       struct crypto_engine_op op;
+};
+
+struct ahash_engine_alg {
+       struct ahash_alg base;
+       struct crypto_engine_op op;
+};
+
+struct akcipher_engine_alg {
+       struct akcipher_alg base;
+       struct crypto_engine_op op;
+};
+
+struct kpp_engine_alg {
+       struct kpp_alg base;
+       struct crypto_engine_op op;
+};
+
+struct skcipher_engine_alg {
+       struct skcipher_alg base;
        struct crypto_engine_op op;
 };
 
@@ -124,4 +80,28 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool rt, int qlen);
 int crypto_engine_exit(struct crypto_engine *engine);
 
+int crypto_engine_register_aead(struct aead_engine_alg *alg);
+void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
+int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
+void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
+
+int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
+void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
+int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
+void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
+                                     int count);
+
+int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
+void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
+
+int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
+void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
+
+int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
+void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
+int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
+                                    int count);
+void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
+                                       int count);
+
 #endif /* _CRYPTO_ENGINE_H */
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
new file mode 100644 (file)
index 0000000..fbf4be5
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Crypto engine API
+ *
+ * Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
+ * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
+ */
+#ifndef _CRYPTO_INTERNAL_ENGINE_H
+#define _CRYPTO_INTERNAL_ENGINE_H
+
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <linux/kthread.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+
+#define ENGINE_NAME_LEN        30
+
+struct device;
+
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is on working
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request
+ * crypto-engine, in head position to keep order
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @do_batch_requests: execute a batch of requests. Depends on multiple
+ * requests support.
+ * @kworker: kthread worker struct for request pump
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the current request which is on processing
+ */
+struct crypto_engine {
+       char                    name[ENGINE_NAME_LEN];
+       bool                    idling;
+       bool                    busy;
+       bool                    running;
+
+       bool                    retry_support;
+
+       struct list_head        list;
+       spinlock_t              queue_lock;
+       struct crypto_queue     queue;
+       struct device           *dev;
+
+       bool                    rt;
+
+       int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+       int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+       int (*do_batch_requests)(struct crypto_engine *engine);
+
+
+       struct kthread_worker           *kworker;
+       struct kthread_work             pump_requests;
+
+       void                            *priv_data;
+       struct crypto_async_request     *cur_req;
+};
+
+#endif
index a7d54d4..39fbfb4 100644 (file)
 enum qm_stop_reason {
        QM_NORMAL,
        QM_SOFT_RESET,
-       QM_FLR,
+       QM_DOWN,
 };
 
 enum qm_state {
index 75da8f5..c1dc87f 100644 (file)
@@ -8,6 +8,10 @@
 enum psp_platform_access_msg {
        PSP_CMD_NONE = 0x0,
        PSP_I2C_REQ_BUS_CMD = 0x64,
+       PSP_DYNAMIC_BOOST_GET_NONCE,
+       PSP_DYNAMIC_BOOST_SET_UID,
+       PSP_DYNAMIC_BOOST_GET_PARAMETER,
+       PSP_DYNAMIC_BOOST_SET_PARAMETER,
 };
 
 struct psp_req_buffer_hdr {
diff --git a/include/uapi/linux/psp-dbc.h b/include/uapi/linux/psp-dbc.h
new file mode 100644 (file)
index 0000000..b3845a9
--- /dev/null
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Userspace interface for AMD Dynamic Boost Control (DBC)
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#ifndef __PSP_DBC_USER_H__
+#define __PSP_DBC_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * DOC: AMD Dynamic Boost Control (DBC) interface
+ */
+
+#define DBC_NONCE_SIZE         16
+#define DBC_SIG_SIZE           32
+#define DBC_UID_SIZE           16
+
+/**
+ * struct dbc_user_nonce - Nonce exchange structure (input/output).
+ * @auth_needed: Whether the PSP should authenticate this request (input).
+ *               0: no authentication, PSP will return single use nonce.
+ *               1: authentication: PSP will return multi-use nonce.
+ * @nonce:       16 byte value used for future authentication (output).
+ * @signature:   Optional 32 byte signature created by software using a
+ *               previous nonce (input).
+ */
+struct dbc_user_nonce {
+       __u32   auth_needed;
+       __u8    nonce[DBC_NONCE_SIZE];
+       __u8    signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_setuid - UID exchange structure (input).
+ * @uid:       16 byte value representing software identity
+ * @signature: 32 byte signature created by software using a previous nonce
+ */
+struct dbc_user_setuid {
+       __u8    uid[DBC_UID_SIZE];
+       __u8    signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * struct dbc_user_param - Parameter exchange structure (input/output).
+ * @msg_index: Message indicating what parameter to set or get (input)
+ * @param:     4 byte parameter, units are message specific. (input/output)
+ * @signature: 32 byte signature.
+ *             - When sending a message this is to be created by software
+ *               using a previous nonce (input)
+ *             - For interpreting results, this signature is updated by the
+ *               PSP to allow software to validate the authenticity of the
+ *               results.
+ */
+struct dbc_user_param {
+       __u32   msg_index;
+       __u32   param;
+       __u8    signature[DBC_SIG_SIZE];
+} __packed;
+
+/**
+ * Dynamic Boost Control (DBC) IOC
+ *
+ * possible return codes for all DBC IOCTLs:
+ *  0:          success
+ *  -EINVAL:    invalid input
+ *  -E2BIG:     excess data passed
+ *  -EFAULT:    failed to copy to/from userspace
+ *  -EBUSY:     mailbox in recovery or in use
+ *  -ENODEV:    driver not bound with PSP device
+ *  -EACCES:    request isn't authorized
+ *  -EINVAL:    invalid parameter
+ *  -ETIMEDOUT: request timed out
+ *  -EAGAIN:    invalid request for state machine
+ *  -ENOENT:    not implemented
+ *  -ENFILE:    overflow
+ *  -EPERM:     invalid signature
+ *  -EIO:       unknown error
+ */
+#define DBC_IOC_TYPE   'D'
+
+/**
+ * DBCIOCNONCE - Fetch a nonce from the PSP for authenticating commands.
+ *               If a nonce is fetched without authentication it can only
+ *               be utilized for one command.
+ *               If a nonce is fetched with authentication it can be used
+ *               for multiple requests.
+ */
+#define DBCIOCNONCE    _IOWR(DBC_IOC_TYPE, 0x1, struct dbc_user_nonce)
+
+/**
+ * DBCIOCUID - Set the user ID (UID) of a calling process.
+ *             The user ID is 16 bytes long. It must be programmed using a
+ *             32 byte signature built using the nonce fetched from
+ *             DBCIOCNONCE.
+ *             The UID can only be set once until the system is rebooted.
+ */
+#define DBCIOCUID      _IOW(DBC_IOC_TYPE, 0x2, struct dbc_user_setuid)
+
+/**
+ * DBCIOCPARAM - Set or get a parameter from the PSP.
+ *               This request will only work after DBCIOCUID has successfully
+ *               set the UID of the calling process.
+ *               Whether the parameter is set or get is controlled by the
+ *               message ID in the request.
+ *               This command must be sent using a 32 byte signature built
+ *               using the nonce fetched from DBCIOCNONCE.
+ *               When the command succeeds, the 32 byte signature will be
+ *               updated by the PSP for software to authenticate the results.
+ */
+#define DBCIOCPARAM    _IOWR(DBC_IOC_TYPE, 0x3, struct dbc_user_param)
+
+/**
+ * enum dbc_cmd_msg - Messages utilized by DBCIOCPARAM
+ * @PARAM_GET_FMAX_CAP:                Get frequency cap (MHz)
+ * @PARAM_SET_FMAX_CAP:                Set frequency cap (MHz)
+ * @PARAM_GET_PWR_CAP:         Get socket power cap (mW)
+ * @PARAM_SET_PWR_CAP:         Set socket power cap (mW)
+ * @PARAM_GET_GFX_MODE:                Get graphics mode (0/1)
+ * @PARAM_SET_GFX_MODE:                Set graphics mode (0/1)
+ * @PARAM_GET_CURR_TEMP:       Get current temperature (degrees C)
+ * @PARAM_GET_FMAX_MAX:                Get maximum allowed value for frequency (MHz)
+ * @PARAM_GET_FMAX_MIN:                Get minimum allowed value for frequency (MHz)
+ * @PARAM_GET_SOC_PWR_MAX:     Get maximum allowed value for SoC power (mw)
+ * @PARAM_GET_SOC_PWR_MIN:     Get minimum allowed value for SoC power (mw)
+ * @PARAM_GET_SOC_PWR_CUR:     Get current value for SoC Power (mW)
+ */
+enum dbc_cmd_msg {
+       PARAM_GET_FMAX_CAP      = 0x3,
+       PARAM_SET_FMAX_CAP      = 0x4,
+       PARAM_GET_PWR_CAP       = 0x5,
+       PARAM_SET_PWR_CAP       = 0x6,
+       PARAM_GET_GFX_MODE      = 0x7,
+       PARAM_SET_GFX_MODE      = 0x8,
+       PARAM_GET_CURR_TEMP     = 0x9,
+       PARAM_GET_FMAX_MAX      = 0xA,
+       PARAM_GET_FMAX_MIN      = 0xB,
+       PARAM_GET_SOC_PWR_MAX   = 0xC,
+       PARAM_GET_SOC_PWR_MIN   = 0xD,
+       PARAM_GET_SOC_PWR_CUR   = 0xE,
+};
+
+#endif /* __PSP_DBC_USER_H__ */
index d139778..2e08397 100644 (file)
@@ -259,7 +259,6 @@ obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 obj-$(CONFIG_GLOB) += glob.o
 obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
 
-obj-$(CONFIG_MPILIB) += mpi/
 obj-$(CONFIG_DIMLIB) += dim/
 obj-$(CONFIG_SIGNATURE) += digsig.o
 
index 6ec2d45..8d1446c 100644 (file)
@@ -53,3 +53,5 @@ libblake2s-y                                  += blake2s-selftest.o
 libchacha20poly1305-y                          += chacha20poly1305-selftest.o
 libcurve25519-y                                        += curve25519-selftest.o
 endif
+
+obj-$(CONFIG_MPILIB) += mpi/
similarity index 100%
rename from lib/mpi/Makefile
rename to lib/crypto/mpi/Makefile
similarity index 100%
rename from lib/mpi/ec.c
rename to lib/crypto/mpi/ec.c
similarity index 100%
rename from lib/mpi/longlong.h
rename to lib/crypto/mpi/longlong.h
similarity index 100%
rename from lib/mpi/mpi-add.c
rename to lib/crypto/mpi/mpi-add.c
similarity index 100%
rename from lib/mpi/mpi-bit.c
rename to lib/crypto/mpi/mpi-bit.c
similarity index 96%
rename from lib/mpi/mpi-cmp.c
rename to lib/crypto/mpi/mpi-cmp.c
index c4cfa3f..0835b62 100644 (file)
@@ -25,8 +25,12 @@ int mpi_cmp_ui(MPI u, unsigned long v)
        mpi_limb_t limb = v;
 
        mpi_normalize(u);
-       if (!u->nlimbs && !limb)
-               return 0;
+       if (u->nlimbs == 0) {
+               if (v == 0)
+                       return 0;
+               else
+                       return -1;
+       }
        if (u->sign)
                return -1;
        if (u->nlimbs > 1)
similarity index 100%
rename from lib/mpi/mpi-div.c
rename to lib/crypto/mpi/mpi-div.c
similarity index 100%
rename from lib/mpi/mpi-inv.c
rename to lib/crypto/mpi/mpi-inv.c
similarity index 100%
rename from lib/mpi/mpi-mod.c
rename to lib/crypto/mpi/mpi-mod.c
similarity index 100%
rename from lib/mpi/mpi-mul.c
rename to lib/crypto/mpi/mpi-mul.c
similarity index 100%
rename from lib/mpi/mpi-pow.c
rename to lib/crypto/mpi/mpi-pow.c
similarity index 100%
rename from lib/mpi/mpicoder.c
rename to lib/crypto/mpi/mpicoder.c
similarity index 100%
rename from lib/mpi/mpih-cmp.c
rename to lib/crypto/mpi/mpih-cmp.c
similarity index 100%
rename from lib/mpi/mpih-div.c
rename to lib/crypto/mpi/mpih-div.c
similarity index 100%
rename from lib/mpi/mpih-mul.c
rename to lib/crypto/mpi/mpih-mul.c
similarity index 100%
rename from lib/mpi/mpiutil.c
rename to lib/crypto/mpi/mpiutil.c
diff --git a/tools/crypto/ccp/.gitignore b/tools/crypto/ccp/.gitignore
new file mode 100644 (file)
index 0000000..bee8a64
--- /dev/null
@@ -0,0 +1 @@
+__pycache__
diff --git a/tools/crypto/ccp/Makefile b/tools/crypto/ccp/Makefile
new file mode 100644 (file)
index 0000000..ae4a66d
--- /dev/null
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Build against the in-tree uapi headers so <linux/psp-dbc.h> resolves
+# without an installed kernel.
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../../include/uapi -I../../../include
+
+TARGET = dbc_library.so
+
+all: $(TARGET)
+
+# Shared object consumed by dbc.py via ctypes; drop the executable bit
+# because it is a library, not a program.
+dbc_library.so: dbc.c
+       $(CC) $(CFLAGS) $(LDFLAGS) -shared -o $@ $<
+       chmod -x $@
+
+clean:
+       $(RM) $(TARGET)
diff --git a/tools/crypto/ccp/dbc.c b/tools/crypto/ccp/dbc.c
new file mode 100644 (file)
index 0000000..37e8131
--- /dev/null
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Dynamic Boost Control sample library
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+/* if uapi header isn't installed, this might not yet exist */
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+#include <linux/psp-dbc.h>
+
+/*
+ * get_nonce() - fetch a nonce from the DBC device.
+ * @fd: open file descriptor for the DBC character device
+ * @nonce_out: receives sizeof(tmp.nonce) bytes on success; must not be NULL
+ * @signature: optional signature authenticating the request, or NULL to
+ *             ask for an unauthenticated nonce
+ *
+ * Return: 0 on success, otherwise the non-zero ioctl() result.
+ */
+int get_nonce(int fd, void *nonce_out, void *signature)
+{
+       struct dbc_user_nonce tmp = {
+               .auth_needed = !!signature,
+       };
+       int ret;
+
+       assert(nonce_out);
+
+       if (signature)
+               memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+       ret = ioctl(fd, DBCIOCNONCE, &tmp);
+       if (ret)
+               return ret;
+       memcpy(nonce_out, tmp.nonce, sizeof(tmp.nonce));
+
+       return 0;
+}
+
+/*
+ * set_uid() - assign the caller's UID to the DBC device.
+ * @fd: open file descriptor for the DBC character device
+ * @uid: sizeof(tmp.uid) bytes identifying the caller; must not be NULL
+ * @signature: signature authenticating the request; must not be NULL
+ *
+ * Both fixed-size fields of @tmp are fully overwritten by the memcpy()
+ * calls below, so the struct needs no zero-initialization.
+ *
+ * Return: the ioctl() result (0 on success).
+ */
+int set_uid(int fd, __u8 *uid, __u8 *signature)
+{
+       struct dbc_user_setuid tmp;
+
+       assert(uid);
+       assert(signature);
+
+       memcpy(tmp.uid, uid, sizeof(tmp.uid));
+       memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+       return ioctl(fd, DBCIOCUID, &tmp);
+}
+
+int process_param(int fd, int msg_index, __u8 *signature, int *data)
+{
+       struct dbc_user_param tmp = {
+               .msg_index = msg_index,
+               .param = *data,
+       };
+       int ret;
+
+       assert(signature);
+       assert(data);
+
+       memcpy(tmp.signature, signature, sizeof(tmp.signature));
+
+       ret = ioctl(fd, DBCIOCPARAM, &tmp);
+       if (ret)
+               return ret;
+
+       *data = tmp.param;
+       return 0;
+}
diff --git a/tools/crypto/ccp/dbc.py b/tools/crypto/ccp/dbc.py
new file mode 100644 (file)
index 0000000..3f6a825
--- /dev/null
@@ -0,0 +1,64 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+
+import ctypes
+import os
+
+# Byte sizes of the fixed-width fields in the dbc_user_* uapi structures
+# (include/uapi/linux/psp-dbc.h); the helpers below size their buffers
+# and length checks from these.
+DBC_UID_SIZE = 16
+DBC_NONCE_SIZE = 16
+DBC_SIG_SIZE = 32
+
+# Parameter message indexes.  Kept as 1-tuples so process_param() can
+# reject a bare integer passed by mistake.
+PARAM_GET_FMAX_CAP = (0x3,)
+PARAM_SET_FMAX_CAP = (0x4,)
+PARAM_GET_PWR_CAP = (0x5,)
+PARAM_SET_PWR_CAP = (0x6,)
+PARAM_GET_GFX_MODE = (0x7,)
+PARAM_SET_GFX_MODE = (0x8,)
+PARAM_GET_CURR_TEMP = (0x9,)
+PARAM_GET_FMAX_MAX = (0xA,)
+PARAM_GET_FMAX_MIN = (0xB,)
+PARAM_GET_SOC_PWR_MAX = (0xC,)
+PARAM_GET_SOC_PWR_MIN = (0xD,)
+PARAM_GET_SOC_PWR_CUR = (0xE,)
+
+DEVICE_NODE = "/dev/dbc"
+
+# Sample C wrapper (built from tools/crypto/ccp/dbc.c) that issues the
+# actual ioctl() calls; loaded from the current working directory at
+# import time.
+lib = ctypes.CDLL("./dbc_library.so", mode=ctypes.RTLD_GLOBAL)
+
+
def handle_error(code):
    """Translate a negative return code from the C library into OSError."""
    errno_value = -code
    raise OSError(errno_value, os.strerror(errno_value))
+
+
def get_nonce(device, signature):
    """Request a nonce from the DBC device.

    device: open file object for the DBC character device (required).
    signature: signature bytes authenticating the request, or a falsy
        value for an unauthenticated nonce.
    Returns the nonce as bytes; raises ValueError on a missing device
    and OSError (via handle_error) on ioctl failure.
    """
    if not device:
        raise ValueError("Device required")
    buf = ctypes.create_string_buffer(DBC_NONCE_SIZE)
    ret = lib.get_nonce(device.fileno(), ctypes.byref(buf), signature)
    if ret:
        handle_error(ret)
    # .raw, not .value: the nonce is binary data and .value would
    # silently truncate it at the first NUL byte.
    return buf.raw
+
+
def set_uid(device, new_uid, signature):
    """Assign the caller's UID to the DBC device.

    Both a UID and a signature are mandatory; raises ValueError when
    either is missing, OSError (via handle_error) on ioctl failure.
    Returns True on success.
    """
    if not signature:
        raise ValueError("Signature required")
    if not new_uid:
        raise ValueError("UID required")
    rc = lib.set_uid(device.fileno(), new_uid, signature)
    if rc:
        handle_error(rc)
    return True
+
+
def process_param(device, message, signature, data=None):
    """Send a parameter get/set message to the DBC device.

    message: one of the PARAM_* 1-tuples.
    data: value for "set" messages (optional).
    Returns (arg, signature) where arg is the ctypes.c_int holding the
    (possibly updated) parameter value.
    """
    if not signature:
        raise ValueError("Signature required")
    # isinstance instead of an exact type() comparison: tuple
    # subclasses are acceptable message containers too.
    if not isinstance(message, tuple):
        raise ValueError("Expected message tuple")
    # Compare against None rather than truthiness so a legitimate
    # data value of 0 is not mistaken for "no data supplied".
    arg = ctypes.c_int(data if data is not None else 0)
    ret = lib.process_param(device.fileno(), message[0], signature, ctypes.pointer(arg))
    if ret:
        handle_error(ret)
    return arg, signature
diff --git a/tools/crypto/ccp/dbc_cli.py b/tools/crypto/ccp/dbc_cli.py
new file mode 100755 (executable)
index 0000000..bf52233
--- /dev/null
@@ -0,0 +1,134 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+import argparse
+import binascii
+import os
+import errno
+from dbc import *
+
+# Human-readable descriptions for the errno values the DBC driver hands
+# back through the sample library.
+ERRORS = {
+    errno.EACCES: "Access is denied",
+    errno.E2BIG: "Excess data provided",
+    errno.EINVAL: "Bad parameters",
+    errno.EAGAIN: "Bad state",
+    errno.ENOENT: "Not implemented or message failure",
+    errno.EBUSY: "Busy",
+    errno.ENFILE: "Overflow",
+    errno.EPERM: "Signature invalid",
+}
+
+# Map the CLI --message names onto the PARAM_* indexes exported by dbc.py.
+messages = {
+    "get-fmax-cap": PARAM_GET_FMAX_CAP,
+    "set-fmax-cap": PARAM_SET_FMAX_CAP,
+    "get-power-cap": PARAM_GET_PWR_CAP,
+    "set-power-cap": PARAM_SET_PWR_CAP,
+    "get-graphics-mode": PARAM_GET_GFX_MODE,
+    "set-graphics-mode": PARAM_SET_GFX_MODE,
+    "get-current-temp": PARAM_GET_CURR_TEMP,
+    "get-fmax-max": PARAM_GET_FMAX_MAX,
+    "get-fmax-min": PARAM_GET_FMAX_MIN,
+    "get-soc-power-max": PARAM_GET_SOC_PWR_MAX,
+    "get-soc-power-min": PARAM_GET_SOC_PWR_MIN,
+    "get-soc-power-cur": PARAM_GET_SOC_PWR_CUR,
+}
+
+
+def _pretty_buffer(ba):
+    return str(binascii.hexlify(ba, " "))
+
+
def parse_args():
    """Build the CLI argument parser and evaluate sys.argv with it."""
    cli = argparse.ArgumentParser(
        description="Dynamic Boost control command line interface"
    )
    # The positional command selects which ioctl path is exercised.
    cli.add_argument(
        "command",
        choices=["get-nonce", "get-param", "set-param", "set-uid"],
        help="Command to send",
    )
    cli.add_argument("--device", default="/dev/dbc", help="Device to operate")
    cli.add_argument("--signature", help="File containing signature for command")
    cli.add_argument("--message", choices=messages.keys(), help="Message index")
    cli.add_argument("--data", help="Argument to pass to message")
    cli.add_argument("--uid", help="File containing UID to pass")
    return cli.parse_args()
+
+
def pretty_error(code):
    """Print a friendly description of *code*, or the raw value if unknown."""
    description = ERRORS.get(code)
    if description is not None:
        print(description)
    else:
        print("failed with return code %d" % code)
+
+
+# Command-line entry point: validate the input files, then dispatch the
+# requested command through the helpers imported from dbc.
+if __name__ == "__main__":
+    args = parse_args()
+    data = 0
+    sig = None
+    uid = None
+    if not os.path.exists(args.device):
+        raise IOError("Missing device {device}".format(device=args.device))
+    # Signature and UID arguments name files holding raw binary blobs of
+    # fixed size; read and length-check them up front.
+    if args.signature:
+        if not os.path.exists(args.signature):
+            raise ValueError("Invalid signature file %s" % args.signature)
+        with open(args.signature, "rb") as f:
+            sig = f.read()
+        if len(sig) != DBC_SIG_SIZE:
+            raise ValueError(
+                "Invalid signature length %d (expected %d)" % (len(sig), DBC_SIG_SIZE)
+            )
+    if args.uid:
+        if not os.path.exists(args.uid):
+            raise ValueError("Invalid uid file %s" % args.uid)
+        with open(args.uid, "rb") as f:
+            uid = f.read()
+        if len(uid) != DBC_UID_SIZE:
+            raise ValueError(
+                "Invalid UID length %d (expected %d)" % (len(uid), DBC_UID_SIZE)
+            )
+    if args.data:
+        # Accept decimal first, then fall back to base-16 for hex input.
+        try:
+            data = int(args.data, 10)
+        except ValueError:
+            data = int(args.data, 16)
+
+    with open(args.device) as d:
+        if args.command == "get-nonce":
+            try:
+                nonce = get_nonce(d, sig)
+                print("Nonce: %s" % _pretty_buffer(bytes(nonce)))
+            except OSError as e:
+                pretty_error(e.errno)
+        elif args.command == "set-uid":
+            try:
+                result = set_uid(d, uid, sig)
+                if result:
+                    print("Set UID")
+            except OSError as e:
+                pretty_error(e.errno)
+        elif args.command == "get-param":
+            # A "get" command must not be paired with a "set" message index.
+            if not args.message or args.message.startswith("set"):
+                raise ValueError("Invalid message %s" % args.message)
+            try:
+                param, signature = process_param(d, messages[args.message], sig)
+                print(
+                    "Parameter: {par}, response signature {sig}".format(
+                        par=param,
+                        sig=_pretty_buffer(bytes(signature)),
+                    )
+                )
+            except OSError as e:
+                pretty_error(e.errno)
+        elif args.command == "set-param":
+            # Conversely, a "set" command needs a "set-*" message index.
+            if not args.message or args.message.startswith("get"):
+                raise ValueError("Invalid message %s" % args.message)
+            try:
+                param, signature = process_param(d, messages[args.message], sig, data)
+                print(
+                    "Parameter: {par}, response signature {sig}".format(
+                        par=param,
+                        sig=_pretty_buffer(bytes(signature)),
+                    )
+                )
+            except OSError as e:
+                pretty_error(e.errno)
diff --git a/tools/crypto/ccp/test_dbc.py b/tools/crypto/ccp/test_dbc.py
new file mode 100755 (executable)
index 0000000..998bb3e
--- /dev/null
@@ -0,0 +1,266 @@
#!/usr/bin/python3
# SPDX-License-Identifier: GPL-2.0
import unittest
import os
import time
import glob
import fcntl
import ctypes
from dbc import *

# Artificial delay between set commands
SET_DELAY = 0.5


# Python equivalents of the kernel's _IOW()/_IOWR() macros (asm-generic
# layout: dir << 30 | size << 16 | type << 8 | nr).  The original file
# used IOW/IOWR and fcntl without defining/importing them, so every
# TestInvalidIoctls case died with a NameError instead of testing the
# driver.
def IOW(ioc_type, nr, struct):
    # _IOC_WRITE == 1
    return (1 << 30) | (ctypes.sizeof(struct) << 16) | (ioc_type << 8) | nr


def IOWR(ioc_type, nr, struct):
    # _IOC_READ | _IOC_WRITE == 3
    return (3 << 30) | (ctypes.sizeof(struct) << 16) | (ioc_type << 8) | nr
+
+
class invalid_param(ctypes.Structure):
    """Deliberately undersized ioctl payload (a single byte), used to
    provoke EINVAL from the driver's size validation."""

    _fields_ = [("data", ctypes.c_uint8)]
+
+
def system_is_secured() -> bool:
    """Return True when the PSP reports the part as fused (secured).

    Defaults to True (secured) when no ccp device exposes a fused_part
    attribute, so the unfused-only tests are skipped.  The original
    indexed glob()[0] unconditionally and raised IndexError on machines
    without the ccp driver bound.
    """
    candidates = glob.glob("/sys/bus/pci/drivers/ccp/**/fused_part")
    if not candidates:
        return True
    with open(candidates[0], "r") as r:
        return int(r.read()) == 1
+
+
class DynamicBoostControlTest(unittest.TestCase):
    """Shared fixture: opens the DBC device node and carries the dummy
    credentials used by the concrete test classes."""

    def __init__(self, data) -> None:
        # Recognisable placeholder credentials: all-F signature, all-1 UID.
        self.d = None
        self.signature = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
        self.uid = "1111111111111111"
        super().__init__(data)

    def setUp(self) -> None:
        self.d = open(DEVICE_NODE)
        return super().setUp()

    def tearDown(self) -> None:
        # setUp may have been skipped, so the handle can still be None.
        if self.d is not None:
            self.d.close()
        return super().tearDown()
+
+
+# Sanity checks for machines that do not expose /dev/dbc at all.
+class TestUnsupportedSystem(DynamicBoostControlTest):
+    def setUp(self) -> None:
+        if os.path.exists(DEVICE_NODE):
+            self.skipTest("system is supported")
+        # Opening the absent node must fail with ENOENT (errno 2).
+        with self.assertRaises(FileNotFoundError) as error:
+            super().setUp()
+        self.assertEqual(error.exception.errno, 2)
+
+    def test_unauthenticated_nonce(self) -> None:
+        """fetch unauthenticated nonce"""
+        # self.d is still None here, so the library guard must raise.
+        with self.assertRaises(ValueError) as error:
+            get_nonce(self.d, None)
+
+
class TestInvalidIoctls(DynamicBoostControlTest):
    """Hand-built ioctl requests with a bogus payload must all be
    rejected by the driver with EINVAL."""

    def __init__(self, data) -> None:
        self.data = invalid_param()
        self.data.data = 1
        super().__init__(data)

    def setUp(self) -> None:
        if not os.path.exists(DEVICE_NODE):
            self.skipTest("system is unsupported")
        return super().setUp()

    def _expect_einval(self, request) -> None:
        # Issue the raw ioctl and require it to fail with EINVAL (22).
        with self.assertRaises(OSError) as error:
            fcntl.ioctl(self.d, request, self.data, True)
        self.assertEqual(error.exception.errno, 22)

    def test_invalid_nonce_ioctl(self) -> None:
        """tries to call get_nonce ioctl with invalid data structures"""
        # 0x1 (get nonce), and invalid data
        self._expect_einval(IOWR(ord("D"), 0x01, invalid_param))

    def test_invalid_setuid_ioctl(self) -> None:
        """tries to call set_uid ioctl with invalid data structures"""
        # 0x2 (set uid), and invalid data
        self._expect_einval(IOW(ord("D"), 0x02, invalid_param))

    def test_invalid_setuid_rw_ioctl(self) -> None:
        """tries to call set_uid ioctl with invalid data structures"""
        # 0x2 as RW (set uid), and invalid data
        self._expect_einval(IOWR(ord("D"), 0x02, invalid_param))

    def test_invalid_param_ioctl(self) -> None:
        """tries to call param ioctl with invalid data structures"""
        # 0x3 (param), and invalid data
        self._expect_einval(IOWR(ord("D"), 0x03, invalid_param))

    def test_invalid_call_ioctl(self) -> None:
        """tries to call the DBC ioctl with invalid data structures"""
        # 0x4, and invalid data
        self._expect_einval(IOWR(ord("D"), 0x04, invalid_param))
+
+
class TestInvalidSignature(DynamicBoostControlTest):
    """On a fused system every authenticated call made with a bogus
    signature must be refused with EPERM; unauthenticated nonce fetches
    must still succeed."""

    def setUp(self) -> None:
        if not os.path.exists(DEVICE_NODE):
            self.skipTest("system is unsupported")
        if not system_is_secured():
            self.skipTest("system is unfused")
        return super().setUp()

    def _expect_eperm(self, fn, *fn_args) -> None:
        # Run the call and require it to fail with EPERM (1).
        with self.assertRaises(OSError) as error:
            fn(*fn_args)
        self.assertEqual(error.exception.errno, 1)

    def test_unauthenticated_nonce(self) -> None:
        """fetch unauthenticated nonce"""
        get_nonce(self.d, None)

    def test_multiple_unauthenticated_nonce(self) -> None:
        """ensure state machine always returns nonce"""
        for _ in range(2):
            get_nonce(self.d, None)

    def test_authenticated_nonce(self) -> None:
        """fetch authenticated nonce"""
        self._expect_eperm(get_nonce, self.d, self.signature)

    def test_set_uid(self) -> None:
        """set uid"""
        self._expect_eperm(set_uid, self.d, self.uid, self.signature)

    def test_get_param(self) -> None:
        """fetch a parameter"""
        self._expect_eperm(process_param, self.d, PARAM_GET_SOC_PWR_CUR, self.signature)

    def test_set_param(self) -> None:
        """set a parameter"""
        self._expect_eperm(process_param, self.d, PARAM_SET_PWR_CAP, self.signature, 1000)
+
+
class TestUnFusedSystem(DynamicBoostControlTest):
    """Parameter get/set round-trips; only runs on an unfused part.

    process_param() returns a (ctypes.c_int, signature) tuple, so the
    results must be unpacked and read through .value -- the previous
    version accessed a non-existent .parameter attribute on the tuple
    and every test died with AttributeError.
    """

    def setup_identity(self) -> None:
        """sets up the identity of the caller"""
        # if already authenticated these may fail
        try:
            get_nonce(self.d, None)
        except PermissionError:
            pass
        try:
            set_uid(self.d, self.uid, self.signature)
        except BlockingIOError:
            pass
        try:
            get_nonce(self.d, self.signature)
        except PermissionError:
            pass

    def setUp(self) -> None:
        if not os.path.exists(DEVICE_NODE):
            self.skipTest("system is unsupported")
        if system_is_secured():
            self.skipTest("system is fused")
        super().setUp()
        self.setup_identity()
        time.sleep(SET_DELAY)

    def test_get_valid_param(self) -> None:
        """fetch all possible parameters"""
        # SOC power
        soc_power_max, _ = process_param(self.d, PARAM_GET_SOC_PWR_MAX, self.signature)
        soc_power_min, _ = process_param(self.d, PARAM_GET_SOC_PWR_MIN, self.signature)
        self.assertGreater(soc_power_max.value, soc_power_min.value)

        # fmax
        fmax_max, _ = process_param(self.d, PARAM_GET_FMAX_MAX, self.signature)
        fmax_min, _ = process_param(self.d, PARAM_GET_FMAX_MIN, self.signature)
        self.assertGreater(fmax_max.value, fmax_min.value)

        # cap values
        keys = {
            "fmax-cap": PARAM_GET_FMAX_CAP,
            "power-cap": PARAM_GET_PWR_CAP,
            "current-temp": PARAM_GET_CURR_TEMP,
            "soc-power-cur": PARAM_GET_SOC_PWR_CUR,
        }
        for k in keys:
            result, _ = process_param(self.d, keys[k], self.signature)
            self.assertGreater(result.value, 0)

    def test_get_invalid_param(self) -> None:
        """fetch an invalid parameter"""
        try:
            set_uid(self.d, self.uid, self.signature)
        except OSError:
            pass
        with self.assertRaises(OSError) as error:
            process_param(self.d, (0xF,), self.signature)
        self.assertEqual(error.exception.errno, 22)

    def test_set_fmax(self) -> None:
        """get/set fmax limit"""
        # fetch current
        original, _ = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)

        # set the fmax
        target = original.value - 100
        process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, target)
        time.sleep(SET_DELAY)
        new, _ = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
        self.assertEqual(new.value, target)

        # revert back to current
        process_param(self.d, PARAM_SET_FMAX_CAP, self.signature, original.value)
        time.sleep(SET_DELAY)
        cur, _ = process_param(self.d, PARAM_GET_FMAX_CAP, self.signature)
        self.assertEqual(cur.value, original.value)

    def test_set_power_cap(self) -> None:
        """get/set power cap limit"""
        # fetch current
        original, _ = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)

        # set the power cap
        target = original.value - 10
        process_param(self.d, PARAM_SET_PWR_CAP, self.signature, target)
        time.sleep(SET_DELAY)
        new, _ = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
        self.assertEqual(new.value, target)

        # revert back to current
        process_param(self.d, PARAM_SET_PWR_CAP, self.signature, original.value)
        time.sleep(SET_DELAY)
        cur, _ = process_param(self.d, PARAM_GET_PWR_CAP, self.signature)
        self.assertEqual(cur.value, original.value)

    def test_set_3d_graphics_mode(self) -> None:
        """set/get 3d graphics mode"""
        # these aren't currently implemented but may be some day
        # they are *expected* to fail
        with self.assertRaises(OSError) as error:
            process_param(self.d, PARAM_GET_GFX_MODE, self.signature)
        self.assertEqual(error.exception.errno, 2)

        time.sleep(SET_DELAY)

        with self.assertRaises(OSError) as error:
            process_param(self.d, PARAM_SET_GFX_MODE, self.signature, 1)
        self.assertEqual(error.exception.errno, 2)
+
+
+# Allow running the whole suite directly: python3 test_dbc.py
+if __name__ == "__main__":
+    unittest.main()