// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/of.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha1.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

/* Intermittent includes, delete this after v5.14-rc1 */
#include <linux/soc/ixp4xx/cpu.h>
#include <mach/ixp4xx-regs.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9
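
/*
 * A sketch of how the mode byte is built, inferred from the flag usage
 * later in this file (not from NPE microcode documentation): the
 * single-bit NPE_OP_* flags above are ORed into crypt_ctl->mode for
 * data-path requests. An out-of-place skcipher encryption, for example,
 * ends up carrying
 *
 *	NPE_OP_HMAC_DISABLE | NPE_OP_CRYPT_ENCRYPT |
 *	NPE_OP_CRYPT_ENABLE | NPE_OP_NOT_IN_PLACE
 *	== 0x40 | 0x80 | 0x08 | 0x20 == 0xe8
 *
 * while the multi-bit opcodes (NPE_OP_HASH_GEN_ICV, NPE_OP_ENC_GEN_KEY)
 * replace the whole byte for one-shot control operations.
 */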

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)
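
/*
 * Illustration (a sketch, assuming the reconstructed KEYLEN_* values
 * above): the per-direction cipher config word is the OR of one value
 * from each group plus the key-length field. cbc(aes) encryption with a
 * 128-bit key, as composed by setup_cipher() below, becomes
 *
 *	CIPH_ENCR | MOD_AES | MOD_CBC_ENC | MOD_AES128
 *	== 0x0400 | 0x0800 | 0x2000 | 0x0804 == 0x2c04
 *
 * which is written big-endian as the first word of the NPE context.
 */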

#define MAX_IVLEN   16
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned int ctl_flags;
	union {
		struct skcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	u8 iv[MAX_IVLEN];
	bool encrypt;
	struct skcipher_request fallback_req;	// keep at the end
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned int salted;
	atomic_t configuring;
	struct completion completion;
	struct crypto_skcipher *fallback_tfm;
};

struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};

static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;

static unsigned int send_qid;
static unsigned int recv_qid;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static struct platform_device *pdev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}
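
/*
 * A note on the translation above (an invariant read out of
 * setup_crypt_desc() and one_packet(), not documented elsewhere): all
 * crypt_ctl descriptors live in one coherent DMA array, so host/NPE
 * address translation is plain pointer arithmetic - descriptor i sits at
 * crypt_phys + i * sizeof(struct crypt_ctl). The BUILD_BUG_ON() in
 * setup_crypt_desc() pins the descriptor size to 64 bytes, leaving the
 * low address bits free; one_packet() relies on that when it extracts a
 * status flag from bit 0 of the address returned on the receive queue.
 */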

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static DEFINE_SPINLOCK(desc_lock);
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static DEFINE_SPINLOCK(emerg_lock);
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}
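
/*
 * Why two allocators (a reading of the code above, not separate
 * documentation): get_crypt_desc() only ever hands out descriptors
 * [0, NPE_QLEN) for the data path, while get_crypt_desc_emerg() starts
 * its static index at NPE_QLEN and walks the remaining
 * [NPE_QLEN, NPE_QLEN_TOTAL) slots. Control operations (HMAC ICV
 * generation, reverse-AES key generation) can therefore still obtain a
 * descriptor while every data-path slot is in flight, and they only fall
 * back to the reserved region after trying the normal one.
 */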

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
					 decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt)
			finish_scattered_hmac(crypt);

		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
		unsigned int offset;

		if (ivsize > 0) {
			offset = req->cryptlen - ivsize;
			if (req_ctx->encrypt) {
				scatterwalk_map_and_copy(req->iv, req->dst,
							 offset, ivsize, 0);
			} else {
				memcpy(req->iv, req_ctx->iv, ivsize);
				memzero_explicit(req_ctx->iv, ivsize);
			}
		}

		if (req_ctx->dst)
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(recv_qid);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}
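
/*
 * Completion batching (an observation on the code above): the receive
 * queue's "not empty" interrupt only schedules the tasklet;
 * crypto_done_action() then drains at most four completions per run and
 * reschedules itself in case the queue still holds entries. This bounds
 * the work done per softirq pass instead of looping until the queue is
 * empty under load.
 */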

static int init_ixp_crypto(struct device *dev)
{
	struct device_node *np = dev->of_node;
	u32 msg[2] = { 0, 0 };
	int ret = -ENODEV;
	u32 npe_id;

	dev_info(dev, "probing...\n");

	/* Locate the NPE and queue manager to use from device tree */
	if (IS_ENABLED(CONFIG_OF) && np) {
		struct of_phandle_args queue_spec;
		struct of_phandle_args npe_spec;

		ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
						       1, 0, &npe_spec);
		if (ret) {
			dev_err(dev, "no NPE engine specified\n");
			return -ENODEV;
		}
		npe_id = npe_spec.args[0];

		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
						       &queue_spec);
		if (ret) {
			dev_err(dev, "no rx queue phandle\n");
			return -ENODEV;
		}
		recv_qid = queue_spec.args[0];

		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
						       &queue_spec);
		if (ret) {
			dev_err(dev, "no txready queue phandle\n");
			return -ENODEV;
		}
		send_qid = queue_spec.args[0];
	} else {
		/*
		 * Hardcoded engine when using platform data, this goes away
		 * when we switch to using DT only.
		 */
		npe_id = 2;
		send_qid = 29;
		recv_qid = 30;
	}

	npe_c = npe_request(npe_id);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so ensure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
				      32, 0);
	ret = -ENOMEM;
	if (!buffer_pool)
		goto err;

	ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
	if (!ctx_pool)
		goto err;

	ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(send_qid);
		goto err;
	}
	qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(recv_qid);
	return 0;

npe_error:
	dev_err(dev, "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(recv_qid);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(send_qid);
	qmgr_release_queue(recv_qid);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt)
		dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx)
		return -ENOMEM;

	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret)
		free_sa_dir(&ctx->encrypt);

	return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
	const char *name = crypto_tfm_alg_name(ctfm);

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
		       name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	pr_info("Fallback for %s is %s\n",
		crypto_tfm_alg_driver_name(&tfm->base),
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
		);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) +
				    crypto_skcipher_reqsize(ctx->fallback_tfm));
	return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);

	crypto_free_skcipher(ctx->fallback_tfm);
	exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
			      int init_len, u32 ctx_addr, const u8 *key,
			      int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
		pad[i] ^= xpad;

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
		      const u8 *key, int key_len, unsigned int digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
		+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
}
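
/*
 * How the HMAC precompute works (a summary of setup_auth() and
 * register_chain_var(), phrased against RFC 2104): instead of hashing
 * key^ipad / key^opad on every request, the key is padded to one hash
 * block, XORed with the ipad (0x36) or opad (0x5c) byte, and run through
 * the NPE once with NPE_OP_HASH_GEN_ICV. The resulting intermediate
 * digests land at itarget/otarget inside the per-direction NPE context,
 * right after the config word, and serve as the hash initial state for
 * every subsequent request on this tfm.
 */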

static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt)
		return -EAGAIN;

	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
			int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16:
			keylen_cfg = MOD_AES128;
			break;
		case 24:
			keylen_cfg = MOD_AES192;
			break;
		case 32:
			keylen_cfg = MOD_AES256;
			break;
		default:
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		err = crypto_des_verify_key(tfm, key);
		if (err)
			return err;
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt)
		return gen_rev_aes_key(tfm);

	return 0;
}
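
/*
 * A note on the reverse AES key (inferred from the two functions above):
 * AES decryption needs the inverse key schedule, and the NPE derives it
 * itself. gen_rev_aes_key() temporarily sets CIPH_ENCR in the decrypt
 * context, submits a dummy 16-byte (AES_BLOCK128) encryption with
 * NPE_OP_ENC_GEN_KEY, and points icv_rev_aes just past the config word
 * so that the engine can deposit the reverse key over the key area. The
 * completion path (CTL_FLAG_GEN_REVAES in one_packet()) then clears
 * CIPH_ENCR again.
 */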

static struct buffer_desc *chainup_buffers(struct device *dev,
					   struct scatterlist *sg, unsigned int nbytes,
					   struct buffer_desc *buf, gfp_t flags,
					   enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned int len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	if (ret)
		return ret;
	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctx->fallback_tfm,
				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* the nonce is stored in bytes at the end of the key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}
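
/*
 * Key layout for rfc3686(ctr(aes)), per RFC 3686 (illustration): the
 * last four bytes of the "key" handed to setkey are the per-SA nonce,
 * the rest is the raw AES key. A 20-byte input therefore splits as
 *
 *	[ 16-byte AES-128 key | 4-byte nonce ]
 *
 * and only the first part is forwarded to ablk_setkey() above.
 */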

static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
	struct ablk_ctx *rctx = skcipher_request_ctx(areq);
	int err;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (encrypt)
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	return err;
}

static int ablk_perform(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->cryptlen;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	unsigned int offset;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
		return ixp4xx_cipher_fallback(req, encrypt);

	if (qmgr_stat_full(send_qid))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	req_ctx->encrypt = encrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);
	if (ivsize > 0 && !encrypt) {
		offset = req->cryptlen - ivsize;
		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
	}
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
			     src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst)
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
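
/*
 * IV bookkeeping (a summary of the interplay between ablk_perform() and
 * one_packet()): for CBC the next IV is the last ciphertext block. On
 * encryption it can simply be copied out of req->dst after completion,
 * but on in-place decryption the engine overwrites that block, so it is
 * saved from req->src into req_ctx->iv before the descriptor is queued
 * and copied into req->iv (then wiped) when the completion arrives.
 */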

static int ablk_encrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->iv;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->iv = iv;
	ret = ablk_perform(req, 1);
	req->iv = info;
	return ret;
}
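
/*
 * Counter block layout built above, as defined by RFC 3686:
 *
 *	bytes  0..3   ctx->nonce (taken from the tail of the key)
 *	bytes  4..11  per-request IV (req->iv)
 *	bytes 12..15  block counter, big-endian, starting at 1
 *
 * Note that decryption also goes through ablk_perform(req, 1): CTR mode
 * XORs the same keystream in both directions.
 */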

static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(send_qid))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(send_qid));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}
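
/*
 * ICV placement (a summary of the logic above): crypt->icv_rev_aes tells
 * the NPE where to write (encrypt) or read (decrypt) the authentication
 * tag. When the tag lies entirely within the last buffer of the chain it
 * is addressed in place; when it straddles scatterlist entries, a bounce
 * buffer from buffer_pool is used instead, and finish_scattered_hmac()
 * copies the tag to its final destination on completion. This is also
 * why init_ixp_crypto() asserts that SHA1_DIGEST_SIZE fits in a
 * struct buffer_desc.
 */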

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}
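
/*
 * Example of the constraint enforced above (authsize must be a nonzero
 * multiple of 4, at most maxauthsize): for hmac(sha1) the permitted tag
 * lengths are 4, 8, 12, 16 and 20 bytes. The 4-byte granularity matches
 * the NPE config word, where setup_auth() packs the tag length as
 * (authsize << 6), i.e. (authsize / 4) << 8.
 */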

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.base.cra_name	= "cbc(des)",
		.base.cra_blocksize = DES_BLOCK_SIZE,

		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.base.cra_name	= "ecb(des)",
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name	= "cbc(des3_ede)",
		.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,

		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.setkey		= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name	= "ecb(des3_ede)",
		.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,

		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
		.setkey		= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name	= "cbc(aes)",
		.base.cra_blocksize = AES_BLOCK_SIZE,

		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base.cra_name	= "ecb(aes)",
		.base.cra_blocksize = AES_BLOCK_SIZE,

		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.base.cra_name	= "ctr(aes)",
		.base.cra_blocksize = 1,

		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.base.cra_name	= "rfc3686(ctr(aes))",
		.base.cra_blocksize = 1,

		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= ablk_rfc3686_setkey,
		.encrypt	= ablk_rfc3686_crypt,
		.decrypt	= ablk_rfc3686_crypt,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };
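
/*
 * Note on the two ctr(aes) entries above: cfg_dec deliberately uses
 * CIPH_ENCR. CTR mode turns the block cipher into a stream cipher, so
 * decryption performs exactly the same AES-encrypt-and-XOR operation as
 * encryption; only the ECB and CBC entries need a distinct decrypt
 * configuration.
 */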

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static int ixp_crypto_probe(struct platform_device *_pdev)
{
	struct device *dev = &_pdev->dev;
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = _pdev;

	err = init_ixp_crypto(dev);
	if (err)
		return err;

	for (i = 0; i < num; i++) {
		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
			CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* block ciphers */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY |
				      CRYPTO_ALG_NEED_FALLBACK;
		if (!cra->setkey)
			cra->setkey = ablk_setkey;
		if (!cra->encrypt)
			cra->encrypt = ablk_encrypt;
		if (!cra->decrypt)
			cra->decrypt = ablk_decrypt;
		cra->init = init_tfm_ablk;
		cra->exit = exit_tfm_ablk;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;
		if (crypto_register_skcipher(cra))
			dev_err(&pdev->dev, "Failed to register '%s'\n",
				cra->base.cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
			CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC |
				      CRYPTO_ALG_ALLOCATES_MEMORY;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			dev_err(&pdev->dev, "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static int ixp_crypto_remove(struct platform_device *pdev)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);

	return 0;
}

static const struct of_device_id ixp4xx_crypto_of_match[] = {
	{
		.compatible = "intel,ixp4xx-crypto",
	},
	{},
};

static struct platform_driver ixp_crypto_driver = {
	.probe = ixp_crypto_probe,
	.remove = ixp_crypto_remove,
	.driver = {
		.name = "ixp4xx_crypto",
		.of_match_table = ixp4xx_crypto_of_match,
	},
};
module_platform_driver(ixp_crypto_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");