// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__
#include <crypto/engine.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000
/* mostly device flags */
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
#define FLAGS_SGS_COPIED	9
#define FLAGS_SGS_ALLOCED	10
#define FLAGS_HUGE		11

/* context flags */
#define FLAGS_FINUP		16

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OP_UPDATE		1
#define OP_FINAL		2

#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD	256

#define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
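/*
 * Note on the flags layout: bits 18-20 (FLAGS_MODE_*) hold the algorithm
 * encoding exactly as the OMAP4+ MODE register expects it, shifted up by
 * FLAGS_MODE_SHIFT; omap_sham_write_ctrl_omap4() relies on this when it
 * derives the register value with a plain shift. The remaining bits are
 * ordinary test_bit()/set_bit() flags shared between the device and the
 * request context.
 */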
struct omap_sham_dev;

struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	u8			op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl[2];
	int			offset;	/* offset in current sg */
	int			sg_len;
	unsigned int		total;	/* total request */

	u8			buffer[] OMAP_ALIGNED;
};

struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};

struct omap_sham_ctx {
	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[];
};
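/*
 * base[] is a flexible array: the HMAC context is only allocated for the
 * hmac(*) algorithms, whose .cra_ctxsize below adds
 * sizeof(struct omap_sham_hmac_ctx) on top of the plain context.
 */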
#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_engine_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};

struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};

struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	int			err;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;
	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;

	unsigned long		flags;
	int			fallback_sz;
	struct crypto_queue	queue;
	struct ahash_request	*req;
	struct crypto_engine	*engine;

	const struct omap_sham_pdata	*pdata;
};

struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
static void omap_sham_finish_req(struct ahash_request *req, int err);

static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value;
	omap_sham_write(dd, address, val);
}

static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
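/*
 * The copy_hash callbacks below move the intermediate digest between the
 * request context and the accelerator: out == 1 saves the hardware digest
 * registers into ctx->digest, out == 0 restores them so that a previously
 * suspended request can resume where it left off.
 */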
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}

static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	if (!hash)
		return;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpup((__be32 *)in + i);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpup((__le32 *)in + i);
}
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}

static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}
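/*
 * On OMAP2 the byte count lives in CTRL bits 31:5 (hence "length << 5" and
 * the SHA_REG_CTRL_LENGTH mask above), and the single ALGO bit selects SHA1
 * over MD5. The OMAP2 block needs no separate length/trigger write, which
 * is why omap_sham_trigger_omap2() is empty.
 */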
static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}

static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
			      u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}
static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
				     (dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}
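/*
 * For HMAC on AUTO_XOR hardware, bctx->ipad holds the padded raw key (see
 * omap_sham_setkey()); the first block-size bytes of it are loaded half
 * into the ODIGEST register bank and half into the IDIGEST bank, and
 * SHA_REG_MODE_HMAC_KEY_PROC tells the accelerator to derive the inner
 * and outer pads itself. The key block counts as one hashed block, hence
 * the "ctx->digcnt += bs" above.
 */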
static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer;
	int mlen;
	struct sg_mapping_iter mi;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
		ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);

	mlen = 0;

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++) {
			if (!mlen) {
				sg_miter_next(&mi);
				mlen = mi.length;
				if (!mlen) {
					pr_err("sg miter failure.\n");
					return -EINVAL;
				}
				offset = 0;
				buffer = mi.addr;
			}
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
			mlen -= 4;
		}
		len32 -= min(len32, bs32);
	}

	sg_miter_stop(&mi);

	return -EINPROGRESS;
}
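/*
 * The PIO path above feeds the DIN FIFO one hash block per iteration,
 * polling for INPUT_READY between blocks, and still returns -EINPROGRESS:
 * completion is reported by the OUTPUT_READY interrupt and finished from
 * the done tasklet, exactly as in the DMA case.
 */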
static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}

static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
		ctx->digcnt, length, final);

	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}
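/*
 * The slave config points the DMA engine at the first DIN register with
 * 32-bit writes and a burst of one hash block, so the accelerator always
 * receives whole blocks; the request is then completed asynchronously
 * from omap_sham_dma_callback() via the done tasklet.
 */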
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	ctx->total = new_len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
		new_len -= ctx->bufcnt;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (len <= 0) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
			offset = 0;
			ctx->offset = 0;
			ctx->sg_len++;
			if (new_len <= 0)
				break;
			tmp = sg_next(tmp);
		}

		sg = sg_next(sg);
	}

	if (tmp)
		sg_mark_end(tmp);

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->offset += new_len - ctx->bufcnt;
	ctx->bufcnt = 0;

	return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs,
			      unsigned int new_len)
{
	int pages;
	void *buf;

	pages = get_order(new_len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 min(new_len, ctx->total) - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, new_len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);

	ctx->sg_len = 1;
	ctx->offset += new_len - ctx->bufcnt;
	ctx->bufcnt = 0;
	ctx->total = new_len;

	return 0;
}
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;
	int bufcnt = rctx->bufcnt;

	if (!sg || !sg->length || !nbytes) {
		if (bufcnt) {
			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
			sg_init_table(rctx->sgl, 1);
			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
			rctx->sg = rctx->sgl;
			rctx->sg_len = 1;
		}

		return 0;
	}

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = (new_len - 1) / bs * bs;

	if (!new_len)
		return 0;

	if (nbytes != new_len)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

		if (bufcnt) {
			if (!IS_ALIGNED(bufcnt, bs)) {
				aligned = false;
				break;
			}
			nbytes -= bufcnt;
			bufcnt = 0;
			if (!nbytes)
				list_ok = false;

			continue;
		}

#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
			aligned = false;
			break;
		}
#endif

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
		new_len = OMAP_SHA_MAX_DMA_LEN;
		aligned = false;
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->total = new_len;
	rctx->offset += new_len;
	rctx->sg_len = n;
	if (rctx->bufcnt) {
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
		sg_chain(rctx->sgl, 2, sg);
		rctx->sg = rctx->sgl;
	} else {
		rctx->sg = sg;
	}

	return 0;
}
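/*
 * Three possible outcomes above: the original sg list is usable as-is
 * (optionally chained behind the buffered block in xmit_buf), a trimmed
 * sg list is built by omap_sham_copy_sg_lists(), or, if alignment rules
 * out DMA-friendly access, the data is flattened into freshly allocated
 * pages by omap_sham_copy_sgs().
 */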
static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	unsigned int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	bool update = rctx->op == OP_UPDATE;
	int hash_later;

	bs = get_block_size(rctx);

	nbytes = rctx->bufcnt;

	if (update)
		nbytes += req->nbytes - rctx->offset;

	dev_dbg(rctx->dd->dev,
		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
		__func__, nbytes, bs, rctx->total, rctx->offset,
		rctx->bufcnt);

	if (!nbytes)
		return 0;

	rctx->total = nbytes;

	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > req->nbytes)
			len = req->nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	hash_later = nbytes - rctx->total;
	if (hash_later < 0)
		hash_later = 0;

	if (hash_later && hash_later <= rctx->buflen) {
		scatterwalk_map_and_copy(rctx->buffer,
					 req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);

		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (hash_later > rctx->buflen)
		set_bit(FLAGS_HUGE, &rctx->dd->flags);

	rctx->total = min(nbytes, rctx->total);

	return 0;
}
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);

	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	return 0;
}

static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
{
	struct omap_sham_dev *dd;

	if (ctx->dd)
		return ctx->dd;

	spin_lock_bh(&sham.lock);
	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
	list_move_tail(&dd->list, &sham.dev_list);
	ctx->dd = dd;
	spin_unlock_bh(&sham.lock);

	return dd;
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd;
	int bs = 0;

	ctx->dd = NULL;

	dd = omap_sham_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->offset = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}
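/*
 * Without AUTO_XOR hardware the HMAC inner pad is simply prepended to the
 * data buffer in init above, so the first block the accelerator hashes is
 * ipad, and the outer hash is finished in software by
 * omap_sham_finish_hmac().
 */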
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;
	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
		     !(dd->flags & BIT(FLAGS_HUGE));

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
		ctx->total, ctx->digcnt, final);

	if (ctx->total < get_block_size(ctx) ||
	    ctx->total < dd->fallback_sz)
		ctx->flags |= BIT(FLAGS_CPU);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_xmit_cpu(dd, ctx->total, final);
	else
		err = omap_sham_xmit_dma(dd, ctx->total, final);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);

	return err;
}

static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (dd->flags & BIT(FLAGS_HUGE))
		return 0;

	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->total, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->total, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err;
	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
		     !(dd->flags & BIT(FLAGS_HUGE));

	dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
		ctx->op, ctx->total, ctx->digcnt, final);

	err = omap_sham_prepare_request(engine, areq);
	if (err)
		return err;

	err = pm_runtime_resume_and_get(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	dd->err = 0;
	dd->req = req;

	if (ctx->digcnt)
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE)
		err = omap_sham_update_req(dd);
	else if (ctx->op == OP_FINAL)
		err = omap_sham_final_req(dd);

	if (err != -EINPROGRESS)
		omap_sham_finish_req(req, err);

	return 0;
}
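/*
 * Note the crypto_engine contract: this handler returns 0 even on failure
 * paths, because omap_sham_finish_req() already completed the request via
 * crypto_finalize_hash_request(); only -EINPROGRESS keeps the request
 * alive until the interrupt/tasklet path finishes it.
 */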
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}

static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
		    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
		       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
		       BIT(FLAGS_OUTPUT_READY));

	if (!err)
		dd->pdata->copy_hash(req, 1);

	if (dd->flags & BIT(FLAGS_HUGE)) {
		/* Re-enqueue the request */
		omap_sham_enqueue(req, ctx->op);
		return;
	}

	if (!err) {
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	ctx->offset = 0;

	crypto_finalize_hash_request(dd->engine, req, err);
}

static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(dd->engine, req);
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}

static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}
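/*
 * Updates that still fit in the context buffer are only copied here and
 * return 0 immediately; the hardware is not touched until a later update
 * overflows the buffer or final()/finup() forces the hash to be closed.
 */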
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad at the beginning of the buffer when falling back
	 * to the software algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
				       ctx->bufcnt - offset, req->result);
}

static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If buffersize is less than fallback_sz, we use fallback
	 * SW encoding, as using DMA + HW in this case doesn't provide
	 * any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}

static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	int err, i;

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
					      bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
			bctx->opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

	return 0;
}
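/*
 * Standard HMAC key preparation (RFC 2104): keys longer than the block
 * size are replaced by their digest, shorter ones are zero-padded. The
 * ipad/opad XOR is done here in software only when the hardware cannot
 * do it itself (no FLAGS_AUTO_XOR); otherwise bctx->ipad keeps the plain
 * padded key for omap_sham_write_ctrl_omap4() to load.
 */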
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha224");
}

static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha256");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha384");
}

static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha512");
}

static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
static int omap_sham_export(struct ahash_request *req, void *out)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);

	return 0;
}

static int omap_sham_import(struct ahash_request *req, const void *in)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	const struct omap_sham_reqctx *ctx_in = in;

	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);

	return 0;
}
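/*
 * export/import serialize the whole request context plus any buffered
 * bytes; correspondingly, probe() sets halg.statesize to
 * sizeof(struct omap_sham_reqctx) + BUFLEN for every registered alg.
 */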
static struct ahash_engine_alg algs_sha1_md5[] = {
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.halg.digestsize	= SHA1_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.halg.digestsize	= MD5_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.setkey		= omap_sham_setkey,
	.base.halg.digestsize	= SHA1_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.setkey		= omap_sham_setkey,
	.base.halg.digestsize	= MD5_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
};
/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_engine_alg algs_sha224_sha256[] = {
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.halg.digestsize	= SHA224_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.halg.digestsize	= SHA256_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.setkey		= omap_sham_setkey,
	.base.halg.digestsize	= SHA224_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.setkey		= omap_sham_setkey,
	.base.halg.digestsize	= SHA256_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
};
static struct ahash_engine_alg algs_sha384_sha512[] = {
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.halg.digestsize	= SHA384_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.halg.digestsize	= SHA512_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.setkey		= omap_sham_setkey,
	.base.halg.digestsize	= SHA384_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
{
	.base.init		= omap_sham_init,
	.base.update		= omap_sham_update,
	.base.final		= omap_sham_final,
	.base.finup		= omap_sham_finup,
	.base.digest		= omap_sham_digest,
	.base.setkey		= omap_sham_setkey,
	.base.halg.digestsize	= SHA512_DIGEST_SIZE,
	.base.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	},
	.op.do_one_request = omap_sham_hash_one_req,
},
};
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}

static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
			     SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}
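/*
 * Interrupt handling is two-stage: the hard irq only acknowledges (OMAP2)
 * or masks (OMAP4) the interrupt and sets FLAGS_OUTPUT_READY; the actual
 * completion work, including reading back the digest, runs later in
 * omap_sham_done_task().
 */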
static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
#ifdef CONFIG_OF
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_sham_of_match[] = {
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers > 9 */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe: if the new size is
	 * smaller than the current one, the queue simply stops accepting
	 * new entries until it has shrunk enough.
	 */
	dd->queue.max_qlen = value;

	return size;
}
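/*
 * Both attributes take plain integers, e.g. (illustrative paths):
 *   echo 512 > /sys/devices/.../fallback    - hash buffers < 512 bytes in SW
 *   echo 20  > /sys/devices/.../queue_len   - allow 20 queued requests
 */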
static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static const struct attribute_group omap_sham_attr_group = {
	.attrs = omap_sham_attrs,
};
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;
	sham.flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock_bh(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock_bh(&sham.lock);

	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine_start;

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (dd->pdata->algs_info[i].registered)
			break;

		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_engine_alg *ealg;
			struct ahash_alg *alg;

			ealg = &dd->pdata->algs_info[i].algs_list[j];
			alg = &ealg->base;
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;
			err = crypto_engine_register_ahash(ealg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_algs;
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_engine_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_engine_start:
	crypto_engine_exit(dd->engine);
err_engine:
	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
err_pm:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);

	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
			crypto_engine_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
			dd->pdata->algs_info[i].registered--;
		}
	tasklet_kill(&dd->done_task);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);

	return 0;
}
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");