1 // SPDX-License-Identifier: GPL-2.0+
3 * talitos - Freescale Integrated Security Engine (SEC) device driver
5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
7 * Scatterlist Crypto API glue code copied from files with the following:
8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
10 * Crypto algorithm registration code copied from hifn driver:
11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12 * All rights reserved.
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
/*
 * Write a DMA bus address and length into a h/w descriptor pointer.
 * NOTE(review): source text is elided here (braces and the is_sec1
 * branch lines are missing) - presumably SEC1 parts use len1 while
 * SEC2 use len plus eptr for 36-bit addressing; confirm against the
 * unabridged file before editing.
 */
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 unsigned int len, bool is_sec1)
48 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
50 ptr->len1 = cpu_to_be16(len);
52 ptr->len = cpu_to_be16(len);
53 ptr->eptr = upper_32_bits(dma_addr);
/*
 * Field-by-field copy of one h/w descriptor pointer into another.
 * NOTE(review): the is_sec1 branching is elided in this view; len1
 * vs. len/eptr are presumably selected by is_sec1 - confirm.
 */
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 struct talitos_ptr *src_ptr, bool is_sec1)
60 dst_ptr->ptr = src_ptr->ptr;
62 dst_ptr->len1 = src_ptr->len1;
64 dst_ptr->len = src_ptr->len;
65 dst_ptr->eptr = src_ptr->eptr;
/*
 * Read back the length stored in a h/w descriptor pointer,
 * converting from big-endian to CPU order.  The two returns
 * presumably correspond to the SEC1 (len1) and SEC2 (len)
 * layouts; the selecting conditional is elided in this view.
 */
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
73 return be16_to_cpu(ptr->len1);
75 return be16_to_cpu(ptr->len);
/*
 * Set the extent/extension byte of a descriptor pointer to val.
 * NOTE(review): body elided in this view - do not edit blind.
 */
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
/*
 * OR val into the extent/extension byte of a descriptor pointer.
 * NOTE(review): body elided in this view - do not edit blind.
 */
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
92 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 static void __map_single_talitos_ptr(struct device *dev,
95 struct talitos_ptr *ptr,
96 unsigned int len, void *data,
97 enum dma_data_direction dir,
/* DMA-map the buffer with the caller-supplied attrs, then record the
 * resulting bus address/length in the descriptor pointer.
 * NOTE(review): the dma_mapping_error() check is not visible in this
 * view - confirm whether mapping failure is handled upstream.
 */
100 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
104 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
/* Convenience wrapper: map with default (0) DMA attributes. */
107 static void map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir)
112 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
/*
 * Convenience wrapper: map while skipping the CPU cache sync
 * (DMA_ATTR_SKIP_CPU_SYNC) - caller is responsible for coherency.
 */
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116 struct talitos_ptr *ptr,
117 unsigned int len, void *data,
118 enum dma_data_direction dir)
120 __map_single_talitos_ptr(dev, ptr, len, data, dir,
121 DMA_ATTR_SKIP_CPU_SYNC);
125 * unmap bus single (contiguous) h/w descriptor pointer
127 static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
/* Recover the bus address and length directly from the descriptor
 * pointer fields written by to_talitos_ptr(), then unmap. */
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
/*
 * Reset one SEC channel and re-apply its configuration bits.
 * Returns 0 on success; logs and (presumably) returns an error when
 * the reset bit never self-clears within TALITOS_TIMEOUT polls.
 * NOTE(review): the is_sec1/else split and the timeout/return lines
 * are elided in this view - SEC1 polls CCCR_LO, SEC2 polls CCCR.
 */
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
142 bool is_sec1 = has_ftr_sec1(priv);
/* SEC1 path: reset via CCCR_LO and busy-wait for the bit to clear. */
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
/* SEC2 path: reset via CCCR and busy-wait for the bit to clear. */
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
161 dev_err(dev, "failed to reset channel %d\n", ch);
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168 /* enable chaining descriptors */
170 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
173 /* and ICCR writeback, if available */
174 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176 TALITOS_CCCR_LO_IWSE);
/*
 * Software-reset the whole SEC block via MCR and wait for the reset
 * bit to self-clear.  The RCA1|RCA3 write afterwards presumably
 * re-applies channel assignments on affected parts - confirm against
 * the unabridged file (the guarding conditional is elided here).
 */
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185 bool is_sec1 = has_ftr_sec1(priv);
186 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
188 setbits32(priv->reg + TALITOS_MCR, mcr);
190 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
195 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196 setbits32(priv->reg + TALITOS_MCR, mcr);
/* timeout expired without SWR clearing */
200 dev_err(dev, "failed to reset device\n");
208 * Reset and initialize the device
210 static int init_device(struct device *dev)
212 struct talitos_private *priv = dev_get_drvdata(dev);
214 bool is_sec1 = has_ftr_sec1(priv);
218 * errata documentation: warning: certain SEC interrupts
219 * are not fully cleared by writing the MCR:SWR bit,
220 * set bit twice to completely reset
/* double reset per the erratum above */
222 err = reset_device(dev);
226 err = reset_device(dev);
/* then reset every channel individually */
231 for (ch = 0; ch < priv->num_channels; ch++) {
232 err = reset_channel(dev, ch);
237 /* enable channel done and error interrupts */
/* SEC1: interrupts are unmasked by CLEARING IMR bits */
239 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 /* disable parity error check in DEU (erroneous? test vect.) */
242 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
/* SEC2: interrupts are unmasked by SETTING IMR bits */
244 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
248 /* disable integrity check error interrupts (use writeback instead) */
249 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 TALITOS_MDEUICR_LO_ICE);
257 * talitos_submit - submits a descriptor to the device for processing
258 * @dev: the SEC device to be used
259 * @ch: the SEC device channel to be used
260 * @desc: the descriptor to be processed by the device
261 * @callback: whom to call when processing is complete
262 * @context: a handle for use by caller (optional)
264 * desc must contain valid dma-mapped (bus physical) address pointers.
265 * callback must check err and feedback in descriptor header
266 * for device processing status.
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 void (*callback)(struct device *dev,
270 struct talitos_desc *desc,
271 void *context, int error),
274 struct talitos_private *priv = dev_get_drvdata(dev);
275 struct talitos_request *request;
278 bool is_sec1 = has_ftr_sec1(priv);
/* head_lock serializes producers on this channel's request fifo */
280 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
282 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 /* h/w fifo is full */
284 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
288 head = priv->chan[ch].head;
289 request = &priv->chan[ch].fifo[head];
291 /* map descriptor and save caller data */
/* SEC1 path: the h/w consumes the descriptor starting at hdr1, so
 * mirror hdr into hdr1 first and map from there.  SEC2 maps the
 * whole descriptor.  (The is_sec1 conditional is elided here.) */
293 desc->hdr1 = desc->hdr;
294 request->dma_desc = dma_map_single(dev, &desc->hdr1,
298 request->dma_desc = dma_map_single(dev, desc,
302 request->callback = callback;
303 request->context = context;
305 /* increment fifo head */
306 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
309 request->desc = desc;
/* kick the channel: write the 36-bit bus address of the descriptor
 * to the fetch fifo, high word first */
313 out_be32(priv->chan[ch].reg + TALITOS_FF,
314 upper_32_bits(request->dma_desc));
315 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 lower_32_bits(request->dma_desc));
318 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
/*
 * Return the header of the descriptor the h/w actually processed:
 * SEC2 uses desc->hdr; SEC1 uses hdr1, and for chained requests the
 * effective header lives in the secondary descriptor placed after
 * the link tables in the edesc buffer.  (The selecting conditionals
 * are elided in this view.)
 */
325 struct talitos_edesc *edesc;
328 return request->desc->hdr;
330 if (!request->desc->next_desc)
331 return request->desc->hdr1;
333 edesc = container_of(request->desc, struct talitos_edesc, desc);
335 return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
339 * process what was done, notify callback of error if not
341 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
343 struct talitos_private *priv = dev_get_drvdata(dev);
344 struct talitos_request *request, saved_req;
347 bool is_sec1 = has_ftr_sec1(priv);
/* tail_lock serializes consumers of this channel's request fifo */
349 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
351 tail = priv->chan[ch].tail;
352 while (priv->chan[ch].fifo[tail].desc) {
355 request = &priv->chan[ch].fifo[tail];
357 /* descriptors with their done bits set don't get the error */
359 hdr = get_request_hdr(request, is_sec1);
361 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
/* unmap the descriptor mapped in talitos_submit() */
369 dma_unmap_single(dev, request->dma_desc,
373 /* copy entries so we can call callback outside lock */
374 saved_req.desc = request->desc;
375 saved_req.callback = request->callback;
376 saved_req.context = request->context;
378 /* release request entry in fifo */
380 request->desc = NULL;
382 /* increment fifo tail */
383 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
385 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
/* free a submit slot before invoking the completion callback */
387 atomic_dec(&priv->chan[ch].submit_count);
389 saved_req.callback(dev, saved_req.desc, saved_req.context,
391 /* channel may resume processing in single desc error case */
392 if (error && !reset_ch && status == error)
394 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
395 tail = priv->chan[ch].tail;
398 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
402 * process completed requests for channels that have done status
404 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
405 static void talitos1_done_##name(unsigned long data) \
407 struct device *dev = (struct device *)data; \
408 struct talitos_private *priv = dev_get_drvdata(dev); \
409 unsigned long flags; \
411 if (ch_done_mask & 0x10000000) \
412 flush_channel(dev, 0, 0, 0); \
413 if (ch_done_mask & 0x40000000) \
414 flush_channel(dev, 1, 0, 0); \
415 if (ch_done_mask & 0x00010000) \
416 flush_channel(dev, 2, 0, 0); \
417 if (ch_done_mask & 0x00040000) \
418 flush_channel(dev, 3, 0, 0); \
420 /* At this point, all completed channels have been processed */ \
421 /* Unmask done interrupts for channels completed later on. */ \
422 spin_lock_irqsave(&priv->reg_lock, flags); \
423 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
424 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
425 spin_unlock_irqrestore(&priv->reg_lock, flags); \
428 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
429 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
/*
 * Template for the SEC2+ "done" tasklets: same shape as the SEC1
 * variant but the done bits are 1<<(2*ch) and interrupts are
 * re-unmasked by SETTING IMR bits (setbits32) instead of clearing.
 */
431 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
432 static void talitos2_done_##name(unsigned long data) \
434 struct device *dev = (struct device *)data; \
435 struct talitos_private *priv = dev_get_drvdata(dev); \
436 unsigned long flags; \
438 if (ch_done_mask & 1) \
439 flush_channel(dev, 0, 0, 0); \
440 if (ch_done_mask & (1 << 2)) \
441 flush_channel(dev, 1, 0, 0); \
442 if (ch_done_mask & (1 << 4)) \
443 flush_channel(dev, 2, 0, 0); \
444 if (ch_done_mask & (1 << 6)) \
445 flush_channel(dev, 3, 0, 0); \
447 /* At this point, all completed channels have been processed */ \
448 /* Unmask done interrupts for channels completed later on. */ \
449 spin_lock_irqsave(&priv->reg_lock, flags); \
450 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
451 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
452 spin_unlock_irqrestore(&priv->reg_lock, flags); \
455 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
456 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
457 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
458 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
461 * locate current (offending) descriptor
463 static u32 current_desc_hdr(struct device *dev, int ch)
465 struct talitos_private *priv = dev_get_drvdata(dev);
/* read the 36-bit current descriptor pointer from CDPR/CDPR_LO */
469 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
470 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
473 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
/* walk the s/w fifo from tail looking for a request whose mapped
 * descriptor (or chained next_desc) matches CDPR */
477 tail = priv->chan[ch].tail;
480 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
481 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
482 iter = (iter + 1) & (priv->fifo_len - 1);
/* wrapped back to tail without a match */
484 dev_err(dev, "couldn't locate current descriptor\n");
/* chained case: the offending header is in the secondary descriptor
 * stored after the link tables in the edesc buffer */
489 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
490 struct talitos_edesc *edesc;
492 edesc = container_of(priv->chan[ch].fifo[iter].desc,
493 struct talitos_edesc, desc);
494 return ((struct talitos_desc *)
495 (edesc->buf + edesc->dma_len))->hdr;
498 return priv->chan[ch].fifo[iter].desc->hdr;
502 * user diagnostics; report root cause of error based on execution unit status
504 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
506 struct talitos_private *priv = dev_get_drvdata(dev);
/* fall back to the channel's descriptor buffer copy of the header
 * (guarding conditional elided in this view) */
510 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
/* dump the interrupt status of the primary execution unit */
512 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
513 case DESC_HDR_SEL0_AFEU:
514 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
515 in_be32(priv->reg_afeu + TALITOS_EUISR),
516 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
518 case DESC_HDR_SEL0_DEU:
519 dev_err(dev, "DEUISR 0x%08x_%08x\n",
520 in_be32(priv->reg_deu + TALITOS_EUISR),
521 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
523 case DESC_HDR_SEL0_MDEUA:
524 case DESC_HDR_SEL0_MDEUB:
525 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
526 in_be32(priv->reg_mdeu + TALITOS_EUISR),
527 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
529 case DESC_HDR_SEL0_RNG:
530 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
531 in_be32(priv->reg_rngu + TALITOS_ISR),
532 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
534 case DESC_HDR_SEL0_PKEU:
535 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
536 in_be32(priv->reg_pkeu + TALITOS_EUISR),
537 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
539 case DESC_HDR_SEL0_AESU:
540 dev_err(dev, "AESUISR 0x%08x_%08x\n",
541 in_be32(priv->reg_aesu + TALITOS_EUISR),
542 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
544 case DESC_HDR_SEL0_CRCU:
545 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
546 in_be32(priv->reg_crcu + TALITOS_EUISR),
547 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
549 case DESC_HDR_SEL0_KEU:
/* NOTE(review): KEU reads reg_pkeu here; presumably KEU shares the
 * PKEU register window on this h/w - confirm */
550 dev_err(dev, "KEUISR 0x%08x_%08x\n",
551 in_be32(priv->reg_pkeu + TALITOS_EUISR),
552 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
/* and the secondary execution unit, if any */
556 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
557 case DESC_HDR_SEL1_MDEUA:
558 case DESC_HDR_SEL1_MDEUB:
559 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
560 in_be32(priv->reg_mdeu + TALITOS_EUISR),
561 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
563 case DESC_HDR_SEL1_CRCU:
564 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
565 in_be32(priv->reg_crcu + TALITOS_EUISR),
566 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
/* finally dump the channel's 8-entry descriptor buffer */
570 for (i = 0; i < 8; i++)
571 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
572 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
573 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
577 * recover from error interrupts
579 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
581 struct talitos_private *priv = dev_get_drvdata(dev);
582 unsigned int timeout = TALITOS_TIMEOUT;
583 int ch, error, reset_dev = 0;
585 bool is_sec1 = has_ftr_sec1(priv);
586 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
588 for (ch = 0; ch < priv->num_channels; ch++) {
589 /* skip channels without errors */
591 /* bits 29, 31, 17, 19 */
/* SEC1 error-bit layout */
592 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
/* SEC2 error bits are at odd positions 1,3,5,7 */
595 if (!(isr & (1 << (ch * 2 + 1))))
/* decode the channel pointer status register */
601 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
603 if (v_lo & TALITOS_CCPSR_LO_DOF) {
604 dev_err(dev, "double fetch fifo overflow error\n");
608 if (v_lo & TALITOS_CCPSR_LO_SOF) {
609 /* h/w dropped descriptor */
610 dev_err(dev, "single fetch fifo overflow error\n");
613 if (v_lo & TALITOS_CCPSR_LO_MDTE)
614 dev_err(dev, "master data transfer error\n");
615 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
616 dev_err(dev, is_sec1 ? "pointer not complete error\n"
617 : "s/g data length zero error\n");
618 if (v_lo & TALITOS_CCPSR_LO_FPZ)
619 dev_err(dev, is_sec1 ? "parity error\n"
620 : "fetch pointer zero error\n");
621 if (v_lo & TALITOS_CCPSR_LO_IDH)
622 dev_err(dev, "illegal descriptor header error\n");
623 if (v_lo & TALITOS_CCPSR_LO_IEU)
624 dev_err(dev, is_sec1 ? "static assignment error\n"
625 : "invalid exec unit error\n")
626 if (v_lo & TALITOS_CCPSR_LO_EU)
627 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
629 if (v_lo & TALITOS_CCPSR_LO_GB)
630 dev_err(dev, "gather boundary error\n");
631 if (v_lo & TALITOS_CCPSR_LO_GRL)
632 dev_err(dev, "gather return/length error\n");
633 if (v_lo & TALITOS_CCPSR_LO_SB)
634 dev_err(dev, "scatter boundary error\n");
635 if (v_lo & TALITOS_CCPSR_LO_SRL)
636 dev_err(dev, "scatter return/length error\n");
/* complete outstanding requests with the error, then either reset
 * the channel or (SEC2) ask it to continue past the bad descriptor */
639 flush_channel(dev, ch, error, reset_ch);
642 reset_channel(dev, ch);
644 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
646 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
647 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
648 TALITOS2_CCCR_CONT) && --timeout)
651 dev_err(dev, "failed to restart channel %d\n",
/* global / non-channel errors require a full device reset */
657 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
658 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
659 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
660 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
663 dev_err(dev, "done overflow, internal time out, or "
664 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
666 /* purge request queues */
667 for (ch = 0; ch < priv->num_channels; ch++)
668 flush_channel(dev, ch, -EIO, 1);
670 /* reset and reinitialize the device */
/*
 * Template for SEC1 hard-irq handlers: snapshot+ack ISR under
 * reg_lock, divert to talitos_error() on any error bit, otherwise
 * mask done interrupts (SEC1: setbits masks) and punt completion
 * processing to the matching done tasklet.
 */
675 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
676 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
678 struct device *dev = data; \
679 struct talitos_private *priv = dev_get_drvdata(dev); \
681 unsigned long flags; \
683 spin_lock_irqsave(&priv->reg_lock, flags); \
684 isr = in_be32(priv->reg + TALITOS_ISR); \
685 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
686 /* Acknowledge interrupt */ \
687 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
688 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
690 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
691 spin_unlock_irqrestore(&priv->reg_lock, flags); \
692 talitos_error(dev, isr & ch_err_mask, isr_lo); \
695 if (likely(isr & ch_done_mask)) { \
696 /* mask further done interrupts. */ \
697 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
698 /* done_task will unmask done interrupts at exit */ \
699 tasklet_schedule(&priv->done_task[tlet]); \
701 spin_unlock_irqrestore(&priv->reg_lock, flags); \
704 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
708 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
/*
 * Template for SEC2+ hard-irq handlers: identical flow to the SEC1
 * variant except done interrupts are masked by CLEARING IMR bits.
 */
710 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
711 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
713 struct device *dev = data; \
714 struct talitos_private *priv = dev_get_drvdata(dev); \
716 unsigned long flags; \
718 spin_lock_irqsave(&priv->reg_lock, flags); \
719 isr = in_be32(priv->reg + TALITOS_ISR); \
720 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
721 /* Acknowledge interrupt */ \
722 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
723 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
725 if (unlikely(isr & ch_err_mask || isr_lo)) { \
726 spin_unlock_irqrestore(&priv->reg_lock, flags); \
727 talitos_error(dev, isr & ch_err_mask, isr_lo); \
730 if (likely(isr & ch_done_mask)) { \
731 /* mask further done interrupts. */ \
732 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
733 /* done_task will unmask done interrupts at exit */ \
734 tasklet_schedule(&priv->done_task[tlet]); \
736 spin_unlock_irqrestore(&priv->reg_lock, flags); \
739 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
743 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
744 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
746 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
/*
 * hwrng .data_present hook: poll the RNGU output-fifo-level field up
 * to 20 times.  (The wait/udelay and return lines are elided here.)
 */
752 static int talitos_rng_data_present(struct hwrng *rng, int wait)
754 struct device *dev = (struct device *)rng->priv;
755 struct talitos_private *priv = dev_get_drvdata(dev);
759 for (i = 0; i < 20; i++) {
760 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
761 TALITOS_RNGUSR_LO_OFL;
/*
 * hwrng .data_read hook.  The first fifo read is intentionally
 * overwritten: the h/w fifo requires paired 64-bit accesses, so both
 * halves are read but only the low word is returned.
 */
770 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
772 struct device *dev = (struct device *)rng->priv;
773 struct talitos_private *priv = dev_get_drvdata(dev);
775 /* rng fifo requires 64-bit accesses */
776 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
777 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
/*
 * hwrng .init hook: software-reset the RNGU, wait for reset-done,
 * then kick off continuous generation by touching EUDSR_LO.
 */
782 static int talitos_rng_init(struct hwrng *rng)
784 struct device *dev = (struct device *)rng->priv;
785 struct talitos_private *priv = dev_get_drvdata(dev);
786 unsigned int timeout = TALITOS_TIMEOUT;
788 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
789 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
790 & TALITOS_RNGUSR_LO_RD)
794 dev_err(dev, "failed to reset rng hw\n");
798 /* start generating */
799 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
/*
 * Fill in the hwrng ops and register with the hwrng core; record
 * success in priv->rng_registered so unregister is conditional.
 * Note: the comma-terminated assignments are comma-expression
 * statements, equivalent to semicolons here.
 */
804 static int talitos_register_rng(struct device *dev)
806 struct talitos_private *priv = dev_get_drvdata(dev);
809 priv->rng.name = dev_driver_string(dev),
810 priv->rng.init = talitos_rng_init,
811 priv->rng.data_present = talitos_rng_data_present,
812 priv->rng.data_read = talitos_rng_data_read,
813 priv->rng.priv = (unsigned long)dev;
815 err = hwrng_register(&priv->rng);
817 priv->rng_registered = true;
/* Unregister the hwrng only if talitos_register_rng() succeeded. */
824 struct talitos_private *priv = dev_get_drvdata(dev);
826 if (!priv->rng_registered)
829 hwrng_unregister(&priv->rng);
830 priv->rng_registered = false;
/* crypto algorithm registration priority for this driver */
836 #define TALITOS_CRA_PRIORITY 3000
838 * Defines a priority for doing AEAD with descriptors type
839 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
841 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
/* max combined authenc key: AES key + largest supported HMAC block */
842 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
843 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
845 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
847 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
/* per-tfm context fields (struct talitos_ctx - header line elided):
 * precomputed descriptor header template, combined auth+enc key and
 * its DMA mapping, plus the split key lengths */
852 __be32 desc_hdr_template;
853 u8 key[TALITOS_MAX_KEY_SIZE];
854 u8 iv[TALITOS_MAX_IV_LENGTH];
857 unsigned int enckeylen;
858 unsigned int authkeylen;
/* per-request ahash state: saved h/w context, double-buffered
 * partial block, deferred byte count, and scatterlist bookkeeping */
861 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
862 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
864 struct talitos_ahash_req_ctx {
865 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
866 unsigned int hw_context_size;
867 u8 buf[2][HASH_MAX_BLOCK_SIZE];
872 unsigned int to_hash_later;
874 struct scatterlist bufsl[2];
875 struct scatterlist *psrc;
/* serialized hash state for ahash export()/import() */
878 struct talitos_export_state {
879 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
880 u8 buf[HASH_MAX_BLOCK_SIZE];
884 unsigned int to_hash_later;
/*
 * AEAD .setkey: split the authenc()-encoded key into auth and enc
 * parts, store them contiguously in ctx->key (auth first), and
 * DMA-map the combined key.  A previously mapped key is unmapped
 * first (the guarding conditional is elided in this view).  keys is
 * zeroized on both success and badkey paths.
 */
888 static int aead_setkey(struct crypto_aead *authenc,
889 const u8 *key, unsigned int keylen)
891 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
892 struct device *dev = ctx->dev;
893 struct crypto_authenc_keys keys;
895 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
898 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
902 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
904 memcpy(ctx->key, keys.authkey, keys.authkeylen);
905 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
907 ctx->keylen = keys.authkeylen + keys.enckeylen;
908 ctx->enckeylen = keys.enckeylen;
909 ctx->authkeylen = keys.authkeylen;
910 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
913 memzero_explicit(&keys, sizeof(keys));
/* badkey error path */
917 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
918 memzero_explicit(&keys, sizeof(keys));
/*
 * AEAD .setkey for 3DES suites: same as aead_setkey() but also
 * requires enckeylen == DES3_EDE_KEY_SIZE and runs the key through
 * __des3_verify_key() (weak/degenerate key rejection) before use.
 */
922 static int aead_des3_setkey(struct crypto_aead *authenc,
923 const u8 *key, unsigned int keylen)
925 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
926 struct device *dev = ctx->dev;
927 struct crypto_authenc_keys keys;
931 err = crypto_authenc_extractkeys(&keys, key, keylen);
936 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
939 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
942 flags = crypto_aead_get_flags(authenc);
943 err = __des3_verify_key(&flags, keys.enckey);
945 crypto_aead_set_flags(authenc, flags);
/* unmap any previously mapped key before overwriting it */
950 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
952 memcpy(ctx->key, keys.authkey, keys.authkeylen);
953 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
955 ctx->keylen = keys.authkeylen + keys.enckeylen;
956 ctx->enckeylen = keys.enckeylen;
957 ctx->authkeylen = keys.authkeylen;
958 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
962 memzero_explicit(&keys, sizeof(keys));
/* badkey error path */
966 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * Undo the DMA mapping of src/dst scatterlists for a completed
 * request.  On SEC1 with a multi-entry dst, data was bounced through
 * edesc->buf, so sync the bounce buffer and copy it back to dst
 * before unmapping.  The src==dst (BIDIRECTIONAL) case is handled
 * separately at the end.
 */
970 static void talitos_sg_unmap(struct device *dev,
971 struct talitos_edesc *edesc,
972 struct scatterlist *src,
973 struct scatterlist *dst,
974 unsigned int len, unsigned int offset)
976 struct talitos_private *priv = dev_get_drvdata(dev);
977 bool is_sec1 = has_ftr_sec1(priv);
978 unsigned int src_nents = edesc->src_nents ? : 1;
979 unsigned int dst_nents = edesc->dst_nents ? : 1;
981 if (is_sec1 && dst && dst_nents > 1) {
982 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
983 len, DMA_FROM_DEVICE);
984 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
988 if (src_nents == 1 || !is_sec1)
989 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
991 if (dst && (dst_nents == 1 || !is_sec1))
992 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
993 } else if (src_nents == 1 || !is_sec1) {
994 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
/*
 * Tear down all DMA state set up by ipsec_esp() for an AEAD request:
 * unmap the per-descriptor single mappings (ICV ptr[6], cipher IV),
 * the src/dst scatterlists, and the link-table buffer.  For the
 * non-IPSEC_ESP (HSNA) descriptor type, also recover the last
 * ciphertext block from dst into ctx->iv for IV chaining.
 */
998 static void ipsec_esp_unmap(struct device *dev,
999 struct talitos_edesc *edesc,
1000 struct aead_request *areq, bool encrypt)
1002 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1003 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1004 unsigned int ivsize = crypto_aead_ivsize(aead);
1005 unsigned int authsize = crypto_aead_authsize(aead);
1006 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1007 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1008 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1011 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1013 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1015 talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1016 cryptlen + authsize, areq->assoclen);
1019 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1022 if (!is_ipsec_esp) {
1023 unsigned int dst_nents = edesc->dst_nents ? : 1;
1025 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1026 areq->assoclen + cryptlen - ivsize);
1031 * ipsec_esp descriptor callbacks
/* encrypt completion: unmap everything (including the request IV)
 * and complete the aead request with the h/w status */
1033 static void ipsec_esp_encrypt_done(struct device *dev,
1034 struct talitos_desc *desc, void *context,
1037 struct aead_request *areq = context;
1038 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1039 unsigned int ivsize = crypto_aead_ivsize(authenc);
1040 struct talitos_edesc *edesc;
1042 edesc = container_of(desc, struct talitos_edesc, desc);
1044 ipsec_esp_unmap(dev, edesc, areq, true);
1046 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1050 aead_request_complete(areq, err);
/*
 * decrypt completion with software ICV check: compare the h/w
 * computed ICV (at the end of the edesc buffer) against the received
 * ICV using constant-time crypto_memneq(); -EBADMSG on mismatch.
 */
1053 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1054 struct talitos_desc *desc,
1055 void *context, int err)
1057 struct aead_request *req = context;
1058 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1059 unsigned int authsize = crypto_aead_authsize(authenc);
1060 struct talitos_edesc *edesc;
1063 edesc = container_of(desc, struct talitos_edesc, desc);
1065 ipsec_esp_unmap(dev, edesc, req, false);
/* computed ICV follows the link tables; received ICV sits just
 * before it */
1069 oicv = edesc->buf + edesc->dma_len;
1070 icv = oicv - authsize;
1072 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1077 aead_request_complete(req, err);
/*
 * decrypt completion with hardware ICV check: the SEC reports the
 * comparison result in hdr_lo's ICCR1 field; anything other than
 * PASS becomes an authentication failure (error set in elided line).
 */
1080 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1081 struct talitos_desc *desc,
1082 void *context, int err)
1084 struct aead_request *req = context;
1085 struct talitos_edesc *edesc;
1087 edesc = container_of(desc, struct talitos_edesc, desc);
1089 ipsec_esp_unmap(dev, edesc, req, false);
1091 /* check ICV auth status */
1092 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1093 DESC_HDR_LO_ICCR1_PASS))
1098 aead_request_complete(req, err);
1102 * convert scatterlist to SEC h/w link table format
1103 * stop at cryptlen bytes
/* Builds link-table entries covering datalen payload bytes plus elen
 * extent bytes starting at offset into the scatterlist; returns the
 * entry count (several bookkeeping lines are elided in this view). */
1105 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1106 unsigned int offset, int datalen, int elen,
1107 struct talitos_ptr *link_tbl_ptr)
1109 int n_sg = elen ? sg_count + 1 : sg_count;
1111 int cryptlen = datalen + elen;
1113 while (cryptlen && sg && n_sg--) {
1114 unsigned int len = sg_dma_len(sg);
/* skip whole entries that lie before offset */
1116 if (offset >= len) {
/* split an entry that straddles the data/extent boundary */
1126 if (datalen > 0 && len > datalen) {
1127 to_talitos_ptr(link_tbl_ptr + count,
1128 sg_dma_address(sg) + offset, datalen, 0);
1129 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1134 to_talitos_ptr(link_tbl_ptr + count,
1135 sg_dma_address(sg) + offset, len, 0);
1136 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1146 /* tag end of link table */
1148 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1149 DESC_PTR_LNKTBL_RET, 0);
/*
 * Point a descriptor pointer at scatterlist data: directly for a
 * single segment (unless 'force' requires a table), otherwise via a
 * link table built at edesc->link_tbl[tbl_off] with the JUMP flag
 * OR'd into the pointer.  Returns the number of link-table entries
 * consumed (return statements are elided in this view).
 */
1154 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1155 unsigned int len, struct talitos_edesc *edesc,
1156 struct talitos_ptr *ptr, int sg_count,
1157 unsigned int offset, int tbl_off, int elen,
1160 struct talitos_private *priv = dev_get_drvdata(dev);
1161 bool is_sec1 = has_ftr_sec1(priv);
/* zero-length case: null pointer entry */
1164 to_talitos_ptr(ptr, 0, 0, is_sec1);
1167 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1168 if (sg_count == 1 && !force) {
1169 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
/* SEC1 bounce-buffer case (guard elided): point into dma_link_tbl */
1173 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1176 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1177 &edesc->link_tbl[tbl_off]);
1178 if (sg_count == 1 && !force) {
1179 /* Only one segment now, so no link tbl needed*/
1180 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1183 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1184 tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1185 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
/* Convenience wrapper: talitos_sg_map_ext() with no extent bytes
 * (elen=0) and force=false. */
1190 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1191 unsigned int len, struct talitos_edesc *edesc,
1192 struct talitos_ptr *ptr, int sg_count,
1193 unsigned int offset, int tbl_off)
1195 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1200 * fill in and submit ipsec_esp descriptor
/* Builds the 7-pointer SEC descriptor for an AEAD request
 * (ptr[0]=auth key, ptr[1]=assoc data, civ/ckey at 2/3 depending on
 * descriptor type, ptr[4]=cipher in, ptr[5]=cipher out, ptr[6]=ICV
 * or IV) and submits it.  Several guards/returns are elided in this
 * view - treat control flow here as approximate. */
1202 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1204 void (*callback)(struct device *dev,
1205 struct talitos_desc *desc,
1206 void *context, int error))
1208 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1209 unsigned int authsize = crypto_aead_authsize(aead);
1210 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1211 struct device *dev = ctx->dev;
1212 struct talitos_desc *desc = &edesc->desc;
1213 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1214 unsigned int ivsize = crypto_aead_ivsize(aead);
1218 bool sync_needed = false;
1219 struct talitos_private *priv = dev_get_drvdata(dev);
1220 bool is_sec1 = has_ftr_sec1(priv);
1221 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1222 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1223 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1224 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
/* hmac key */
1227 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1229 sg_count = edesc->src_nents ?: 1;
/* SEC1 multi-entry src: bounce the whole input into edesc->buf */
1230 if (is_sec1 && sg_count > 1)
1231 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1232 areq->assoclen + cryptlen);
1234 sg_count = dma_map_sg(dev, areq->src, sg_count,
1235 (areq->src == areq->dst) ?
1236 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* associated data */
1239 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1240 &desc->ptr[1], sg_count, 0, tbl_off);
/* cipher iv */
1248 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
1251 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1252 ctx->enckeylen, is_sec1);
1256 * map and adjust cipher len to aead request cryptlen.
1257 * extent is bytes of HMAC postpended to ciphertext,
1258 * typically 12 for ipsec
1260 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
/* cipher in */
1263 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1264 sg_count, areq->assoclen, tbl_off, elen,
/* cipher out: map dst separately only when it differs from src */
1273 if (areq->src != areq->dst) {
1274 sg_count = edesc->dst_nents ? : 1;
1275 if (!is_sec1 || sg_count == 1)
1276 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1279 if (is_ipsec_esp && encrypt)
1283 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1284 sg_count, areq->assoclen, tbl_off, elen,
1285 is_ipsec_esp && !encrypt);
1289 edesc->icv_ool = !encrypt;
1291 if (!encrypt && is_ipsec_esp) {
1292 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1294 /* Add an entry to the link table for ICV data */
1295 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1296 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1298 /* icv data follows link tables */
1299 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1300 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1302 } else if (!encrypt) {
1303 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1305 } else if (!is_ipsec_esp) {
/* HSNA encrypt: ICV is written straight to dst via ptr[6] */
1306 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1307 sg_count, areq->assoclen + cryptlen, tbl_off);
/* iv out (ipsec_esp only) */
1312 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
/* flush the CPU-built link tables to the device if any were used */
1316 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1320 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1321 if (ret != -EINPROGRESS) {
1322 ipsec_esp_unmap(dev, edesc, areq, encrypt);
/*
 * allocate and map the extended descriptor
 *
 * Central edesc allocator shared by the AEAD, ablkcipher and ahash paths.
 * Sizes one kmalloc'd blob holding: the talitos_edesc header, the DMA
 * area (link tables on SEC2+, or a linearization bounce buffer on SEC1,
 * plus room for the ICV), optionally a stashed ICV and a second
 * descriptor (SEC1 ahash), and finally a copy of the IV.
 * Returns the edesc or an ERR_PTR on bad lengths / OOM.
 */
1329 * allocate and map the extended descriptor
1331 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1332 struct scatterlist *src,
1333 struct scatterlist *dst,
1335 unsigned int assoclen,
1336 unsigned int cryptlen,
1337 unsigned int authsize,
1338 unsigned int ivsize,
1343 struct talitos_edesc *edesc;
1344 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1345 dma_addr_t iv_dma = 0;
1346 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1348 struct talitos_private *priv = dev_get_drvdata(dev);
1349 bool is_sec1 = has_ftr_sec1(priv);
/* SEC1 and SEC2+ have different per-pointer length limits */
1350 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1352 if (cryptlen + authsize > max_len) {
1353 dev_err(dev, "length exceeds h/w max limit\n");
1354 return ERR_PTR(-EINVAL);
/* in-place operation: src and dst share the same sg count */
1357 if (!dst || dst == src) {
1358 src_len = assoclen + cryptlen + authsize;
1359 src_nents = sg_nents_for_len(src, src_len);
1360 if (src_nents < 0) {
1361 dev_err(dev, "Invalid number of src SG.\n");
1362 return ERR_PTR(-EINVAL);
/* nents == 1 is recorded as 0: "no link table needed" */
1364 src_nents = (src_nents == 1) ? 0 : src_nents;
1365 dst_nents = dst ? src_nents : 0;
1367 } else { /* dst && dst != src*/
/* decrypt reads the ICV from src; encrypt writes it to dst */
1368 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1369 src_nents = sg_nents_for_len(src, src_len);
1370 if (src_nents < 0) {
1371 dev_err(dev, "Invalid number of src SG.\n");
1372 return ERR_PTR(-EINVAL);
1374 src_nents = (src_nents == 1) ? 0 : src_nents;
1375 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1376 dst_nents = sg_nents_for_len(dst, dst_len);
1377 if (dst_nents < 0) {
1378 dev_err(dev, "Invalid number of dst SG.\n");
1379 return ERR_PTR(-EINVAL);
1381 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1385 * allocate space for base edesc plus the link tables,
1386 * allowing for two separate entries for AD and generated ICV (+ 2),
1387 * and space for two sets of ICVs (stashed and generated)
1389 alloc_len = sizeof(struct talitos_edesc);
1390 if (src_nents || dst_nents || !encrypt) {
/* SEC1 branch (elided if): dma area is a flat bounce buffer;
 * SEC2+ branch: dma area holds link-table entries */
1392 dma_len = (src_nents ? src_len : 0) +
1393 (dst_nents ? dst_len : 0) + authsize;
1395 dma_len = (src_nents + dst_nents + 2) *
1396 sizeof(struct talitos_ptr) + authsize;
1397 alloc_len += dma_len;
/* room to stash the incoming ICV for s/w auth check on decrypt */
1401 alloc_len += icv_stashing ? authsize : 0;
1403 /* if its a ahash, add space for a second desc next to the first one */
1404 if (is_sec1 && !dst)
1405 alloc_len += sizeof(struct talitos_desc);
/* the IV is copied to the tail of the blob so it can be DMA-mapped */
1406 alloc_len += ivsize;
1408 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1410 return ERR_PTR(-ENOMEM);
1412 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1413 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1415 memset(&edesc->desc, 0, sizeof(edesc->desc));
1417 edesc->src_nents = src_nents;
1418 edesc->dst_nents = dst_nents;
1419 edesc->iv_dma = iv_dma;
1420 edesc->dma_len = dma_len;
1422 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
/*
 * aead_edesc_alloc() - AEAD front-end for talitos_edesc_alloc().
 *
 * Derives authsize/ivsize from the tfm and strips the ICV from
 * cryptlen on decrypt (same adjustment as ipsec_esp()), then
 * delegates the actual allocation and mapping.
 */
1429 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1430 int icv_stashing, bool encrypt)
1432 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1433 unsigned int authsize = crypto_aead_authsize(authenc);
1434 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1435 unsigned int ivsize = crypto_aead_ivsize(authenc);
1436 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1438 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1439 iv, areq->assoclen, cryptlen,
1440 authsize, ivsize, icv_stashing,
1441 areq->base.flags, encrypt);
/*
 * aead_encrypt() - .encrypt entry point for all talitos AEAD algs.
 *
 * Allocates the extended descriptor (no ICV stashing needed for
 * encrypt), sets the ENCRYPT direction on the per-alg header
 * template, and submits via ipsec_esp().
 */
1444 static int aead_encrypt(struct aead_request *req)
1446 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1447 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1448 struct talitos_edesc *edesc;
1450 /* allocate extended descriptor */
1451 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1453 return PTR_ERR(edesc);
1455 /* set encrypt */
1456 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1458 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
/*
 * aead_decrypt() - .decrypt entry point for all talitos AEAD algs.
 *
 * Two strategies: if the h/w supports inline ICV checking (CICV) and
 * the sg layout allows it, let the SEC verify the ICV and complete via
 * ipsec_esp_decrypt_hwauth_done(); otherwise stash the incoming ICV in
 * the edesc and compare it in software in
 * ipsec_esp_decrypt_swauth_done().
 */
1461 static int aead_decrypt(struct aead_request *req)
1463 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1464 unsigned int authsize = crypto_aead_authsize(authenc);
1465 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1466 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1467 struct talitos_edesc *edesc;
1470 /* allocate extended descriptor */
1471 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1473 return PTR_ERR(edesc);
/* h/w ICV check is only usable on IPSEC_ESP-type descriptors and when
 * either no link tables are needed or the h/w counts the extent bytes */
1475 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1476 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1477 ((!edesc->src_nents && !edesc->dst_nents) ||
1478 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1480 /* decrypt and check the ICV */
1481 edesc->desc.hdr = ctx->desc_hdr_template |
1482 DESC_HDR_DIR_INBOUND |
1483 DESC_HDR_MODE1_MDEU_CICV;
1485 /* reset integrity check result bits */
1487 return ipsec_esp(edesc, req, false,
1488 ipsec_esp_decrypt_hwauth_done);
1491 /* Have to check the ICV with software */
1492 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1494 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1495 icvdata = edesc->buf + edesc->dma_len;
1497 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1498 req->assoclen + req->cryptlen - authsize);
1500 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
/*
 * ablkcipher_setkey() - common .setkey for the ablkcipher algs.
 *
 * Unmaps any previously mapped key, copies the new key into the tfm
 * context and DMA-maps it for device reads.  The algorithm-specific
 * setkey wrappers below validate the key before delegating here.
 */
1503 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1504 const u8 *key, unsigned int keylen)
1506 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1507 struct device *dev = ctx->dev;
/* drop the stale mapping before overwriting ctx->key (guard elided) */
1510 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1512 memcpy(&ctx->key, key, keylen);
1513 ctx->keylen = keylen;
1515 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
/*
 * ablkcipher_des_setkey() - DES .setkey with weak-key rejection.
 *
 * When the tfm requests FORBID_WEAK_KEYS, des_ekey() == 0 flags a weak
 * key: set CRYPTO_TFM_RES_WEAK_KEY and fail (error return elided in
 * this view).  Otherwise fall through to the common setkey.
 */
1520 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1521 const u8 *key, unsigned int keylen)
1523 u32 tmp[DES_EXPKEY_WORDS];
1525 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1526 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1527 !des_ekey(tmp, key)) {
1528 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1532 return ablkcipher_setkey(cipher, key, keylen);
/*
 * ablkcipher_des3_setkey() - 3DES .setkey with key-schedule validation.
 *
 * __des3_verify_key() enforces the distinct-subkey rules; on failure
 * the returned flags are propagated to the tfm and the error is
 * returned (return statement elided in this view).
 */
1535 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1536 const u8 *key, unsigned int keylen)
1541 flags = crypto_ablkcipher_get_flags(cipher);
1542 err = __des3_verify_key(&flags, key);
1543 if (unlikely(err)) {
1544 crypto_ablkcipher_set_flags(cipher, flags);
1548 return ablkcipher_setkey(cipher, key, keylen);
/*
 * ablkcipher_aes_setkey() - AES .setkey accepting only 128/192/256-bit
 * keys; anything else sets CRYPTO_TFM_RES_BAD_KEY_LEN and fails
 * (the -EINVAL return is elided in this view).
 */
1551 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1552 const u8 *key, unsigned int keylen)
1554 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1555 keylen == AES_KEYSIZE_256)
1556 return ablkcipher_setkey(cipher, key, keylen);
1558 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * common_nonsnoop_unmap() - undo the DMA mappings made by
 * common_nonsnoop(): IV-out ptr[5], the src/dst data, IV-in ptr[1],
 * and the link-table area (dma_len > 0 guard elided in this view).
 * Called on submit failure and from the completion callback.
 */
1563 static void common_nonsnoop_unmap(struct device *dev,
1564 struct talitos_edesc *edesc,
1565 struct ablkcipher_request *areq)
1567 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1569 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1570 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1573 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * ablkcipher_done() - completion callback for common_nonsnoop().
 *
 * Recovers the edesc from the embedded descriptor, unmaps everything,
 * copies the h/w-updated IV back to the request (for chaining), and
 * completes the crypto request.  Runs from the done tasklet with the
 * error code supplied by the channel.
 */
1577 static void ablkcipher_done(struct device *dev,
1578 struct talitos_desc *desc, void *context,
1581 struct ablkcipher_request *areq = context;
1582 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1583 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1584 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1585 struct talitos_edesc *edesc;
/* desc is the first member path back to the containing edesc */
1587 edesc = container_of(desc, struct talitos_edesc, desc);
1589 common_nonsnoop_unmap(dev, edesc, areq);
/* return the output IV written by the h/w into ctx->iv */
1590 memcpy(areq->info, ctx->iv, ivsize);
1594 areq->base.complete(&areq->base, err);
/*
 * common_nonsnoop() - build and submit the descriptor for a plain
 * (non-snooping, i.e. cipher-only) ablkcipher operation.
 *
 * Pointer layout: ptr[0] empty, ptr[1] = IV in, ptr[2] = key,
 * ptr[3] = data in, ptr[4] = data out, ptr[5] = IV out, ptr[6] empty.
 * On submit failure the mappings are released via
 * common_nonsnoop_unmap().
 */
1597 static int common_nonsnoop(struct talitos_edesc *edesc,
1598 struct ablkcipher_request *areq,
1599 void (*callback) (struct device *dev,
1600 struct talitos_desc *desc,
1601 void *context, int error))
1603 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1604 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1605 struct device *dev = ctx->dev;
1606 struct talitos_desc *desc = &edesc->desc;
1607 unsigned int cryptlen = areq->nbytes;
1608 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1610 bool sync_needed = false;
1611 struct talitos_private *priv = dev_get_drvdata(dev);
1612 bool is_sec1 = has_ftr_sec1(priv);
1614 /* first DWORD empty */
1616 /* cipher iv */
1617 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1619 /* cipher key */
1620 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1622 sg_count = edesc->src_nents ?: 1;
/* SEC1 has no scatter/gather: linearize src into the bounce buffer */
1623 if (is_sec1 && sg_count > 1)
1624 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1627 sg_count = dma_map_sg(dev, areq->src, sg_count,
1628 (areq->src == areq->dst) ?
1629 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1632 /* cipher data in */
1633 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1634 &desc->ptr[3], sg_count, 0, 0);
1638 /* cipher data out */
1639 if (areq->src != areq->dst) {
1640 sg_count = edesc->dst_nents ? : 1;
1641 if (!is_sec1 || sg_count == 1)
1642 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
/* dst link table (if any) starts after the src entries (+1 slot) */
1645 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1646 sg_count, 0, (edesc->src_nents + 1));
1650 /* iv out */
1651 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1654 /* last DWORD empty */
/* flush CPU-built link tables before handing the desc to the h/w */
1657 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1658 edesc->dma_len, DMA_BIDIRECTIONAL);
1660 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1661 if (ret != -EINPROGRESS) {
1662 common_nonsnoop_unmap(dev, edesc, areq);
/*
 * ablkcipher_edesc_alloc() - ablkcipher front-end for
 * talitos_edesc_alloc(): no assoc data, no authsize, no ICV stashing;
 * only the IV and the request's nbytes are relevant.
 */
1668 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1671 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1672 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1673 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1675 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1676 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1677 areq->base.flags, encrypt);
/*
 * ablkcipher_encrypt() - .encrypt entry point for the ablkcipher algs.
 *
 * Rejects lengths that are not a multiple of the cipher block size
 * (error return elided in this view), then builds the edesc, sets the
 * ENCRYPT direction on the header template and submits.
 */
1680 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1682 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1683 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1684 struct talitos_edesc *edesc;
1685 unsigned int blocksize =
1686 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1691 if (areq->nbytes % blocksize)
1694 /* allocate extended descriptor */
1695 edesc = ablkcipher_edesc_alloc(areq, true);
1697 return PTR_ERR(edesc);
1699 /* set encrypt */
1700 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1702 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * ablkcipher_decrypt() - .decrypt entry point; mirror image of
 * ablkcipher_encrypt() using DESC_HDR_DIR_INBOUND instead of the
 * encrypt mode bit.  Length-multiple check's error return is elided
 * in this view.
 */
1705 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1707 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1708 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1709 struct talitos_edesc *edesc;
1710 unsigned int blocksize =
1711 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1716 if (areq->nbytes % blocksize)
1719 /* allocate extended descriptor */
1720 edesc = ablkcipher_edesc_alloc(areq, false);
1722 return PTR_ERR(edesc);
1724 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1726 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * common_nonsnoop_hash_unmap() - undo all DMA mappings made by
 * common_nonsnoop_hash(), including those of the optional second
 * chained descriptor (desc2) that SEC1 uses when buffered data and
 * new data are hashed in one request.
 */
1729 static void common_nonsnoop_hash_unmap(struct device *dev,
1730 struct talitos_edesc *edesc,
1731 struct ahash_request *areq)
1733 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1734 struct talitos_private *priv = dev_get_drvdata(dev);
1735 bool is_sec1 = has_ftr_sec1(priv);
1736 struct talitos_desc *desc = &edesc->desc;
/* SEC1 second descriptor lives just past the dma area in the edesc */
1737 struct talitos_desc *desc2 = (struct talitos_desc *)
1738 (edesc->buf + edesc->dma_len);
1740 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
/* only unmap desc2's ptr[5] if it wasn't a copy of desc's ptr[5] */
1741 if (desc->next_desc &&
1742 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1743 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1746 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1748 /* When using hashctx-in, must unmap it. */
1749 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1750 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1752 else if (desc->next_desc)
1753 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
/* SEC1 maps the buffered bytes separately into ptr[3] */
1756 if (is_sec1 && req_ctx->nbuf)
1757 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1761 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/* the chained descriptor was mapped in common_nonsnoop_hash() */
1764 if (edesc->desc.next_desc)
1765 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1766 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
/*
 * ahash_done() - completion callback for common_nonsnoop_hash().
 *
 * For non-final operations with leftover bytes, flips the double
 * buffer (buf_idx) so the partial block stashed by ahash_process_req()
 * becomes the buffered input of the next update/final/finup.  Then
 * unmaps and completes the request.
 */
1769 static void ahash_done(struct device *dev,
1770 struct talitos_desc *desc, void *context,
1773 struct ahash_request *areq = context;
1774 struct talitos_edesc *edesc =
1775 container_of(desc, struct talitos_edesc, desc);
1776 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1778 if (!req_ctx->last && req_ctx->to_hash_later) {
1779 /* Position any partial block for next update/final/finup */
1780 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1781 req_ctx->nbuf = req_ctx->to_hash_later;
1783 common_nonsnoop_hash_unmap(dev, edesc, areq);
1787 areq->base.complete(&areq->base, err);
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 *
 * The 64-byte block is the standard MD5/SHA padding of an empty
 * message (0x80 then zeros; the zero length fits in the implicit
 * count).  The h/w PAD mode bit is cleared so the engine doesn't pad
 * again, and ptr[3] (data in) is pointed at the static block.
 */
1791 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1792 * ourself and submit a padded block
1794 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1795 struct talitos_edesc *edesc,
1796 struct talitos_ptr *ptr)
1798 static u8 padded_hash[64] = {
1799 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1800 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1801 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1802 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1805 pr_err_once("Bug in SEC1, padding ourself\n");
1806 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1807 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1808 (char *)padded_hash, DMA_TO_DEVICE);
/*
 * common_nonsnoop_hash() - build and submit the descriptor(s) for an
 * ahash operation.
 *
 * Pointer layout: ptr[1] = hash context in (unless first op with h/w
 * init), ptr[2] = HMAC key, ptr[3] = data in, ptr[5] = digest or
 * context out.  On SEC1, when both previously buffered bytes and new
 * data must be hashed, a second descriptor (desc2) is chained via
 * desc->next_desc: desc hashes the buffered bytes, desc2 continues
 * with the new data.
 */
1811 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1812 struct ahash_request *areq, unsigned int length,
1813 void (*callback) (struct device *dev,
1814 struct talitos_desc *desc,
1815 void *context, int error))
1817 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1818 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1819 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1820 struct device *dev = ctx->dev;
1821 struct talitos_desc *desc = &edesc->desc;
1823 bool sync_needed = false;
1824 struct talitos_private *priv = dev_get_drvdata(dev);
1825 bool is_sec1 = has_ftr_sec1(priv);
1828 /* first DWORD empty */
1830 /* hash context in */
1831 if (!req_ctx->first || req_ctx->swinit) {
1832 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1833 req_ctx->hw_context_size,
1834 req_ctx->hw_context,
/* swinit only forces the context-in once (sha224 s/w init) */
1836 req_ctx->swinit = 0;
1838 /* Indicate next op is not the first. */
/* HMAC key (elided guard: only when ctx->keylen is set) */
1843 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
/* on SEC1 the buffered bytes are hashed by desc; only the rest of
 * length goes through the sg path below */
1846 if (is_sec1 && req_ctx->nbuf)
1847 length -= req_ctx->nbuf;
1849 sg_count = edesc->src_nents ?: 1;
/* SEC1 has no scatter/gather: linearize src into the bounce buffer */
1850 if (is_sec1 && sg_count > 1)
1851 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1853 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1857 /* data in */
1858 if (is_sec1 && req_ctx->nbuf) {
1859 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1860 req_ctx->buf[req_ctx->buf_idx],
1863 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1864 &desc->ptr[3], sg_count, 0, 0);
1869 /* fifth DWORD empty */
1871 /* hash/HMAC out -or- hash context out */
1873 map_single_talitos_ptr(dev, &desc->ptr[5],
1874 crypto_ahash_digestsize(tfm),
1875 areq->result, DMA_FROM_DEVICE);
/* intermediate op: save the running context instead of a digest */
1877 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1878 req_ctx->hw_context_size,
1879 req_ctx->hw_context,
1882 /* last DWORD empty */
/* SEC1 cannot hash an empty message; substitute a pre-padded block */
1884 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1885 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
/* SEC1 two-descriptor chaining: desc = buffered bytes, desc2 = rest */
1887 if (is_sec1 && req_ctx->nbuf && length) {
1888 struct talitos_desc *desc2 = (struct talitos_desc *)
1889 (edesc->buf + edesc->dma_len);
1890 dma_addr_t next_desc;
1892 memset(desc2, 0, sizeof(*desc2));
1893 desc2->hdr = desc->hdr;
/* desc2 continues the hash started by desc */
1894 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1895 desc2->hdr1 = desc2->hdr;
/* first desc must not pad and must not raise done-notify */
1896 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1897 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1898 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1900 if (desc->ptr[1].ptr)
1901 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1904 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1905 req_ctx->hw_context_size,
1906 req_ctx->hw_context,
1908 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1909 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1910 &desc2->ptr[3], sg_count, 0, 0);
1913 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
/* desc writes its intermediate context for desc2 to pick up */
1915 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1916 req_ctx->hw_context_size,
1917 req_ctx->hw_context,
1920 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1922 desc->next_desc = cpu_to_be32(next_desc);
/* flush CPU-built link tables / bounce data before submit */
1926 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1927 edesc->dma_len, DMA_BIDIRECTIONAL);
1929 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1930 if (ret != -EINPROGRESS) {
1931 common_nonsnoop_hash_unmap(dev, edesc, areq);
/*
 * ahash_edesc_alloc() - ahash front-end for talitos_edesc_alloc().
 *
 * No dst sg, no IV, no authsize.  On SEC1 (elided guard) the
 * previously buffered bytes are subtracted since they are hashed by
 * the first descriptor, not through the sg path.
 */
1937 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1938 unsigned int nbytes)
1940 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1941 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1942 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1943 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1944 bool is_sec1 = has_ftr_sec1(priv);
1947 nbytes -= req_ctx->nbuf;
1949 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1950 nbytes, 0, 0, 0, areq->base.flags, false);
/*
 * ahash_init() - .init entry point: reset the per-request context.
 *
 * first=1 tells the h/w to initialize its own context; swinit=0 means
 * no software-provided initial state.  Context size depends on the
 * digest width (MD5/SHA1/SHA256 vs SHA384/SHA512).  The trailing
 * map/unmap pair flushes hw_context from the CPU cache so later
 * nosync mappings see consistent memory.
 */
1953 static int ahash_init(struct ahash_request *areq)
1955 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1956 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1957 struct device *dev = ctx->dev;
1958 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1962 /* Initialize the context */
1963 req_ctx->buf_idx = 0;
1965 req_ctx->first = 1; /* first indicates h/w must init its context */
1966 req_ctx->swinit = 0; /* assume h/w init of context */
1967 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1968 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1969 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1970 req_ctx->hw_context_size = size;
1972 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1974 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 *
 * Presumably calls ahash_init() first (elided in this view) so the
 * rest of the request context is reset before the constants are
 * written; swinit=1 then stops the engine from overwriting them with
 * the sha256 initial values.
 */
1980 * on h/w without explicit sha224 support, we initialize h/w context
1981 * manually with sha224 constants, and tell it to run sha256.
1983 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1985 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1987 req_ctx->hw_context[0] = SHA224_H0;
1988 req_ctx->hw_context[1] = SHA224_H1;
1989 req_ctx->hw_context[2] = SHA224_H2;
1990 req_ctx->hw_context[3] = SHA224_H3;
1991 req_ctx->hw_context[4] = SHA224_H4;
1992 req_ctx->hw_context[5] = SHA224_H5;
1993 req_ctx->hw_context[6] = SHA224_H6;
1994 req_ctx->hw_context[7] = SHA224_H7;
1996 /* init 64-bit count */
1997 req_ctx->hw_context[8] = 0;
1998 req_ctx->hw_context[9] = 0;
2001 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
/*
 * ahash_process_req() - core of update/final/finup/digest.
 *
 * Decides how much to hash now (nbytes_to_hash) versus buffer for
 * later (to_hash_later, kept in the inactive half of the double
 * buffer), chains previously buffered bytes in front of the new data
 * (sg chaining on SEC2+, copy-into-buffer on SEC1), sets the PAD/CONT/
 * INIT/HMAC mode bits on the header template, and submits through
 * common_nonsnoop_hash().
 */
2006 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2008 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2009 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2010 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2011 struct talitos_edesc *edesc;
2012 unsigned int blocksize =
2013 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2014 unsigned int nbytes_to_hash;
2015 unsigned int to_hash_later;
2018 struct device *dev = ctx->dev;
2019 struct talitos_private *priv = dev_get_drvdata(dev);
2020 bool is_sec1 = has_ftr_sec1(priv);
2021 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
/* not enough for a full block and not final: just buffer and return */
2023 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2024 /* Buffer up to one whole block */
2025 nents = sg_nents_for_len(areq->src, nbytes);
2027 dev_err(ctx->dev, "Invalid number of src SG.\n");
2030 sg_copy_to_buffer(areq->src, nents,
2031 ctx_buf + req_ctx->nbuf, nbytes);
2032 req_ctx->nbuf += nbytes;
2036 /* At least (blocksize + 1) bytes are available to hash */
2037 nbytes_to_hash = nbytes + req_ctx->nbuf;
2038 to_hash_later = nbytes_to_hash & (blocksize - 1);
/* elided branch: on final, hash everything (to_hash_later = 0) */
2042 else if (to_hash_later)
2043 /* There is a partial block. Hash the full block(s) now */
2044 nbytes_to_hash -= to_hash_later;
2046 /* Keep one block buffered */
2047 nbytes_to_hash -= blocksize;
2048 to_hash_later = blocksize;
2051 /* Chain in any previously buffered data */
2052 if (!is_sec1 && req_ctx->nbuf) {
2053 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2054 sg_init_table(req_ctx->bufsl, nsg);
2055 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2057 sg_chain(req_ctx->bufsl, 2, areq->src);
2058 req_ctx->psrc = req_ctx->bufsl;
2059 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
/* SEC1: top the buffer up to a block boundary from areq->src */
2062 if (nbytes_to_hash > blocksize)
2063 offset = blocksize - req_ctx->nbuf;
2065 offset = nbytes_to_hash - req_ctx->nbuf;
2066 nents = sg_nents_for_len(areq->src, offset);
2068 dev_err(ctx->dev, "Invalid number of src SG.\n");
2071 sg_copy_to_buffer(areq->src, nents,
2072 ctx_buf + req_ctx->nbuf, offset);
2073 req_ctx->nbuf += offset;
2074 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2077 req_ctx->psrc = areq->src;
/* stash the leftover partial block in the other buffer half;
 * ahash_done() flips buf_idx on completion */
2079 if (to_hash_later) {
2080 nents = sg_nents_for_len(areq->src, nbytes);
2082 dev_err(ctx->dev, "Invalid number of src SG.\n");
2085 sg_pcopy_to_buffer(areq->src, nents,
2086 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2088 nbytes - to_hash_later);
2090 req_ctx->to_hash_later = to_hash_later;
2092 /* Allocate extended descriptor */
2093 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2095 return PTR_ERR(edesc);
2097 edesc->desc.hdr = ctx->desc_hdr_template;
2099 /* On last one, request SEC to pad; otherwise continue */
2101 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2103 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2105 /* request SEC to INIT hash. */
2106 if (req_ctx->first && !req_ctx->swinit)
2107 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2109 /* When the tfm context has a keylen, it's an HMAC.
2110 * A first or last (ie. not middle) descriptor must request HMAC.
2112 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2113 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2115 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
/*
 * ahash_update() - .update entry point; the elided line presumably
 * clears req_ctx->last before processing areq->nbytes bytes.
 */
2118 static int ahash_update(struct ahash_request *areq)
2120 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2124 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash_final() - .final entry point; the elided line presumably sets
 * req_ctx->last so only buffered data (0 new bytes) is flushed/padded.
 */
2127 static int ahash_final(struct ahash_request *areq)
2129 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2133 return ahash_process_req(areq, 0);
/*
 * ahash_finup() - .finup entry point; the elided line presumably sets
 * req_ctx->last so the final areq->nbytes bytes are hashed and padded.
 */
2136 static int ahash_finup(struct ahash_request *areq)
2138 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2142 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash_digest() - .digest entry point; elided lines presumably call
 * the tfm's init and set req_ctx->last so the whole request is hashed
 * in one pass.
 */
2145 static int ahash_digest(struct ahash_request *areq)
2147 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2148 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2153 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash_export() - .export entry point: snapshot the running hash
 * state into a talitos_export_state.
 *
 * The map/unmap pair syncs hw_context from the device (the engine may
 * have written it via a _nosync mapping) before it is copied out,
 * along with the buffered bytes and the bookkeeping flags.
 */
2156 static int ahash_export(struct ahash_request *areq, void *out)
2158 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2159 struct talitos_export_state *export = out;
2160 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2161 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2162 struct device *dev = ctx->dev;
2165 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2167 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2169 memcpy(export->hw_context, req_ctx->hw_context,
2170 req_ctx->hw_context_size);
2171 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2172 export->swinit = req_ctx->swinit;
2173 export->first = req_ctx->first;
2174 export->last = req_ctx->last;
2175 export->to_hash_later = req_ctx->to_hash_later;
2176 export->nbuf = req_ctx->nbuf;
/*
 * ahash_import() - .import entry point: restore a state previously
 * produced by ahash_export().
 *
 * Resets the request context (buf_idx becomes 0, so the imported
 * buffer goes into buf[0]), restores the context and flags, then
 * flushes hw_context to memory with a map/unmap pair so subsequent
 * _nosync device reads see the imported state.
 */
2183 static int ahash_import(struct ahash_request *areq, const void *in)
2185 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2186 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2187 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2188 struct device *dev = ctx->dev;
2189 const struct talitos_export_state *export = in;
2191 memset(req_ctx, 0, sizeof(*req_ctx));
2192 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2193 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2194 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2195 req_ctx->hw_context_size = size;
2196 memcpy(req_ctx->hw_context, export->hw_context, size);
2197 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2198 req_ctx->swinit = export->swinit;
2199 req_ctx->first = export->first;
2200 req_ctx->last = export->last;
2201 req_ctx->to_hash_later = export->to_hash_later;
2202 req_ctx->nbuf = export->nbuf;
2204 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2206 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
/*
 * keyhash() - synchronously digest an over-long HMAC key.
 *
 * Used by ahash_setkey() when keylen exceeds the block size: runs a
 * one-shot digest of @key on this same tfm (with ctx->keylen kept 0
 * so the operation is a plain hash, not an HMAC) and waits for
 * completion via crypto_wait_req().  Result lands in @hash.
 */
2211 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2214 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2216 struct scatterlist sg[1];
2217 struct ahash_request *req;
2218 struct crypto_wait wait;
2221 crypto_init_wait(&wait);
2223 req = ahash_request_alloc(tfm, GFP_KERNEL);
2227 /* Keep tfm keylen == 0 during hash of the long key */
2229 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2230 crypto_req_done, &wait);
2232 sg_init_one(&sg[0], key, keylen);
2234 ahash_request_set_crypt(req, sg, hash, keylen);
2235 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2237 ahash_request_free(req);
/*
 * ahash_setkey() - .setkey for the HMAC variants.
 *
 * Keys up to one block are used verbatim; longer keys are first
 * reduced to their digest via keyhash() (standard HMAC rule).  The
 * old DMA key mapping is dropped and the new key is mapped for device
 * reads; ctx->keylen != 0 marks the tfm as HMAC for
 * ahash_process_req().
 */
2242 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2243 unsigned int keylen)
2245 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2246 struct device *dev = ctx->dev;
2247 unsigned int blocksize =
2248 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2249 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2250 unsigned int keysize = keylen;
2251 u8 hash[SHA512_DIGEST_SIZE];
2254 if (keylen <= blocksize)
2255 memcpy(ctx->key, key, keysize);
2257 /* Must get the hash of the long key */
2258 ret = keyhash(tfm, key, keylen, hash);
/* keyhash failure is mapped to a bad-key-length error */
2261 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2265 keysize = digestsize;
2266 memcpy(ctx->key, hash, digestsize);
/* release the previous key mapping before remapping (guard elided) */
2270 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2272 ctx->keylen = keysize;
2273 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
/*
 * talitos_alg_template - static description of one algorithm entry in
 * driver_algs[].  The crypto/hash/aead members are presumably inside
 * a union selected by an elided type field (only one is used per
 * entry); desc_hdr_template is the base SEC descriptor header the
 * request paths OR their mode/direction bits into.
 */
2279 struct talitos_alg_template {
2283 struct crypto_alg crypto;
2284 struct ahash_alg hash;
2285 struct aead_alg aead;
2287 __be32 desc_hdr_template;
2290 static struct talitos_alg_template driver_algs[] = {
2291 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2292 { .type = CRYPTO_ALG_TYPE_AEAD,
2295 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2296 .cra_driver_name = "authenc-hmac-sha1-"
2298 .cra_blocksize = AES_BLOCK_SIZE,
2299 .cra_flags = CRYPTO_ALG_ASYNC,
2301 .ivsize = AES_BLOCK_SIZE,
2302 .maxauthsize = SHA1_DIGEST_SIZE,
2304 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2305 DESC_HDR_SEL0_AESU |
2306 DESC_HDR_MODE0_AESU_CBC |
2307 DESC_HDR_SEL1_MDEUA |
2308 DESC_HDR_MODE1_MDEU_INIT |
2309 DESC_HDR_MODE1_MDEU_PAD |
2310 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2312 { .type = CRYPTO_ALG_TYPE_AEAD,
2313 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2316 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2317 .cra_driver_name = "authenc-hmac-sha1-"
2318 "cbc-aes-talitos-hsna",
2319 .cra_blocksize = AES_BLOCK_SIZE,
2320 .cra_flags = CRYPTO_ALG_ASYNC,
2322 .ivsize = AES_BLOCK_SIZE,
2323 .maxauthsize = SHA1_DIGEST_SIZE,
2325 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2326 DESC_HDR_SEL0_AESU |
2327 DESC_HDR_MODE0_AESU_CBC |
2328 DESC_HDR_SEL1_MDEUA |
2329 DESC_HDR_MODE1_MDEU_INIT |
2330 DESC_HDR_MODE1_MDEU_PAD |
2331 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2333 { .type = CRYPTO_ALG_TYPE_AEAD,
2336 .cra_name = "authenc(hmac(sha1),"
2338 .cra_driver_name = "authenc-hmac-sha1-"
2340 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2341 .cra_flags = CRYPTO_ALG_ASYNC,
2343 .ivsize = DES3_EDE_BLOCK_SIZE,
2344 .maxauthsize = SHA1_DIGEST_SIZE,
2345 .setkey = aead_des3_setkey,
2347 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2349 DESC_HDR_MODE0_DEU_CBC |
2350 DESC_HDR_MODE0_DEU_3DES |
2351 DESC_HDR_SEL1_MDEUA |
2352 DESC_HDR_MODE1_MDEU_INIT |
2353 DESC_HDR_MODE1_MDEU_PAD |
2354 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2356 { .type = CRYPTO_ALG_TYPE_AEAD,
2357 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2360 .cra_name = "authenc(hmac(sha1),"
2362 .cra_driver_name = "authenc-hmac-sha1-"
2363 "cbc-3des-talitos-hsna",
2364 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2365 .cra_flags = CRYPTO_ALG_ASYNC,
2367 .ivsize = DES3_EDE_BLOCK_SIZE,
2368 .maxauthsize = SHA1_DIGEST_SIZE,
2369 .setkey = aead_des3_setkey,
2371 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2373 DESC_HDR_MODE0_DEU_CBC |
2374 DESC_HDR_MODE0_DEU_3DES |
2375 DESC_HDR_SEL1_MDEUA |
2376 DESC_HDR_MODE1_MDEU_INIT |
2377 DESC_HDR_MODE1_MDEU_PAD |
2378 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2380 { .type = CRYPTO_ALG_TYPE_AEAD,
2383 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2384 .cra_driver_name = "authenc-hmac-sha224-"
2386 .cra_blocksize = AES_BLOCK_SIZE,
2387 .cra_flags = CRYPTO_ALG_ASYNC,
2389 .ivsize = AES_BLOCK_SIZE,
2390 .maxauthsize = SHA224_DIGEST_SIZE,
2392 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2393 DESC_HDR_SEL0_AESU |
2394 DESC_HDR_MODE0_AESU_CBC |
2395 DESC_HDR_SEL1_MDEUA |
2396 DESC_HDR_MODE1_MDEU_INIT |
2397 DESC_HDR_MODE1_MDEU_PAD |
2398 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2400 { .type = CRYPTO_ALG_TYPE_AEAD,
2401 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2404 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2405 .cra_driver_name = "authenc-hmac-sha224-"
2406 "cbc-aes-talitos-hsna",
2407 .cra_blocksize = AES_BLOCK_SIZE,
2408 .cra_flags = CRYPTO_ALG_ASYNC,
2410 .ivsize = AES_BLOCK_SIZE,
2411 .maxauthsize = SHA224_DIGEST_SIZE,
2413 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2414 DESC_HDR_SEL0_AESU |
2415 DESC_HDR_MODE0_AESU_CBC |
2416 DESC_HDR_SEL1_MDEUA |
2417 DESC_HDR_MODE1_MDEU_INIT |
2418 DESC_HDR_MODE1_MDEU_PAD |
2419 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2421 { .type = CRYPTO_ALG_TYPE_AEAD,
2424 .cra_name = "authenc(hmac(sha224),"
2426 .cra_driver_name = "authenc-hmac-sha224-"
2428 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2429 .cra_flags = CRYPTO_ALG_ASYNC,
2431 .ivsize = DES3_EDE_BLOCK_SIZE,
2432 .maxauthsize = SHA224_DIGEST_SIZE,
2433 .setkey = aead_des3_setkey,
2435 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2437 DESC_HDR_MODE0_DEU_CBC |
2438 DESC_HDR_MODE0_DEU_3DES |
2439 DESC_HDR_SEL1_MDEUA |
2440 DESC_HDR_MODE1_MDEU_INIT |
2441 DESC_HDR_MODE1_MDEU_PAD |
2442 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2444 { .type = CRYPTO_ALG_TYPE_AEAD,
2445 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2448 .cra_name = "authenc(hmac(sha224),"
2450 .cra_driver_name = "authenc-hmac-sha224-"
2451 "cbc-3des-talitos-hsna",
2452 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2453 .cra_flags = CRYPTO_ALG_ASYNC,
2455 .ivsize = DES3_EDE_BLOCK_SIZE,
2456 .maxauthsize = SHA224_DIGEST_SIZE,
2457 .setkey = aead_des3_setkey,
2459 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2461 DESC_HDR_MODE0_DEU_CBC |
2462 DESC_HDR_MODE0_DEU_3DES |
2463 DESC_HDR_SEL1_MDEUA |
2464 DESC_HDR_MODE1_MDEU_INIT |
2465 DESC_HDR_MODE1_MDEU_PAD |
2466 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2468 { .type = CRYPTO_ALG_TYPE_AEAD,
2471 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2472 .cra_driver_name = "authenc-hmac-sha256-"
2474 .cra_blocksize = AES_BLOCK_SIZE,
2475 .cra_flags = CRYPTO_ALG_ASYNC,
2477 .ivsize = AES_BLOCK_SIZE,
2478 .maxauthsize = SHA256_DIGEST_SIZE,
2480 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2481 DESC_HDR_SEL0_AESU |
2482 DESC_HDR_MODE0_AESU_CBC |
2483 DESC_HDR_SEL1_MDEUA |
2484 DESC_HDR_MODE1_MDEU_INIT |
2485 DESC_HDR_MODE1_MDEU_PAD |
2486 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2488 { .type = CRYPTO_ALG_TYPE_AEAD,
2489 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2492 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2493 .cra_driver_name = "authenc-hmac-sha256-"
2494 "cbc-aes-talitos-hsna",
2495 .cra_blocksize = AES_BLOCK_SIZE,
2496 .cra_flags = CRYPTO_ALG_ASYNC,
2498 .ivsize = AES_BLOCK_SIZE,
2499 .maxauthsize = SHA256_DIGEST_SIZE,
2501 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2502 DESC_HDR_SEL0_AESU |
2503 DESC_HDR_MODE0_AESU_CBC |
2504 DESC_HDR_SEL1_MDEUA |
2505 DESC_HDR_MODE1_MDEU_INIT |
2506 DESC_HDR_MODE1_MDEU_PAD |
2507 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2509 { .type = CRYPTO_ALG_TYPE_AEAD,
2512 .cra_name = "authenc(hmac(sha256),"
2514 .cra_driver_name = "authenc-hmac-sha256-"
2516 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2517 .cra_flags = CRYPTO_ALG_ASYNC,
2519 .ivsize = DES3_EDE_BLOCK_SIZE,
2520 .maxauthsize = SHA256_DIGEST_SIZE,
2521 .setkey = aead_des3_setkey,
2523 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2525 DESC_HDR_MODE0_DEU_CBC |
2526 DESC_HDR_MODE0_DEU_3DES |
2527 DESC_HDR_SEL1_MDEUA |
2528 DESC_HDR_MODE1_MDEU_INIT |
2529 DESC_HDR_MODE1_MDEU_PAD |
2530 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2532 { .type = CRYPTO_ALG_TYPE_AEAD,
2533 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2536 .cra_name = "authenc(hmac(sha256),"
2538 .cra_driver_name = "authenc-hmac-sha256-"
2539 "cbc-3des-talitos-hsna",
2540 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2541 .cra_flags = CRYPTO_ALG_ASYNC,
2543 .ivsize = DES3_EDE_BLOCK_SIZE,
2544 .maxauthsize = SHA256_DIGEST_SIZE,
2545 .setkey = aead_des3_setkey,
2547 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2549 DESC_HDR_MODE0_DEU_CBC |
2550 DESC_HDR_MODE0_DEU_3DES |
2551 DESC_HDR_SEL1_MDEUA |
2552 DESC_HDR_MODE1_MDEU_INIT |
2553 DESC_HDR_MODE1_MDEU_PAD |
2554 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2556 { .type = CRYPTO_ALG_TYPE_AEAD,
2559 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2560 .cra_driver_name = "authenc-hmac-sha384-"
2562 .cra_blocksize = AES_BLOCK_SIZE,
2563 .cra_flags = CRYPTO_ALG_ASYNC,
2565 .ivsize = AES_BLOCK_SIZE,
2566 .maxauthsize = SHA384_DIGEST_SIZE,
2568 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2569 DESC_HDR_SEL0_AESU |
2570 DESC_HDR_MODE0_AESU_CBC |
2571 DESC_HDR_SEL1_MDEUB |
2572 DESC_HDR_MODE1_MDEU_INIT |
2573 DESC_HDR_MODE1_MDEU_PAD |
2574 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2576 { .type = CRYPTO_ALG_TYPE_AEAD,
2579 .cra_name = "authenc(hmac(sha384),"
2581 .cra_driver_name = "authenc-hmac-sha384-"
2583 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2584 .cra_flags = CRYPTO_ALG_ASYNC,
2586 .ivsize = DES3_EDE_BLOCK_SIZE,
2587 .maxauthsize = SHA384_DIGEST_SIZE,
2588 .setkey = aead_des3_setkey,
2590 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2592 DESC_HDR_MODE0_DEU_CBC |
2593 DESC_HDR_MODE0_DEU_3DES |
2594 DESC_HDR_SEL1_MDEUB |
2595 DESC_HDR_MODE1_MDEU_INIT |
2596 DESC_HDR_MODE1_MDEU_PAD |
2597 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2599 { .type = CRYPTO_ALG_TYPE_AEAD,
2602 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2603 .cra_driver_name = "authenc-hmac-sha512-"
2605 .cra_blocksize = AES_BLOCK_SIZE,
2606 .cra_flags = CRYPTO_ALG_ASYNC,
2608 .ivsize = AES_BLOCK_SIZE,
2609 .maxauthsize = SHA512_DIGEST_SIZE,
2611 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2612 DESC_HDR_SEL0_AESU |
2613 DESC_HDR_MODE0_AESU_CBC |
2614 DESC_HDR_SEL1_MDEUB |
2615 DESC_HDR_MODE1_MDEU_INIT |
2616 DESC_HDR_MODE1_MDEU_PAD |
2617 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2619 { .type = CRYPTO_ALG_TYPE_AEAD,
2622 .cra_name = "authenc(hmac(sha512),"
2624 .cra_driver_name = "authenc-hmac-sha512-"
2626 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2627 .cra_flags = CRYPTO_ALG_ASYNC,
2629 .ivsize = DES3_EDE_BLOCK_SIZE,
2630 .maxauthsize = SHA512_DIGEST_SIZE,
2631 .setkey = aead_des3_setkey,
2633 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2635 DESC_HDR_MODE0_DEU_CBC |
2636 DESC_HDR_MODE0_DEU_3DES |
2637 DESC_HDR_SEL1_MDEUB |
2638 DESC_HDR_MODE1_MDEU_INIT |
2639 DESC_HDR_MODE1_MDEU_PAD |
2640 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2642 { .type = CRYPTO_ALG_TYPE_AEAD,
2645 .cra_name = "authenc(hmac(md5),cbc(aes))",
2646 .cra_driver_name = "authenc-hmac-md5-"
2648 .cra_blocksize = AES_BLOCK_SIZE,
2649 .cra_flags = CRYPTO_ALG_ASYNC,
2651 .ivsize = AES_BLOCK_SIZE,
2652 .maxauthsize = MD5_DIGEST_SIZE,
2654 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2655 DESC_HDR_SEL0_AESU |
2656 DESC_HDR_MODE0_AESU_CBC |
2657 DESC_HDR_SEL1_MDEUA |
2658 DESC_HDR_MODE1_MDEU_INIT |
2659 DESC_HDR_MODE1_MDEU_PAD |
2660 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2662 { .type = CRYPTO_ALG_TYPE_AEAD,
2663 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2666 .cra_name = "authenc(hmac(md5),cbc(aes))",
2667 .cra_driver_name = "authenc-hmac-md5-"
2668 "cbc-aes-talitos-hsna",
2669 .cra_blocksize = AES_BLOCK_SIZE,
2670 .cra_flags = CRYPTO_ALG_ASYNC,
2672 .ivsize = AES_BLOCK_SIZE,
2673 .maxauthsize = MD5_DIGEST_SIZE,
2675 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2676 DESC_HDR_SEL0_AESU |
2677 DESC_HDR_MODE0_AESU_CBC |
2678 DESC_HDR_SEL1_MDEUA |
2679 DESC_HDR_MODE1_MDEU_INIT |
2680 DESC_HDR_MODE1_MDEU_PAD |
2681 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2683 { .type = CRYPTO_ALG_TYPE_AEAD,
2686 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2687 .cra_driver_name = "authenc-hmac-md5-"
2689 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2690 .cra_flags = CRYPTO_ALG_ASYNC,
2692 .ivsize = DES3_EDE_BLOCK_SIZE,
2693 .maxauthsize = MD5_DIGEST_SIZE,
2694 .setkey = aead_des3_setkey,
2696 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2698 DESC_HDR_MODE0_DEU_CBC |
2699 DESC_HDR_MODE0_DEU_3DES |
2700 DESC_HDR_SEL1_MDEUA |
2701 DESC_HDR_MODE1_MDEU_INIT |
2702 DESC_HDR_MODE1_MDEU_PAD |
2703 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2705 { .type = CRYPTO_ALG_TYPE_AEAD,
2706 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2709 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2710 .cra_driver_name = "authenc-hmac-md5-"
2711 "cbc-3des-talitos-hsna",
2712 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2713 .cra_flags = CRYPTO_ALG_ASYNC,
2715 .ivsize = DES3_EDE_BLOCK_SIZE,
2716 .maxauthsize = MD5_DIGEST_SIZE,
2717 .setkey = aead_des3_setkey,
2719 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2721 DESC_HDR_MODE0_DEU_CBC |
2722 DESC_HDR_MODE0_DEU_3DES |
2723 DESC_HDR_SEL1_MDEUA |
2724 DESC_HDR_MODE1_MDEU_INIT |
2725 DESC_HDR_MODE1_MDEU_PAD |
2726 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2728 /* ABLKCIPHER algorithms. */
2729 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2731 .cra_name = "ecb(aes)",
2732 .cra_driver_name = "ecb-aes-talitos",
2733 .cra_blocksize = AES_BLOCK_SIZE,
2734 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2737 .min_keysize = AES_MIN_KEY_SIZE,
2738 .max_keysize = AES_MAX_KEY_SIZE,
2739 .setkey = ablkcipher_aes_setkey,
2742 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2745 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2747 .cra_name = "cbc(aes)",
2748 .cra_driver_name = "cbc-aes-talitos",
2749 .cra_blocksize = AES_BLOCK_SIZE,
2750 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2753 .min_keysize = AES_MIN_KEY_SIZE,
2754 .max_keysize = AES_MAX_KEY_SIZE,
2755 .ivsize = AES_BLOCK_SIZE,
2756 .setkey = ablkcipher_aes_setkey,
2759 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2760 DESC_HDR_SEL0_AESU |
2761 DESC_HDR_MODE0_AESU_CBC,
2763 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2765 .cra_name = "ctr(aes)",
2766 .cra_driver_name = "ctr-aes-talitos",
2768 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2771 .min_keysize = AES_MIN_KEY_SIZE,
2772 .max_keysize = AES_MAX_KEY_SIZE,
2773 .ivsize = AES_BLOCK_SIZE,
2774 .setkey = ablkcipher_aes_setkey,
2777 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2778 DESC_HDR_SEL0_AESU |
2779 DESC_HDR_MODE0_AESU_CTR,
2781 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2783 .cra_name = "ecb(des)",
2784 .cra_driver_name = "ecb-des-talitos",
2785 .cra_blocksize = DES_BLOCK_SIZE,
2786 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2789 .min_keysize = DES_KEY_SIZE,
2790 .max_keysize = DES_KEY_SIZE,
2791 .setkey = ablkcipher_des_setkey,
2794 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2797 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2799 .cra_name = "cbc(des)",
2800 .cra_driver_name = "cbc-des-talitos",
2801 .cra_blocksize = DES_BLOCK_SIZE,
2802 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2805 .min_keysize = DES_KEY_SIZE,
2806 .max_keysize = DES_KEY_SIZE,
2807 .ivsize = DES_BLOCK_SIZE,
2808 .setkey = ablkcipher_des_setkey,
2811 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2813 DESC_HDR_MODE0_DEU_CBC,
2815 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2817 .cra_name = "ecb(des3_ede)",
2818 .cra_driver_name = "ecb-3des-talitos",
2819 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2820 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2823 .min_keysize = DES3_EDE_KEY_SIZE,
2824 .max_keysize = DES3_EDE_KEY_SIZE,
2825 .setkey = ablkcipher_des3_setkey,
2828 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2830 DESC_HDR_MODE0_DEU_3DES,
2832 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2834 .cra_name = "cbc(des3_ede)",
2835 .cra_driver_name = "cbc-3des-talitos",
2836 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2837 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2840 .min_keysize = DES3_EDE_KEY_SIZE,
2841 .max_keysize = DES3_EDE_KEY_SIZE,
2842 .ivsize = DES3_EDE_BLOCK_SIZE,
2843 .setkey = ablkcipher_des3_setkey,
2846 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2848 DESC_HDR_MODE0_DEU_CBC |
2849 DESC_HDR_MODE0_DEU_3DES,
2851 /* AHASH algorithms. */
2852 { .type = CRYPTO_ALG_TYPE_AHASH,
2854 .halg.digestsize = MD5_DIGEST_SIZE,
2855 .halg.statesize = sizeof(struct talitos_export_state),
2858 .cra_driver_name = "md5-talitos",
2859 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2860 .cra_flags = CRYPTO_ALG_ASYNC,
2863 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2864 DESC_HDR_SEL0_MDEUA |
2865 DESC_HDR_MODE0_MDEU_MD5,
2867 { .type = CRYPTO_ALG_TYPE_AHASH,
2869 .halg.digestsize = SHA1_DIGEST_SIZE,
2870 .halg.statesize = sizeof(struct talitos_export_state),
2873 .cra_driver_name = "sha1-talitos",
2874 .cra_blocksize = SHA1_BLOCK_SIZE,
2875 .cra_flags = CRYPTO_ALG_ASYNC,
2878 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2879 DESC_HDR_SEL0_MDEUA |
2880 DESC_HDR_MODE0_MDEU_SHA1,
2882 { .type = CRYPTO_ALG_TYPE_AHASH,
2884 .halg.digestsize = SHA224_DIGEST_SIZE,
2885 .halg.statesize = sizeof(struct talitos_export_state),
2887 .cra_name = "sha224",
2888 .cra_driver_name = "sha224-talitos",
2889 .cra_blocksize = SHA224_BLOCK_SIZE,
2890 .cra_flags = CRYPTO_ALG_ASYNC,
2893 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2894 DESC_HDR_SEL0_MDEUA |
2895 DESC_HDR_MODE0_MDEU_SHA224,
2897 { .type = CRYPTO_ALG_TYPE_AHASH,
2899 .halg.digestsize = SHA256_DIGEST_SIZE,
2900 .halg.statesize = sizeof(struct talitos_export_state),
2902 .cra_name = "sha256",
2903 .cra_driver_name = "sha256-talitos",
2904 .cra_blocksize = SHA256_BLOCK_SIZE,
2905 .cra_flags = CRYPTO_ALG_ASYNC,
2908 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2909 DESC_HDR_SEL0_MDEUA |
2910 DESC_HDR_MODE0_MDEU_SHA256,
2912 { .type = CRYPTO_ALG_TYPE_AHASH,
2914 .halg.digestsize = SHA384_DIGEST_SIZE,
2915 .halg.statesize = sizeof(struct talitos_export_state),
2917 .cra_name = "sha384",
2918 .cra_driver_name = "sha384-talitos",
2919 .cra_blocksize = SHA384_BLOCK_SIZE,
2920 .cra_flags = CRYPTO_ALG_ASYNC,
2923 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2924 DESC_HDR_SEL0_MDEUB |
2925 DESC_HDR_MODE0_MDEUB_SHA384,
2927 { .type = CRYPTO_ALG_TYPE_AHASH,
2929 .halg.digestsize = SHA512_DIGEST_SIZE,
2930 .halg.statesize = sizeof(struct talitos_export_state),
2932 .cra_name = "sha512",
2933 .cra_driver_name = "sha512-talitos",
2934 .cra_blocksize = SHA512_BLOCK_SIZE,
2935 .cra_flags = CRYPTO_ALG_ASYNC,
2938 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2939 DESC_HDR_SEL0_MDEUB |
2940 DESC_HDR_MODE0_MDEUB_SHA512,
2942 { .type = CRYPTO_ALG_TYPE_AHASH,
2944 .halg.digestsize = MD5_DIGEST_SIZE,
2945 .halg.statesize = sizeof(struct talitos_export_state),
2947 .cra_name = "hmac(md5)",
2948 .cra_driver_name = "hmac-md5-talitos",
2949 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2950 .cra_flags = CRYPTO_ALG_ASYNC,
2953 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2954 DESC_HDR_SEL0_MDEUA |
2955 DESC_HDR_MODE0_MDEU_MD5,
2957 { .type = CRYPTO_ALG_TYPE_AHASH,
2959 .halg.digestsize = SHA1_DIGEST_SIZE,
2960 .halg.statesize = sizeof(struct talitos_export_state),
2962 .cra_name = "hmac(sha1)",
2963 .cra_driver_name = "hmac-sha1-talitos",
2964 .cra_blocksize = SHA1_BLOCK_SIZE,
2965 .cra_flags = CRYPTO_ALG_ASYNC,
2968 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2969 DESC_HDR_SEL0_MDEUA |
2970 DESC_HDR_MODE0_MDEU_SHA1,
2972 { .type = CRYPTO_ALG_TYPE_AHASH,
2974 .halg.digestsize = SHA224_DIGEST_SIZE,
2975 .halg.statesize = sizeof(struct talitos_export_state),
2977 .cra_name = "hmac(sha224)",
2978 .cra_driver_name = "hmac-sha224-talitos",
2979 .cra_blocksize = SHA224_BLOCK_SIZE,
2980 .cra_flags = CRYPTO_ALG_ASYNC,
2983 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2984 DESC_HDR_SEL0_MDEUA |
2985 DESC_HDR_MODE0_MDEU_SHA224,
2987 { .type = CRYPTO_ALG_TYPE_AHASH,
2989 .halg.digestsize = SHA256_DIGEST_SIZE,
2990 .halg.statesize = sizeof(struct talitos_export_state),
2992 .cra_name = "hmac(sha256)",
2993 .cra_driver_name = "hmac-sha256-talitos",
2994 .cra_blocksize = SHA256_BLOCK_SIZE,
2995 .cra_flags = CRYPTO_ALG_ASYNC,
2998 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2999 DESC_HDR_SEL0_MDEUA |
3000 DESC_HDR_MODE0_MDEU_SHA256,
3002 { .type = CRYPTO_ALG_TYPE_AHASH,
3004 .halg.digestsize = SHA384_DIGEST_SIZE,
3005 .halg.statesize = sizeof(struct talitos_export_state),
3007 .cra_name = "hmac(sha384)",
3008 .cra_driver_name = "hmac-sha384-talitos",
3009 .cra_blocksize = SHA384_BLOCK_SIZE,
3010 .cra_flags = CRYPTO_ALG_ASYNC,
3013 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3014 DESC_HDR_SEL0_MDEUB |
3015 DESC_HDR_MODE0_MDEUB_SHA384,
3017 { .type = CRYPTO_ALG_TYPE_AHASH,
3019 .halg.digestsize = SHA512_DIGEST_SIZE,
3020 .halg.statesize = sizeof(struct talitos_export_state),
3022 .cra_name = "hmac(sha512)",
3023 .cra_driver_name = "hmac-sha512-talitos",
3024 .cra_blocksize = SHA512_BLOCK_SIZE,
3025 .cra_flags = CRYPTO_ALG_ASYNC,
3028 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3029 DESC_HDR_SEL0_MDEUB |
3030 DESC_HDR_MODE0_MDEUB_SHA512,
3034 struct talitos_crypto_alg {
3035 struct list_head entry;
3037 struct talitos_alg_template algt;
3040 static int talitos_init_common(struct talitos_ctx *ctx,
3041 struct talitos_crypto_alg *talitos_alg)
3043 struct talitos_private *priv;
3045 /* update context with ptr to dev */
3046 ctx->dev = talitos_alg->dev;
3048 /* assign SEC channel to tfm in round-robin fashion */
3049 priv = dev_get_drvdata(ctx->dev);
3050 ctx->ch = atomic_inc_return(&priv->last_chan) &
3051 (priv->num_channels - 1);
3053 /* copy descriptor header template value */
3054 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3056 /* select done notification */
3057 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3062 static int talitos_cra_init(struct crypto_tfm *tfm)
3064 struct crypto_alg *alg = tfm->__crt_alg;
3065 struct talitos_crypto_alg *talitos_alg;
3066 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3068 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3069 talitos_alg = container_of(__crypto_ahash_alg(alg),
3070 struct talitos_crypto_alg,
3073 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3076 return talitos_init_common(ctx, talitos_alg);
3079 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3081 struct aead_alg *alg = crypto_aead_alg(tfm);
3082 struct talitos_crypto_alg *talitos_alg;
3083 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3085 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3088 return talitos_init_common(ctx, talitos_alg);
3091 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3093 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3095 talitos_cra_init(tfm);
3098 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3099 sizeof(struct talitos_ahash_req_ctx));
3104 static void talitos_cra_exit(struct crypto_tfm *tfm)
3106 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3107 struct device *dev = ctx->dev;
3110 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3114 * given the alg's descriptor header template, determine whether descriptor
3115 * type and primary/secondary execution units required match the hw
3116 * capabilities description provided in the device tree node.
3118 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3120 struct talitos_private *priv = dev_get_drvdata(dev);
3123 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3124 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3126 if (SECONDARY_EU(desc_hdr_template))
3127 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3128 & priv->exec_units);
3133 static int talitos_remove(struct platform_device *ofdev)
3135 struct device *dev = &ofdev->dev;
3136 struct talitos_private *priv = dev_get_drvdata(dev);
3137 struct talitos_crypto_alg *t_alg, *n;
3140 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3141 switch (t_alg->algt.type) {
3142 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3144 case CRYPTO_ALG_TYPE_AEAD:
3145 crypto_unregister_aead(&t_alg->algt.alg.aead);
3146 case CRYPTO_ALG_TYPE_AHASH:
3147 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3150 list_del(&t_alg->entry);
3153 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3154 talitos_unregister_rng(dev);
3156 for (i = 0; i < 2; i++)
3158 free_irq(priv->irq[i], dev);
3159 irq_dispose_mapping(priv->irq[i]);
3162 tasklet_kill(&priv->done_task[0]);
3164 tasklet_kill(&priv->done_task[1]);
3169 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3170 struct talitos_alg_template
3173 struct talitos_private *priv = dev_get_drvdata(dev);
3174 struct talitos_crypto_alg *t_alg;
3175 struct crypto_alg *alg;
3177 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3180 return ERR_PTR(-ENOMEM);
3182 t_alg->algt = *template;
3184 switch (t_alg->algt.type) {
3185 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3186 alg = &t_alg->algt.alg.crypto;
3187 alg->cra_init = talitos_cra_init;
3188 alg->cra_exit = talitos_cra_exit;
3189 alg->cra_type = &crypto_ablkcipher_type;
3190 alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3192 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3193 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3195 case CRYPTO_ALG_TYPE_AEAD:
3196 alg = &t_alg->algt.alg.aead.base;
3197 alg->cra_exit = talitos_cra_exit;
3198 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3199 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3201 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3202 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3203 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3204 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3205 devm_kfree(dev, t_alg);
3206 return ERR_PTR(-ENOTSUPP);
3209 case CRYPTO_ALG_TYPE_AHASH:
3210 alg = &t_alg->algt.alg.hash.halg.base;
3211 alg->cra_init = talitos_cra_init_ahash;
3212 alg->cra_exit = talitos_cra_exit;
3213 t_alg->algt.alg.hash.init = ahash_init;
3214 t_alg->algt.alg.hash.update = ahash_update;
3215 t_alg->algt.alg.hash.final = ahash_final;
3216 t_alg->algt.alg.hash.finup = ahash_finup;
3217 t_alg->algt.alg.hash.digest = ahash_digest;
3218 if (!strncmp(alg->cra_name, "hmac", 4))
3219 t_alg->algt.alg.hash.setkey = ahash_setkey;
3220 t_alg->algt.alg.hash.import = ahash_import;
3221 t_alg->algt.alg.hash.export = ahash_export;
3223 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3224 !strncmp(alg->cra_name, "hmac", 4)) {
3225 devm_kfree(dev, t_alg);
3226 return ERR_PTR(-ENOTSUPP);
3228 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3229 (!strcmp(alg->cra_name, "sha224") ||
3230 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3231 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3232 t_alg->algt.desc_hdr_template =
3233 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3234 DESC_HDR_SEL0_MDEUA |
3235 DESC_HDR_MODE0_MDEU_SHA256;
3239 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3240 devm_kfree(dev, t_alg);
3241 return ERR_PTR(-EINVAL);
3244 alg->cra_module = THIS_MODULE;
3245 if (t_alg->algt.priority)
3246 alg->cra_priority = t_alg->algt.priority;
3248 alg->cra_priority = TALITOS_CRA_PRIORITY;
3249 if (has_ftr_sec1(priv))
3250 alg->cra_alignmask = 3;
3252 alg->cra_alignmask = 0;
3253 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3254 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3261 static int talitos_probe_irq(struct platform_device *ofdev)
3263 struct device *dev = &ofdev->dev;
3264 struct device_node *np = ofdev->dev.of_node;
3265 struct talitos_private *priv = dev_get_drvdata(dev);
3267 bool is_sec1 = has_ftr_sec1(priv);
3269 priv->irq[0] = irq_of_parse_and_map(np, 0);
3270 if (!priv->irq[0]) {
3271 dev_err(dev, "failed to map irq\n");
3275 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3276 dev_driver_string(dev), dev);
3280 priv->irq[1] = irq_of_parse_and_map(np, 1);
3282 /* get the primary irq line */
3283 if (!priv->irq[1]) {
3284 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3285 dev_driver_string(dev), dev);
3289 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3290 dev_driver_string(dev), dev);
3294 /* get the secondary irq line */
3295 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3296 dev_driver_string(dev), dev);
3298 dev_err(dev, "failed to request secondary irq\n");
3299 irq_dispose_mapping(priv->irq[1]);
3307 dev_err(dev, "failed to request primary irq\n");
3308 irq_dispose_mapping(priv->irq[0]);
3315 static int talitos_probe(struct platform_device *ofdev)
3317 struct device *dev = &ofdev->dev;
3318 struct device_node *np = ofdev->dev.of_node;
3319 struct talitos_private *priv;
3322 struct resource *res;
3324 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3328 INIT_LIST_HEAD(&priv->alg_list);
3330 dev_set_drvdata(dev, priv);
3332 priv->ofdev = ofdev;
3334 spin_lock_init(&priv->reg_lock);
3336 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3339 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3341 dev_err(dev, "failed to of_iomap\n");
3346 /* get SEC version capabilities from device tree */
3347 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3348 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3349 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3350 of_property_read_u32(np, "fsl,descriptor-types-mask",
3353 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3354 !priv->exec_units || !priv->desc_types) {
3355 dev_err(dev, "invalid property data in device tree node\n");
3360 if (of_device_is_compatible(np, "fsl,sec3.0"))
3361 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3363 if (of_device_is_compatible(np, "fsl,sec2.1"))
3364 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3365 TALITOS_FTR_SHA224_HWINIT |
3366 TALITOS_FTR_HMAC_OK;
3368 if (of_device_is_compatible(np, "fsl,sec1.0"))
3369 priv->features |= TALITOS_FTR_SEC1;
3371 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3372 priv->reg_deu = priv->reg + TALITOS12_DEU;
3373 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3374 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3375 stride = TALITOS1_CH_STRIDE;
3376 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3377 priv->reg_deu = priv->reg + TALITOS10_DEU;
3378 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3379 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3380 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3381 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3382 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3383 stride = TALITOS1_CH_STRIDE;
3385 priv->reg_deu = priv->reg + TALITOS2_DEU;
3386 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3387 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3388 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3389 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3390 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3391 priv->reg_keu = priv->reg + TALITOS2_KEU;
3392 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3393 stride = TALITOS2_CH_STRIDE;
3396 err = talitos_probe_irq(ofdev);
3400 if (has_ftr_sec1(priv)) {
3401 if (priv->num_channels == 1)
3402 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3403 (unsigned long)dev);
3405 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3406 (unsigned long)dev);
3409 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3410 (unsigned long)dev);
3411 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3412 (unsigned long)dev);
3413 } else if (priv->num_channels == 1) {
3414 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3415 (unsigned long)dev);
3417 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3418 (unsigned long)dev);
3422 priv->chan = devm_kcalloc(dev,
3424 sizeof(struct talitos_channel),
3427 dev_err(dev, "failed to allocate channel management space\n");
3432 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3434 for (i = 0; i < priv->num_channels; i++) {
3435 priv->chan[i].reg = priv->reg + stride * (i + 1);
3436 if (!priv->irq[1] || !(i & 1))
3437 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3439 spin_lock_init(&priv->chan[i].head_lock);
3440 spin_lock_init(&priv->chan[i].tail_lock);
3442 priv->chan[i].fifo = devm_kcalloc(dev,
3444 sizeof(struct talitos_request),
3446 if (!priv->chan[i].fifo) {
3447 dev_err(dev, "failed to allocate request fifo %d\n", i);
3452 atomic_set(&priv->chan[i].submit_count,
3453 -(priv->chfifo_len - 1));
3456 dma_set_mask(dev, DMA_BIT_MASK(36));
3458 /* reset and initialize the h/w */
3459 err = init_device(dev);
3461 dev_err(dev, "failed to initialize device\n");
3465 /* register the RNG, if available */
3466 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3467 err = talitos_register_rng(dev);
3469 dev_err(dev, "failed to register hwrng: %d\n", err);
3472 dev_info(dev, "hwrng\n");
3475 /* register crypto algorithms the device supports */
3476 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3477 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3478 struct talitos_crypto_alg *t_alg;
3479 struct crypto_alg *alg = NULL;
3481 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3482 if (IS_ERR(t_alg)) {
3483 err = PTR_ERR(t_alg);
3484 if (err == -ENOTSUPP)
3489 switch (t_alg->algt.type) {
3490 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3491 err = crypto_register_alg(
3492 &t_alg->algt.alg.crypto);
3493 alg = &t_alg->algt.alg.crypto;
3496 case CRYPTO_ALG_TYPE_AEAD:
3497 err = crypto_register_aead(
3498 &t_alg->algt.alg.aead);
3499 alg = &t_alg->algt.alg.aead.base;
3502 case CRYPTO_ALG_TYPE_AHASH:
3503 err = crypto_register_ahash(
3504 &t_alg->algt.alg.hash);
3505 alg = &t_alg->algt.alg.hash.halg.base;
3509 dev_err(dev, "%s alg registration failed\n",
3510 alg->cra_driver_name);
3511 devm_kfree(dev, t_alg);
3513 list_add_tail(&t_alg->entry, &priv->alg_list);
3516 if (!list_empty(&priv->alg_list))
3517 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3518 (char *)of_get_property(np, "compatible", NULL));
3523 talitos_remove(ofdev);
3528 static const struct of_device_id talitos_match[] = {
3529 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3531 .compatible = "fsl,sec1.0",
3534 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3536 .compatible = "fsl,sec2.0",
3541 MODULE_DEVICE_TABLE(of, talitos_match);
3543 static struct platform_driver talitos_driver = {
3546 .of_match_table = talitos_match,
3548 .probe = talitos_probe,
3549 .remove = talitos_remove,
3552 module_platform_driver(talitos_driver);
3554 MODULE_LICENSE("GPL");
3555 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3556 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");