2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 unsigned int len, bool is_sec1)
61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
63 ptr->len1 = cpu_to_be16(len);
65 ptr->len = cpu_to_be16(len);
66 ptr->eptr = upper_32_bits(dma_addr);
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71 struct talitos_ptr *src_ptr, bool is_sec1)
73 dst_ptr->ptr = src_ptr->ptr;
75 dst_ptr->len1 = src_ptr->len1;
77 dst_ptr->len = src_ptr->len;
78 dst_ptr->eptr = src_ptr->eptr;
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 return be16_to_cpu(ptr->len1);
88 return be16_to_cpu(ptr->len);
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
101 ptr->j_extent |= val;
105 * map virtual single (contiguous) pointer to h/w descriptor pointer
107 static void __map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir,
113 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
114 struct talitos_private *priv = dev_get_drvdata(dev);
115 bool is_sec1 = has_ftr_sec1(priv);
117 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
120 static void map_single_talitos_ptr(struct device *dev,
121 struct talitos_ptr *ptr,
122 unsigned int len, void *data,
123 enum dma_data_direction dir)
125 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
128 static void map_single_talitos_ptr_nosync(struct device *dev,
129 struct talitos_ptr *ptr,
130 unsigned int len, void *data,
131 enum dma_data_direction dir)
133 __map_single_talitos_ptr(dev, ptr, len, data, dir,
134 DMA_ATTR_SKIP_CPU_SYNC);
138 * unmap bus single (contiguous) h/w descriptor pointer
140 static void unmap_single_talitos_ptr(struct device *dev,
141 struct talitos_ptr *ptr,
142 enum dma_data_direction dir)
144 struct talitos_private *priv = dev_get_drvdata(dev);
145 bool is_sec1 = has_ftr_sec1(priv);
147 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
148 from_talitos_ptr_len(ptr, is_sec1), dir);
/*
 * reset_channel - reset and (re)configure one SEC channel
 *
 * Pulses the channel-reset bit (different bit/register for SEC1 vs SEC2+,
 * selected by is_sec1) and busy-waits up to TALITOS_TIMEOUT iterations for
 * the hardware to clear it, then re-enables 36-bit addressing, done
 * writeback and done IRQ, plus optional features.
 *
 * NOTE(review): this is an excerpt — the is_sec1/else braces, the timeout
 * error-return path and the final return are on elided lines.
 */
151 static int reset_channel(struct device *dev, int ch)
153 struct talitos_private *priv = dev_get_drvdata(dev);
154 unsigned int timeout = TALITOS_TIMEOUT;
155 bool is_sec1 = has_ftr_sec1(priv);
/* SEC1 reset lives in CCCR_LO ... */
158 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
159 TALITOS1_CCCR_LO_RESET);
161 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
162 TALITOS1_CCCR_LO_RESET) && --timeout)
/* ... while SEC2+ reset lives in CCCR */
165 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
166 TALITOS2_CCCR_RESET);
168 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
169 TALITOS2_CCCR_RESET) && --timeout)
/* timeout == 0 here means the reset bit never self-cleared */
174 dev_err(dev, "failed to reset channel %d\n", ch);
178 /* set 36-bit addressing, done writeback enable and done IRQ enable */
179 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
180 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
181 /* enable chaining descriptors */
183 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
186 /* and ICCR writeback, if available */
187 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
188 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
189 TALITOS_CCCR_LO_IWSE);
/*
 * reset_device - global software reset of the SEC block
 *
 * Sets the generation-specific MCR:SWR bit and busy-waits for the
 * hardware to clear it.  Afterwards (on some parts — the guarding
 * condition is on an elided line) the channel-remap bits RCA1/RCA3 are
 * programmed so secondary IRQs map to the expected channels.
 *
 * NOTE(review): excerpt — the wait-loop body, timeout error return and
 * final return are on elided lines.
 */
194 static int reset_device(struct device *dev)
196 struct talitos_private *priv = dev_get_drvdata(dev);
197 unsigned int timeout = TALITOS_TIMEOUT;
198 bool is_sec1 = has_ftr_sec1(priv);
199 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
201 setbits32(priv->reg + TALITOS_MCR, mcr);
203 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
208 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
209 setbits32(priv->reg + TALITOS_MCR, mcr);
/* reached with timeout == 0: the SWR bit never self-cleared */
213 dev_err(dev, "failed to reset device\n");
/*
 * Reset and initialize the device
 *
 * Full bring-up sequence: double device reset (per errata, see comment
 * below), per-channel reset, then interrupt-mask programming.  Note the
 * polarity difference: SEC1 enables interrupts by CLEARING IMR bits
 * (clrbits32), SEC2+ by SETTING them (setbits32) — the is_sec1 branch
 * selecting between the two is on elided lines.
 */
221 * Reset and initialize the device
223 static int init_device(struct device *dev)
225 struct talitos_private *priv = dev_get_drvdata(dev);
227 bool is_sec1 = has_ftr_sec1(priv);
231 * errata documentation: warning: certain SEC interrupts
232 * are not fully cleared by writing the MCR:SWR bit,
233 * set bit twice to completely reset
/* first reset ... */
235 err = reset_device(dev);
/* ... second reset per the errata note above */
239 err = reset_device(dev);
/* reset every channel; error paths are on elided lines */
244 for (ch = 0; ch < priv->num_channels; ch++) {
245 err = reset_channel(dev, ch);
250 /* enable channel done and error interrupts */
252 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
253 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
254 /* disable parity error check in DEU (erroneous? test vect.) */
255 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
257 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
258 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
261 /* disable integrity check error interrupts (use writeback instead) */
262 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
263 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
264 TALITOS_MDEUICR_LO_ICE);
270 * talitos_submit - submits a descriptor to the device for processing
271 * @dev: the SEC device to be used
272 * @ch: the SEC device channel to be used
273 * @desc: the descriptor to be processed by the device
274 * @callback: whom to call when processing is complete
275 * @context: a handle for use by caller (optional)
277 * desc must contain valid dma-mapped (bus physical) address pointers.
278 * callback must check err and feedback in descriptor header
279 * for device processing status.
281 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
282 void (*callback)(struct device *dev,
283 struct talitos_desc *desc,
284 void *context, int error),
287 struct talitos_private *priv = dev_get_drvdata(dev);
288 struct talitos_request *request;
291 bool is_sec1 = has_ftr_sec1(priv);
/* head_lock serializes producers against this channel's fifo head */
293 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
/* submit_count doubles as a fifo-full gate: 0 means no slots left */
295 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
296 /* h/w fifo is full */
297 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
/* NOTE(review): the -EAGAIN return for the full case is on an elided line */
301 head = priv->chan[ch].head;
302 request = &priv->chan[ch].fifo[head];
304 /* map descriptor and save caller data */
/* SEC1 branch: hdr is mirrored into hdr1 and mapping starts at hdr1 */
306 desc->hdr1 = desc->hdr;
307 request->dma_desc = dma_map_single(dev, &desc->hdr1,
/* SEC2+ branch: the whole descriptor is mapped as-is */
311 request->dma_desc = dma_map_single(dev, desc,
315 request->callback = callback;
316 request->context = context;
318 /* increment fifo head */
319 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
/* publish request->desc before kicking the hardware (barrier elided) */
322 request->desc = desc;
/* write the 36-bit bus address of the descriptor into the channel FF regs */
326 out_be32(priv->chan[ch].reg + TALITOS_FF,
327 upper_32_bits(request->dma_desc));
328 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
329 lower_32_bits(request->dma_desc));
331 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
335 EXPORT_SYMBOL(talitos_submit);
337 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
339 struct talitos_edesc *edesc;
342 return request->desc->hdr;
344 if (!request->desc->next_desc)
345 return request->desc->hdr1;
347 edesc = container_of(request->desc, struct talitos_edesc, desc);
349 return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
353 * process what was done, notify callback of error if not
/*
 * flush_channel - reap completed (or errored) requests from a channel fifo
 *
 * Walks the fifo from tail while entries have a non-NULL desc, unmaps each
 * descriptor, copies the request into saved_req so the user callback can
 * run outside the tail_lock, and advances the tail.  @error is propagated
 * to callbacks except for descriptors whose DONE bits are already set.
 */
355 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
357 struct talitos_private *priv = dev_get_drvdata(dev);
358 struct talitos_request *request, saved_req;
361 bool is_sec1 = has_ftr_sec1(priv);
363 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
365 tail = priv->chan[ch].tail;
366 while (priv->chan[ch].fifo[tail].desc) {
369 request = &priv->chan[ch].fifo[tail];
371 /* descriptors with their done bits set don't get the error */
373 hdr = get_request_hdr(request, is_sec1);
375 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
/* NOTE(review): status selection (0 vs error) and the !error early-out
 * are on elided lines between here and the unmap below */
383 dma_unmap_single(dev, request->dma_desc,
387 /* copy entries so we can call callback outside lock */
388 saved_req.desc = request->desc;
389 saved_req.callback = request->callback;
390 saved_req.context = request->context;
392 /* release request entry in fifo */
394 request->desc = NULL;
396 /* increment fifo tail */
397 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
399 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
/* free the submit slot before invoking the callback */
401 atomic_dec(&priv->chan[ch].submit_count);
403 saved_req.callback(dev, saved_req.desc, saved_req.context,
405 /* channel may resume processing in single desc error case */
406 if (error && !reset_ch && status == error)
/* re-acquire the lock and continue the walk from the updated tail */
408 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
409 tail = priv->chan[ch].tail;
412 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
416 * process completed requests for channels that have done status
/*
 * DEF_TALITOS1_DONE - generate a SEC1 "done" tasklet body
 *
 * Each generated tasklet flushes the channels whose done bits appear in
 * ch_done_mask (SEC1 scatters the per-channel done bits: 28, 30, 16, 18 —
 * see the literal masks below), then re-enables done interrupts by
 * CLEARING the IMR bits (SEC1 polarity is inverted vs SEC2).
 * NOTE(review): excerpt — braces/blank continuation lines of the macro are
 * elided; no comments are inserted inside the backslash-continued body.
 */
418 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
419 static void talitos1_done_##name(unsigned long data) \
421 struct device *dev = (struct device *)data; \
422 struct talitos_private *priv = dev_get_drvdata(dev); \
423 unsigned long flags; \
425 if (ch_done_mask & 0x10000000) \
426 flush_channel(dev, 0, 0, 0); \
427 if (ch_done_mask & 0x40000000) \
428 flush_channel(dev, 1, 0, 0); \
429 if (ch_done_mask & 0x00010000) \
430 flush_channel(dev, 2, 0, 0); \
431 if (ch_done_mask & 0x00040000) \
432 flush_channel(dev, 3, 0, 0); \
434 /* At this point, all completed channels have been processed */ \
435 /* Unmask done interrupts for channels completed later on. */ \
436 spin_lock_irqsave(&priv->reg_lock, flags); \
437 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
438 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
439 spin_unlock_irqrestore(&priv->reg_lock, flags); \
/* instantiations: one tasklet for all four channels, one for channel 0 */
442 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
443 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
/*
 * DEF_TALITOS2_DONE - generate a SEC2+ "done" tasklet body
 *
 * SEC2 packs channel-done bits at even positions 0/2/4/6; done interrupts
 * are re-enabled by SETTING IMR bits (opposite polarity to SEC1 above).
 * NOTE(review): excerpt — macro braces/blank continuations are elided.
 */
445 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
446 static void talitos2_done_##name(unsigned long data) \
448 struct device *dev = (struct device *)data; \
449 struct talitos_private *priv = dev_get_drvdata(dev); \
450 unsigned long flags; \
452 if (ch_done_mask & 1) \
453 flush_channel(dev, 0, 0, 0); \
454 if (ch_done_mask & (1 << 2)) \
455 flush_channel(dev, 1, 0, 0); \
456 if (ch_done_mask & (1 << 4)) \
457 flush_channel(dev, 2, 0, 0); \
458 if (ch_done_mask & (1 << 6)) \
459 flush_channel(dev, 3, 0, 0); \
461 /* At this point, all completed channels have been processed */ \
462 /* Unmask done interrupts for channels completed later on. */ \
463 spin_lock_irqsave(&priv->reg_lock, flags); \
464 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
465 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
466 spin_unlock_irqrestore(&priv->reg_lock, flags); \
/* instantiations for the possible IRQ wirings: all channels, ch0 only,
 * channels 0+2 and channels 1+3 */
469 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
470 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
471 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
472 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
475 * locate current (offending) descriptor
/*
 * current_desc_hdr - return the header of the descriptor the channel is
 * currently executing (the one that faulted)
 *
 * Reads the 64-bit current-descriptor pointer from CDPR/CDPR_LO, then
 * scans the sw fifo from tail for the request whose dma_desc (or whose
 * chained next_desc) matches.  Returns 0 on failure (elided lines).
 */
477 static u32 current_desc_hdr(struct device *dev, int ch)
479 struct talitos_private *priv = dev_get_drvdata(dev);
483 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
484 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
/* hardware gave us nothing to search for */
487 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
491 tail = priv->chan[ch].tail;
/* iter walks the circular fifo; wrap-around detection is on elided lines */
494 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
495 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
496 iter = (iter + 1) & (priv->fifo_len - 1);
498 dev_err(dev, "couldn't locate current descriptor\n");
/* chained case: the live header sits in the shadow descriptor stored
 * after the link tables in the edesc buffer */
503 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
504 struct talitos_edesc *edesc;
506 edesc = container_of(priv->chan[ch].fifo[iter].desc,
507 struct talitos_edesc, desc);
508 return ((struct talitos_desc *)
509 (edesc->buf + edesc->dma_len))->hdr;
512 return priv->chan[ch].fifo[iter].desc->hdr;
516 * user diagnostics; report root cause of error based on execution unit status
/*
 * report_eu_error - dump the interrupt-status registers of the execution
 * unit(s) selected by the faulting descriptor's header, then dump the
 * channel's descriptor buffer.
 *
 * NOTE(review): excerpt — the braces, `break` statements between cases and
 * the is_sec1 early path that rereads desc_hdr from DESCBUF are partially
 * elided; case bodies below are otherwise intact.
 */
518 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
520 struct talitos_private *priv = dev_get_drvdata(dev);
/* fall back to the header saved in the channel's descriptor buffer */
524 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
/* primary execution unit (SEL0 field) */
526 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
527 case DESC_HDR_SEL0_AFEU:
528 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
529 in_be32(priv->reg_afeu + TALITOS_EUISR),
530 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
532 case DESC_HDR_SEL0_DEU:
533 dev_err(dev, "DEUISR 0x%08x_%08x\n",
534 in_be32(priv->reg_deu + TALITOS_EUISR),
535 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
537 case DESC_HDR_SEL0_MDEUA:
538 case DESC_HDR_SEL0_MDEUB:
539 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
540 in_be32(priv->reg_mdeu + TALITOS_EUISR),
541 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
543 case DESC_HDR_SEL0_RNG:
544 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
545 in_be32(priv->reg_rngu + TALITOS_ISR),
546 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
548 case DESC_HDR_SEL0_PKEU:
549 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
550 in_be32(priv->reg_pkeu + TALITOS_EUISR),
551 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
553 case DESC_HDR_SEL0_AESU:
554 dev_err(dev, "AESUISR 0x%08x_%08x\n",
555 in_be32(priv->reg_aesu + TALITOS_EUISR),
556 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
558 case DESC_HDR_SEL0_CRCU:
559 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
560 in_be32(priv->reg_crcu + TALITOS_EUISR),
561 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
563 case DESC_HDR_SEL0_KEU:
564 dev_err(dev, "KEUISR 0x%08x_%08x\n",
/* NOTE(review): KEU prints the PKEU register block here — looks like an
 * upstream quirk (KEU shares/mislabels pkeu regs); confirm before changing */
565 in_be32(priv->reg_pkeu + TALITOS_EUISR),
566 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
/* secondary execution unit (SEL1 field) */
570 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
571 case DESC_HDR_SEL1_MDEUA:
572 case DESC_HDR_SEL1_MDEUB:
573 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
574 in_be32(priv->reg_mdeu + TALITOS_EUISR),
575 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
577 case DESC_HDR_SEL1_CRCU:
578 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
579 in_be32(priv->reg_crcu + TALITOS_EUISR),
580 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
/* dump the 8 descriptor-buffer dwords for post-mortem analysis */
584 for (i = 0; i < 8; i++)
585 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
586 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
587 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
591 * recover from error interrupts
/*
 * talitos_error - decode channel error status, flush affected channels and
 * recover (continue, channel reset, or full device reset).
 *
 * NOTE(review): excerpt — many braces, `error = -E...` assignments and the
 * trailing device-reset call are on elided lines; comments below annotate
 * only what is visible.
 */
593 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
595 struct talitos_private *priv = dev_get_drvdata(dev);
596 unsigned int timeout = TALITOS_TIMEOUT;
597 int ch, error, reset_dev = 0;
599 bool is_sec1 = has_ftr_sec1(priv);
600 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
602 for (ch = 0; ch < priv->num_channels; ch++) {
603 /* skip channels without errors */
/* SEC1 error-bit layout for channels 0..3: */
605 /* bits 29, 31, 17, 19 */
606 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
/* SEC2 layout: error bits at odd positions 1/3/5/7 */
609 if (!(isr & (1 << (ch * 2 + 1))))
/* read per-channel pointer-status to classify the error */
615 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
617 if (v_lo & TALITOS_CCPSR_LO_DOF) {
618 dev_err(dev, "double fetch fifo overflow error\n");
622 if (v_lo & TALITOS_CCPSR_LO_SOF) {
623 /* h/w dropped descriptor */
624 dev_err(dev, "single fetch fifo overflow error\n");
627 if (v_lo & TALITOS_CCPSR_LO_MDTE)
628 dev_err(dev, "master data transfer error\n");
/* several status bits mean different things on SEC1 vs SEC2+: */
629 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
630 dev_err(dev, is_sec1 ? "pointer not complete error\n"
631 : "s/g data length zero error\n");
632 if (v_lo & TALITOS_CCPSR_LO_FPZ)
633 dev_err(dev, is_sec1 ? "parity error\n"
634 : "fetch pointer zero error\n");
635 if (v_lo & TALITOS_CCPSR_LO_IDH)
636 dev_err(dev, "illegal descriptor header error\n");
637 if (v_lo & TALITOS_CCPSR_LO_IEU)
638 dev_err(dev, is_sec1 ? "static assignment error\n"
639 : "invalid exec unit error\n");
640 if (v_lo & TALITOS_CCPSR_LO_EU)
641 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
/* SEC2-only scatter/gather faults (guard on elided line) */
643 if (v_lo & TALITOS_CCPSR_LO_GB)
644 dev_err(dev, "gather boundary error\n");
645 if (v_lo & TALITOS_CCPSR_LO_GRL)
646 dev_err(dev, "gather return/length error\n");
647 if (v_lo & TALITOS_CCPSR_LO_SB)
648 dev_err(dev, "scatter boundary error\n");
649 if (v_lo & TALITOS_CCPSR_LO_SRL)
650 dev_err(dev, "scatter return/length error\n");
/* fail pending requests on this channel, then reset or continue */
653 flush_channel(dev, ch, error, reset_ch);
656 reset_channel(dev, ch);
/* SEC2 continuation path: nudge CCCR_CONT and wait for it to clear */
658 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
660 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
661 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
662 TALITOS2_CCCR_CONT) && --timeout)
665 dev_err(dev, "failed to restart channel %d\n",
/* global (non-channel) errors force a full device reset */
671 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
672 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
673 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
674 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
677 dev_err(dev, "done overflow, internal time out, or "
678 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
680 /* purge request queues */
681 for (ch = 0; ch < priv->num_channels; ch++)
682 flush_channel(dev, ch, -EIO, 1);
684 /* reset and reinitialize the device */
/*
 * DEF_TALITOS1_INTERRUPT - generate a SEC1 hard-IRQ handler
 *
 * Under reg_lock: read+ack ISR/ISR_LO, dispatch errors to talitos_error()
 * (with the lock dropped), otherwise mask done interrupts and schedule the
 * matching done tasklet (index @tlet), which re-unmasks on exit.  Note the
 * SEC1 polarity: masking done IRQs uses setbits32 on IMR.
 * NOTE(review): excerpt — macro braces/blank continuations are elided.
 */
689 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
690 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
692 struct device *dev = data; \
693 struct talitos_private *priv = dev_get_drvdata(dev); \
695 unsigned long flags; \
697 spin_lock_irqsave(&priv->reg_lock, flags); \
698 isr = in_be32(priv->reg + TALITOS_ISR); \
699 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
700 /* Acknowledge interrupt */ \
701 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
702 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
704 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
705 spin_unlock_irqrestore(&priv->reg_lock, flags); \
706 talitos_error(dev, isr & ch_err_mask, isr_lo); \
709 if (likely(isr & ch_done_mask)) { \
710 /* mask further done interrupts. */ \
711 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
712 /* done_task will unmask done interrupts at exit */ \
713 tasklet_schedule(&priv->done_task[tlet]); \
715 spin_unlock_irqrestore(&priv->reg_lock, flags); \
718 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
722 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
/*
 * DEF_TALITOS2_INTERRUPT - generate a SEC2+ hard-IRQ handler
 *
 * Same structure as the SEC1 variant above, with inverted IMR polarity
 * (clrbits32 masks done IRQs) and any isr_lo bit treated as an error.
 * NOTE(review): excerpt — macro braces/blank continuations are elided.
 */
724 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
725 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
727 struct device *dev = data; \
728 struct talitos_private *priv = dev_get_drvdata(dev); \
730 unsigned long flags; \
732 spin_lock_irqsave(&priv->reg_lock, flags); \
733 isr = in_be32(priv->reg + TALITOS_ISR); \
734 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
735 /* Acknowledge interrupt */ \
736 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
737 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
739 if (unlikely(isr & ch_err_mask || isr_lo)) { \
740 spin_unlock_irqrestore(&priv->reg_lock, flags); \
741 talitos_error(dev, isr & ch_err_mask, isr_lo); \
744 if (likely(isr & ch_done_mask)) { \
745 /* mask further done interrupts. */ \
746 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
747 /* done_task will unmask done interrupts at exit */ \
748 tasklet_schedule(&priv->done_task[tlet]); \
750 spin_unlock_irqrestore(&priv->reg_lock, flags); \
753 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
/* one handler per possible IRQ wiring */
757 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
758 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
760 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
766 static int talitos_rng_data_present(struct hwrng *rng, int wait)
768 struct device *dev = (struct device *)rng->priv;
769 struct talitos_private *priv = dev_get_drvdata(dev);
773 for (i = 0; i < 20; i++) {
774 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
775 TALITOS_RNGUSR_LO_OFL;
784 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
786 struct device *dev = (struct device *)rng->priv;
787 struct talitos_private *priv = dev_get_drvdata(dev);
789 /* rng fifo requires 64-bit accesses */
790 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
791 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
796 static int talitos_rng_init(struct hwrng *rng)
798 struct device *dev = (struct device *)rng->priv;
799 struct talitos_private *priv = dev_get_drvdata(dev);
800 unsigned int timeout = TALITOS_TIMEOUT;
802 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
803 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
804 & TALITOS_RNGUSR_LO_RD)
808 dev_err(dev, "failed to reset rng hw\n");
812 /* start generating */
813 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
818 static int talitos_register_rng(struct device *dev)
820 struct talitos_private *priv = dev_get_drvdata(dev);
823 priv->rng.name = dev_driver_string(dev),
824 priv->rng.init = talitos_rng_init,
825 priv->rng.data_present = talitos_rng_data_present,
826 priv->rng.data_read = talitos_rng_data_read,
827 priv->rng.priv = (unsigned long)dev;
829 err = hwrng_register(&priv->rng);
831 priv->rng_registered = true;
836 static void talitos_unregister_rng(struct device *dev)
838 struct talitos_private *priv = dev_get_drvdata(dev);
840 if (!priv->rng_registered)
843 hwrng_unregister(&priv->rng);
844 priv->rng_registered = false;
/* crypto-algorithm registration priorities and per-tfm context layouts */
850 #define TALITOS_CRA_PRIORITY 3000
852 * Defines a priority for doing AEAD with descriptors type
853 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
855 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
/* worst case: AES key plus an HMAC key padded to SHA512 block size */
856 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
857 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
/* struct talitos_ctx fields (the struct header line is elided in this
 * excerpt): per-tfm key material and the cached descriptor-header template */
862 __be32 desc_hdr_template;
863 u8 key[TALITOS_MAX_KEY_SIZE];
864 u8 iv[TALITOS_MAX_IV_LENGTH];
/* for AEAD: enckeylen + authkeylen == keylen (see aead_setkey below) */
867 unsigned int enckeylen;
868 unsigned int authkeylen;
871 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
872 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
/* per-request ahash state: hardware hash context plus double buffering of
 * not-yet-hashed trailing bytes */
874 struct talitos_ahash_req_ctx {
875 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
876 unsigned int hw_context_size;
877 u8 buf[2][HASH_MAX_BLOCK_SIZE];
882 unsigned int to_hash_later;
884 struct scatterlist bufsl[2];
885 struct scatterlist *psrc;
/* serialized form of the above for ahash export()/import() */
888 struct talitos_export_state {
889 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
890 u8 buf[HASH_MAX_BLOCK_SIZE];
894 unsigned int to_hash_later;
/*
 * aead_setkey - set the combined authentication+encryption key for an AEAD tfm
 *
 * Uses crypto_authenc_extractkeys() to split the rtattr-encoded authenc
 * key (avoids parsing the blob by hand), stores authkey||enckey into
 * ctx->key, and (re)maps it for DMA.  keys is wiped with
 * memzero_explicit() on both success and failure paths.
 * NOTE(review): excerpt — the `goto badkey` jumps and returns sit on
 * elided lines between the checks and the bad-key epilogue below.
 */
898 static int aead_setkey(struct crypto_aead *authenc,
899 const u8 *key, unsigned int keylen)
901 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
902 struct device *dev = ctx->dev;
903 struct crypto_authenc_keys keys;
905 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
908 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
/* drop the old DMA mapping before overwriting ctx->key */
912 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
914 memcpy(ctx->key, keys.authkey, keys.authkeylen);
915 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
917 ctx->keylen = keys.authkeylen + keys.enckeylen;
918 ctx->enckeylen = keys.enckeylen;
919 ctx->authkeylen = keys.authkeylen;
920 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
/* scrub the stack copy of the split keys */
923 memzero_explicit(&keys, sizeof(keys));
/* badkey epilogue */
927 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
928 memzero_explicit(&keys, sizeof(keys));
/*
 * talitos_sg_unmap - unmap the src/dst scatterlists of a request
 *
 * On SEC1 with a multi-entry dst, the engine wrote into the edesc bounce
 * buffer instead of the scatterlist, so the result is synced and copied
 * back first.  SEC1 multi-entry lists were never dma_map_sg()'d (they were
 * bounced), hence the `== 1 || !is_sec1` guards on the unmaps.
 * NOTE(review): excerpt — the src != dst / src == dst branch structure is
 * partly on elided lines.
 */
932 static void talitos_sg_unmap(struct device *dev,
933 struct talitos_edesc *edesc,
934 struct scatterlist *src,
935 struct scatterlist *dst,
936 unsigned int len, unsigned int offset)
938 struct talitos_private *priv = dev_get_drvdata(dev);
939 bool is_sec1 = has_ftr_sec1(priv);
940 unsigned int src_nents = edesc->src_nents ? : 1;
941 unsigned int dst_nents = edesc->dst_nents ? : 1;
/* SEC1 bounce-buffer copy-back for scattered destinations */
943 if (is_sec1 && dst && dst_nents > 1) {
944 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
945 len, DMA_FROM_DEVICE);
946 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
/* distinct src/dst: unmap each in its own direction */
950 if (src_nents == 1 || !is_sec1)
951 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
953 if (dst && (dst_nents == 1 || !is_sec1))
954 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
/* in-place (src == dst): one bidirectional mapping to undo */
955 } else if (src_nents == 1 || !is_sec1) {
956 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
960 static void ipsec_esp_unmap(struct device *dev,
961 struct talitos_edesc *edesc,
962 struct aead_request *areq)
964 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
965 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
966 unsigned int ivsize = crypto_aead_ivsize(aead);
967 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
968 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
971 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
973 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
975 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
979 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
983 unsigned int dst_nents = edesc->dst_nents ? : 1;
985 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
986 areq->assoclen + areq->cryptlen - ivsize);
991 * ipsec_esp descriptor callbacks
/*
 * ipsec_esp_encrypt_done - completion callback for AEAD encryption
 *
 * Unmaps the request; if the engine deposited the generated ICV
 * out-of-line (icv_ool), copies it to its final place at the tail of dst.
 * The icvdata source differs by generation: SEC1 keeps it in the edesc
 * bounce buffer, SEC2+ after the link-table entries.
 */
993 static void ipsec_esp_encrypt_done(struct device *dev,
994 struct talitos_desc *desc, void *context,
997 struct talitos_private *priv = dev_get_drvdata(dev);
998 bool is_sec1 = has_ftr_sec1(priv);
999 struct aead_request *areq = context;
1000 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1001 unsigned int authsize = crypto_aead_authsize(authenc);
1002 unsigned int ivsize = crypto_aead_ivsize(authenc);
1003 struct talitos_edesc *edesc;
1006 edesc = container_of(desc, struct talitos_edesc, desc);
1008 ipsec_esp_unmap(dev, edesc, areq);
1010 /* copy the generated ICV to dst */
1011 if (edesc->icv_ool) {
/* SEC1: ICV landed in the bounce buffer right after the payload */
1013 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
/* SEC2+: ICV follows the src+dst link-table entries */
1015 icvdata = &edesc->link_tbl[edesc->src_nents +
1016 edesc->dst_nents + 2];
1017 sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
1018 authsize, areq->assoclen + areq->cryptlen);
1021 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
/* NOTE(review): kfree(edesc) is on an elided line before completion */
1025 aead_request_complete(areq, err);
/*
 * ipsec_esp_decrypt_swauth_done - decrypt completion with SOFTWARE ICV check
 *
 * Used when the hardware cannot verify the ICV itself: the received ICV
 * (tail of dst) is compared against the one the engine computed (oicv,
 * stored after the link tables / in the link-table area depending on
 * generation).  crypto_memneq() keeps the comparison constant-time;
 * mismatch yields -EBADMSG.
 * NOTE(review): excerpt — the surrounding `if (!err)` guard, kfree and
 * some branch braces are on elided lines.
 */
1028 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1029 struct talitos_desc *desc,
1030 void *context, int err)
1032 struct aead_request *req = context;
1033 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1034 unsigned int authsize = crypto_aead_authsize(authenc);
1035 struct talitos_edesc *edesc;
1037 struct talitos_private *priv = dev_get_drvdata(dev);
1038 bool is_sec1 = has_ftr_sec1(priv);
1040 edesc = container_of(desc, struct talitos_edesc, desc);
1042 ipsec_esp_unmap(dev, edesc, req);
/* gather the received ICV: linearize from dst when scattered ... */
1045 char icvdata[SHA512_DIGEST_SIZE];
1046 int nents = edesc->dst_nents ? : 1;
1047 unsigned int len = req->assoclen + req->cryptlen;
1051 sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
/* ... or read it in place when dst is contiguous */
1055 icv = (char *)sg_virt(req->dst) + len - authsize;
/* locate the engine-computed ICV (oicv) */
1058 if (edesc->dma_len) {
/* SEC1: after payload in the bounce area */
1060 oicv = (char *)&edesc->dma_link_tbl +
1061 req->assoclen + req->cryptlen;
/* SEC2+: after the src+dst link-table entries */
1064 &edesc->link_tbl[edesc->src_nents +
1065 edesc->dst_nents + 2];
1067 icv = oicv + authsize;
1069 oicv = (char *)&edesc->link_tbl[0];
/* constant-time compare; any difference is an authentication failure */
1071 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1076 aead_request_complete(req, err);
1079 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1080 struct talitos_desc *desc,
1081 void *context, int err)
1083 struct aead_request *req = context;
1084 struct talitos_edesc *edesc;
1086 edesc = container_of(desc, struct talitos_edesc, desc);
1088 ipsec_esp_unmap(dev, edesc, req);
1090 /* check ICV auth status */
1091 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1092 DESC_HDR_LO_ICCR1_PASS))
1097 aead_request_complete(req, err);
1101 * convert scatterlist to SEC h/w link table format
1102 * stop at cryptlen bytes
/*
 * sg_to_link_tbl_offset - build SEC link-table entries from a scatterlist
 *
 * Skips @offset bytes into the list, emits one talitos_ptr per covered
 * segment (clamped to the remaining @cryptlen), and tags the final entry
 * with DESC_PTR_LNKTBL_RETURN.  Returns the entry count (count increment
 * and len clamping are on elided lines).
 */
1104 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1105 unsigned int offset, int cryptlen,
1106 struct talitos_ptr *link_tbl_ptr)
1108 int n_sg = sg_count;
1111 while (cryptlen && sg && n_sg--) {
1112 unsigned int len = sg_dma_len(sg);
/* whole segment precedes the requested offset — skip it */
1114 if (offset >= len) {
1124 to_talitos_ptr(link_tbl_ptr + count,
1125 sg_dma_address(sg) + offset, len, 0);
1126 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1135 /* tag end of link table */
1137 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1138 DESC_PTR_LNKTBL_RETURN, 0);
/*
 * talitos_sg_map_ext - point a descriptor entry at scatterlist data
 *
 * Fast paths: zero length -> null pointer; single segment (or SEC1, where
 * data was bounced) -> direct pointer with @elen stored in the extent
 * byte.  Otherwise a link table is built at @tbl_off and the descriptor
 * pointer is flagged with DESC_PTR_LNKTBL_JUMP — unless the table
 * collapsed to one entry, in which case it is copied inline.  Returns the
 * number of link-table entries consumed (0 for the direct cases).
 * NOTE(review): excerpt — the early returns and is_sec1 branch braces are
 * on elided lines.
 */
1143 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1144 unsigned int len, struct talitos_edesc *edesc,
1145 struct talitos_ptr *ptr, int sg_count,
1146 unsigned int offset, int tbl_off, int elen)
1148 struct talitos_private *priv = dev_get_drvdata(dev);
1149 bool is_sec1 = has_ftr_sec1(priv);
/* nothing to map */
1152 to_talitos_ptr(ptr, 0, 0, is_sec1);
1155 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1156 if (sg_count == 1) {
1157 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
/* SEC1 bounce-buffer path: data already linear in edesc->buf */
1161 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1164 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1165 &edesc->link_tbl[tbl_off]);
1166 if (sg_count == 1) {
1167 /* Only one segment now, so no link tbl needed*/
1168 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1171 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1172 tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1173 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
/*
 * talitos_sg_map - talitos_sg_map_ext() without an extent
 *
 * Thin wrapper passing elen == 0; used by callers that have no trailing
 * HMAC/ICV bytes to account for.
 */
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}
1188 * fill in and submit ipsec_esp descriptor
/*
 * ipsec_esp() - fill in and submit a single-pass AEAD (IPsec ESP style)
 * descriptor: auth key, associated data, IV, cipher key, cipher in/out and
 * ICV are loaded into the seven descriptor pointers, then the descriptor is
 * handed to talitos_submit(); on submit failure the DMA mappings are undone
 * via ipsec_esp_unmap().
 * NOTE(review): extract is elided - several statements/braces between the
 * visible lines are missing from this view.
 */
1190 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1191 void (*callback)(struct device *dev,
1192 struct talitos_desc *desc,
1193 void *context, int error))
1195 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1196 unsigned int authsize = crypto_aead_authsize(aead);
1197 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1198 struct device *dev = ctx->dev;
1199 struct talitos_desc *desc = &edesc->desc;
1200 unsigned int cryptlen = areq->cryptlen;
1201 unsigned int ivsize = crypto_aead_ivsize(aead);
1205 bool sync_needed = false;
1206 struct talitos_private *priv = dev_get_drvdata(dev);
1207 bool is_sec1 = has_ftr_sec1(priv);
1208 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
/* IPSEC_ESP and HMAC_SNOOP_NO_AFEU descriptor types swap the IV/key slots */
1209 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1210 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* ptr[0]: authentication key (already DMA mapped in the ctx) */
1213 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1215 sg_count = edesc->src_nents ?: 1;
/* SEC1 cannot scatter/gather: linearize multi-entry src into edesc->buf */
1216 if (is_sec1 && sg_count > 1)
1217 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1218 areq->assoclen + cryptlen);
1220 sg_count = dma_map_sg(dev, areq->src, sg_count,
1221 (areq->src == areq->dst) ?
1222 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* ptr[1]: associated data */
1225 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1226 &desc->ptr[1], sg_count, 0, tbl_off);
/* cipher IV and cipher key (key follows the auth key in ctx->dma_key) */
1234 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1237 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1238 ctx->enckeylen, is_sec1);
1242 * map and adjust cipher len to aead request cryptlen.
1243 * extent is bytes of HMAC postpended to ciphertext,
1244 * typically 12 for ipsec
1246 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
/* ptr[4]: cipher input, with ICV extent when h/w checks the ICV */
1249 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1250 sg_count, areq->assoclen, tbl_off, elen);
/* ptr[5]: cipher output; dst needs its own mapping when != src */
1258 if (areq->src != areq->dst) {
1259 sg_count = edesc->dst_nents ? : 1;
1260 if (!is_sec1 || sg_count == 1)
1261 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1264 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1265 sg_count, areq->assoclen, tbl_off);
1268 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
/* icv_ool records that the ICV lands out-of-line (in the link table area) */
1273 edesc->icv_ool = true;
1277 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1278 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1279 sizeof(struct talitos_ptr) + authsize;
1281 /* Add an entry to the link table for ICV data */
1282 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1283 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1286 /* icv data follows link tables */
1287 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1290 dma_addr_t addr = edesc->dma_link_tbl;
1293 addr += areq->assoclen + cryptlen;
1295 addr += sizeof(struct talitos_ptr) * tbl_off;
/* ptr[6]: ICV location for the hardware to read/write */
1297 to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1299 } else if (!is_ipsec_esp) {
1300 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1301 &desc->ptr[6], sg_count, areq->assoclen +
1306 edesc->icv_ool = true;
1309 edesc->icv_ool = false;
1312 edesc->icv_ool = false;
/* iv out (presumably IPSEC_ESP only; surrounding condition elided) */
1317 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
/* flush link table to device before the engine walks it */
1321 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1325 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1326 if (ret != -EINPROGRESS) {
/* submission failed: tear down all DMA mappings made above */
1327 ipsec_esp_unmap(dev, edesc, areq);
1334 * allocate and map the extended descriptor
/*
 * talitos_edesc_alloc() - allocate and DMA-map the extended descriptor:
 * base edesc + link tables + stashed/generated ICV space + a copy of the
 * IV at the tail of the allocation.  Returns ERR_PTR on failure.
 * NOTE(review): extract is elided - some parameters, braces and the final
 * return are not visible in this view.
 */
1336 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1337 struct scatterlist *src,
1338 struct scatterlist *dst,
1340 unsigned int assoclen,
1341 unsigned int cryptlen,
1342 unsigned int authsize,
1343 unsigned int ivsize,
1348 struct talitos_edesc *edesc;
1349 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1350 dma_addr_t iv_dma = 0;
/* may sleep only when the caller allows it */
1351 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1353 struct talitos_private *priv = dev_get_drvdata(dev);
1354 bool is_sec1 = has_ftr_sec1(priv);
1355 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
/* reject requests larger than the hardware data-length limit */
1357 if (cryptlen + authsize > max_len) {
1358 dev_err(dev, "length exceeds h/w max limit\n");
1359 return ERR_PTR(-EINVAL);
/* in-place operation: src carries everything incl. the ICV */
1362 if (!dst || dst == src) {
1363 src_len = assoclen + cryptlen + authsize;
1364 src_nents = sg_nents_for_len(src, src_len);
1365 if (src_nents < 0) {
1366 dev_err(dev, "Invalid number of src SG.\n");
1367 return ERR_PTR(-EINVAL);
/* nents == 0 encodes "single contiguous segment, no link table needed" */
1369 src_nents = (src_nents == 1) ? 0 : src_nents;
1370 dst_nents = dst ? src_nents : 0;
1372 } else { /* dst && dst != src*/
/* ICV is read from src on decrypt, written to dst on encrypt */
1373 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1374 src_nents = sg_nents_for_len(src, src_len);
1375 if (src_nents < 0) {
1376 dev_err(dev, "Invalid number of src SG.\n");
1377 return ERR_PTR(-EINVAL);
1379 src_nents = (src_nents == 1) ? 0 : src_nents;
1380 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1381 dst_nents = sg_nents_for_len(dst, dst_len);
1382 if (dst_nents < 0) {
1383 dev_err(dev, "Invalid number of dst SG.\n");
1384 return ERR_PTR(-EINVAL);
1386 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1390 * allocate space for base edesc plus the link tables,
1391 * allowing for two separate entries for AD and generated ICV (+ 2),
1392 * and space for two sets of ICVs (stashed and generated)
1394 alloc_len = sizeof(struct talitos_edesc);
1395 if (src_nents || dst_nents) {
/* SEC1 path (branch structure elided): linear bounce buffer, not a table */
1397 dma_len = (src_nents ? src_len : 0) +
1398 (dst_nents ? dst_len : 0);
1400 dma_len = (src_nents + dst_nents + 2) *
1401 sizeof(struct talitos_ptr) + authsize * 2;
1402 alloc_len += dma_len;
1405 alloc_len += icv_stashing ? authsize : 0;
1408 /* if its a ahash, add space for a second desc next to the first one */
1409 if (is_sec1 && !dst)
1410 alloc_len += sizeof(struct talitos_desc);
/* the IV is copied to the very end of the allocation and mapped from there */
1411 alloc_len += ivsize;
1413 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1415 return ERR_PTR(-ENOMEM);
1417 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1418 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1420 memset(&edesc->desc, 0, sizeof(edesc->desc));
1422 edesc->src_nents = src_nents;
1423 edesc->dst_nents = dst_nents;
1424 edesc->iv_dma = iv_dma;
1425 edesc->dma_len = dma_len;
1427 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
/*
 * aead_edesc_alloc() - AEAD front-end for talitos_edesc_alloc(): pulls
 * authsize/ivsize from the transform and src/dst/assoclen/cryptlen from
 * the request, then delegates.
 */
1434 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1435 int icv_stashing, bool encrypt)
1437 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1438 unsigned int authsize = crypto_aead_authsize(authenc);
1439 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1440 unsigned int ivsize = crypto_aead_ivsize(authenc);
1442 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1443 iv, areq->assoclen, areq->cryptlen,
1444 authsize, ivsize, icv_stashing,
1445 areq->base.flags, encrypt);
/*
 * aead_encrypt() - AEAD encrypt entry point: allocate the extended
 * descriptor, set the per-ctx header template with the ENCRYPT mode bit,
 * and submit via ipsec_esp().
 */
1448 static int aead_encrypt(struct aead_request *req)
1450 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1451 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1452 struct talitos_edesc *edesc;
1454 /* allocate extended descriptor */
1455 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1457 return PTR_ERR(edesc);
/* set encrypt */
1460 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1462 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
/*
 * aead_decrypt() - AEAD decrypt entry point.  Uses the hardware ICV check
 * (DESC_HDR_MODE1_MDEU_CICV) when the device supports it for this request
 * layout; otherwise stashes the incoming ICV so the completion handler can
 * compare it in software.
 * NOTE(review): extract is elided - some braces/conditions between the
 * visible lines are missing from this view.
 */
1465 static int aead_decrypt(struct aead_request *req)
1467 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1468 unsigned int authsize = crypto_aead_authsize(authenc);
1469 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1470 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1471 struct talitos_edesc *edesc;
/* cryptlen includes the ICV on input; strip it for the cipher pass */
1474 req->cryptlen -= authsize;
1476 /* allocate extended descriptor */
1477 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1479 return PTR_ERR(edesc);
1481 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1482 ((!edesc->src_nents && !edesc->dst_nents) ||
1483 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1485 /* decrypt and check the ICV */
1486 edesc->desc.hdr = ctx->desc_hdr_template |
1487 DESC_HDR_DIR_INBOUND |
1488 DESC_HDR_MODE1_MDEU_CICV;
1490 /* reset integrity check result bits */
1492 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1495 /* Have to check the ICV with software */
1496 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1498 /* stash incoming ICV for later cmp with ICV generated by the h/w */
/* stash area sits past the link table when one exists (condition elided) */
1500 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1501 edesc->dst_nents + 2];
1503 icvdata = &edesc->link_tbl[0];
1505 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1506 req->assoclen + req->cryptlen - authsize);
1508 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
/*
 * ablkcipher_setkey() - validate and store a cipher key: rejects oversized
 * keys, optionally rejects weak DES keys when the caller set
 * CRYPTO_TFM_REQ_WEAK_KEY, then remaps ctx->key for DMA.
 * NOTE(review): extract is elided - the error returns and closing braces
 * between the visible lines are missing from this view.
 */
1511 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1512 const u8 *key, unsigned int keylen)
1514 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1515 struct device *dev = ctx->dev;
1516 u32 tmp[DES_EXPKEY_WORDS];
1518 if (keylen > TALITOS_MAX_KEY_SIZE) {
1519 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/* des_ekey() returning 0 indicates a weak DES key */
1523 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1524 CRYPTO_TFM_REQ_WEAK_KEY) &&
1525 !des_ekey(tmp, key)) {
1526 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
/* unmap any previous key mapping before replacing it (guard elided) */
1531 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1533 memcpy(&ctx->key, key, keylen);
1534 ctx->keylen = keylen;
1536 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
/*
 * common_nonsnoop_unmap() - undo the DMA mappings made for an ablkcipher
 * request: IV-out ptr[5], src/dst scatterlists, IV-in ptr[1], and the link
 * table (when one was mapped; guard elided in this view).
 */
1541 static void common_nonsnoop_unmap(struct device *dev,
1542 struct talitos_edesc *edesc,
1543 struct ablkcipher_request *areq)
1545 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1547 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1548 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1551 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * ablkcipher_done() - completion callback for ablkcipher requests: unmap
 * DMA, copy the output IV back into the request, and complete the base
 * request with the hardware status.
 */
1555 static void ablkcipher_done(struct device *dev,
1556 struct talitos_desc *desc, void *context,
1559 struct ablkcipher_request *areq = context;
1560 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1561 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1562 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1563 struct talitos_edesc *edesc;
/* recover the edesc that embeds this hardware descriptor */
1565 edesc = container_of(desc, struct talitos_edesc, desc);
1567 common_nonsnoop_unmap(dev, edesc, areq);
/* return the chained IV to the caller */
1568 memcpy(areq->info, ctx->iv, ivsize);
1572 areq->base.complete(&areq->base, err);
/*
 * common_nonsnoop() - fill in and submit an ablkcipher descriptor:
 * ptr[1]=IV in, ptr[2]=key, ptr[3]=cipher in, ptr[4]=cipher out,
 * ptr[5]=IV out; then hand off to talitos_submit().  On submit failure
 * all mappings are undone via common_nonsnoop_unmap().
 * NOTE(review): extract is elided - some statements/braces between the
 * visible lines are missing from this view.
 */
1575 static int common_nonsnoop(struct talitos_edesc *edesc,
1576 struct ablkcipher_request *areq,
1577 void (*callback) (struct device *dev,
1578 struct talitos_desc *desc,
1579 void *context, int error))
1581 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1582 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1583 struct device *dev = ctx->dev;
1584 struct talitos_desc *desc = &edesc->desc;
1585 unsigned int cryptlen = areq->nbytes;
1586 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1588 bool sync_needed = false;
1589 struct talitos_private *priv = dev_get_drvdata(dev);
1590 bool is_sec1 = has_ftr_sec1(priv);
1592 /* first DWORD empty */
/* cipher iv */
1595 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
1598 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1600 sg_count = edesc->src_nents ?: 1;
/* SEC1 cannot scatter/gather: linearize multi-entry src into edesc->buf */
1601 if (is_sec1 && sg_count > 1)
1602 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1605 sg_count = dma_map_sg(dev, areq->src, sg_count,
1606 (areq->src == areq->dst) ?
1607 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* cipher in */
1611 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1612 &desc->ptr[3], sg_count, 0, 0);
/* cipher out; dst needs its own mapping when != src */
1617 if (areq->src != areq->dst) {
1618 sg_count = edesc->dst_nents ? : 1;
1619 if (!is_sec1 || sg_count == 1)
1620 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1623 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1624 sg_count, 0, (edesc->src_nents + 1));
/* iv out */
1629 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1632 /* last DWORD empty */
/* flush link table to device before the engine walks it (guard elided) */
1635 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1636 edesc->dma_len, DMA_BIDIRECTIONAL);
1638 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1639 if (ret != -EINPROGRESS) {
1640 common_nonsnoop_unmap(dev, edesc, areq);
/*
 * ablkcipher_edesc_alloc() - ablkcipher front-end for talitos_edesc_alloc():
 * no assoclen, no authsize, no ICV stashing; IV comes from areq->info.
 */
1646 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1649 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1650 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1651 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1653 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1654 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1655 areq->base.flags, encrypt);
/*
 * ablkcipher_encrypt() - encrypt entry point: allocate the extended
 * descriptor, set the ENCRYPT mode bit on the header template, submit.
 */
1658 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1660 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1661 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1662 struct talitos_edesc *edesc;
1664 /* allocate extended descriptor */
1665 edesc = ablkcipher_edesc_alloc(areq, true);
1667 return PTR_ERR(edesc);
/* set encrypt */
1670 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1672 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * ablkcipher_decrypt() - decrypt entry point: same flow as encrypt but the
 * header selects the inbound (decrypt) direction.
 */
1675 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1677 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1678 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1679 struct talitos_edesc *edesc;
1681 /* allocate extended descriptor */
1682 edesc = ablkcipher_edesc_alloc(areq, false);
1684 return PTR_ERR(edesc);
1686 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1688 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * common_nonsnoop_hash_unmap() - undo all DMA mappings made for an ahash
 * request, including those belonging to the chained second descriptor
 * (desc2) that the SEC1 two-descriptor path may have created.
 * NOTE(review): extract is elided - some guards/braces between the visible
 * lines are missing from this view.
 */
1691 static void common_nonsnoop_hash_unmap(struct device *dev,
1692 struct talitos_edesc *edesc,
1693 struct ahash_request *areq)
1695 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1696 struct talitos_private *priv = dev_get_drvdata(dev);
1697 bool is_sec1 = has_ftr_sec1(priv);
1698 struct talitos_desc *desc = &edesc->desc;
/* the second (chained) descriptor lives right after the link-table area */
1699 struct talitos_desc *desc2 = (struct talitos_desc *)
1700 (edesc->buf + edesc->dma_len);
1702 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
/* avoid double-unmap when desc2 shares the same ptr[5] mapping */
1703 if (desc->next_desc &&
1704 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1705 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1708 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1710 /* When using hashctx-in, must unmap it. */
1711 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1712 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1714 else if (desc->next_desc)
1715 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
/* SEC1 maps the buffered partial block into ptr[3] separately */
1718 if (is_sec1 && req_ctx->nbuf)
1719 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1723 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/* unmap the chained descriptor itself, if one was linked in */
1726 if (edesc->desc.next_desc)
1727 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1728 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
/*
 * ahash_done() - completion callback for ahash requests: rotate the
 * double-buffer so the partial block saved for later becomes the new
 * buffered data, unmap DMA, and complete the base request.
 */
1731 static void ahash_done(struct device *dev,
1732 struct talitos_desc *desc, void *context,
1735 struct ahash_request *areq = context;
1736 struct talitos_edesc *edesc =
1737 container_of(desc, struct talitos_edesc, desc);
1738 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1740 if (!req_ctx->last && req_ctx->to_hash_later) {
1741 /* Position any partial block for next update/final/finup */
1742 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1743 req_ctx->nbuf = req_ctx->to_hash_later;
1745 common_nonsnoop_hash_unmap(dev, edesc, areq);
1749 areq->base.complete(&areq->base, err);
1753 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1754 * ourself and submit a padded block
/*
 * talitos_handle_buggy_hash() - SEC1 workaround for zero-length hashing:
 * clear the hardware-pad mode bit and point the data pointer at a
 * pre-built, already-padded empty-message block (0x80 then zeros).
 */
1756 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1757 struct talitos_edesc *edesc,
1758 struct talitos_ptr *ptr)
1760 static u8 padded_hash[64] = {
1761 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1762 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1763 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1764 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1767 pr_err_once("Bug in SEC1, padding ourself\n");
/* padding was done by us, so tell the engine not to pad again */
1768 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1769 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1770 (char *)padded_hash, DMA_TO_DEVICE);
/*
 * common_nonsnoop_hash() - fill in and submit an ahash descriptor:
 * ptr[1]=hash context in, ptr[2]=HMAC key, ptr[3]=data in, ptr[5]=digest
 * or context out.  On SEC1, when both a buffered partial block and fresh
 * data must be hashed, a second chained descriptor (desc2) is built after
 * the link table and linked via desc->next_desc.
 * NOTE(review): extract is elided - several statements/braces between the
 * visible lines are missing from this view.
 */
1773 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1774 struct ahash_request *areq, unsigned int length,
1775 void (*callback) (struct device *dev,
1776 struct talitos_desc *desc,
1777 void *context, int error))
1779 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1780 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1781 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1782 struct device *dev = ctx->dev;
1783 struct talitos_desc *desc = &edesc->desc;
1785 bool sync_needed = false;
1786 struct talitos_private *priv = dev_get_drvdata(dev);
1787 bool is_sec1 = has_ftr_sec1(priv);
1790 /* first DWORD empty */
1792 /* hash context in */
1793 if (!req_ctx->first || req_ctx->swinit) {
1794 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1795 req_ctx->hw_context_size,
1796 req_ctx->hw_context,
/* swinit is one-shot: h/w context is now loaded */
1798 req_ctx->swinit = 0;
1800 /* Indicate next op is not the first. */
/* HMAC key (only mapped when a key is set; guard elided) */
1805 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
/* on SEC1 the buffered bytes go through ptr[3] first, not 'length' */
1808 if (is_sec1 && req_ctx->nbuf)
1809 length -= req_ctx->nbuf;
1811 sg_count = edesc->src_nents ?: 1;
/* SEC1 cannot scatter/gather: linearize multi-entry src into edesc->buf */
1812 if (is_sec1 && sg_count > 1)
1813 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1815 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
/* data in: buffered partial block (SEC1) or the scatterlist */
1820 if (is_sec1 && req_ctx->nbuf) {
1821 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1822 req_ctx->buf[req_ctx->buf_idx],
1825 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1826 &desc->ptr[3], sg_count, 0, 0);
1831 /* fifth DWORD empty */
1833 /* hash/HMAC out -or- hash context out */
1835 map_single_talitos_ptr(dev, &desc->ptr[5],
1836 crypto_ahash_digestsize(tfm),
1837 areq->result, DMA_FROM_DEVICE);
1839 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1840 req_ctx->hw_context_size,
1841 req_ctx->hw_context,
1844 /* last DWORD empty */
/* SEC1 cannot hash a zero-length block: substitute a pre-padded one */
1846 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1847 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
/* SEC1 with both buffered and new data: chain a second descriptor */
1849 if (is_sec1 && req_ctx->nbuf && length) {
1850 struct talitos_desc *desc2 = (struct talitos_desc *)
1851 (edesc->buf + edesc->dma_len);
1852 dma_addr_t next_desc;
1854 memset(desc2, 0, sizeof(*desc2));
1855 desc2->hdr = desc->hdr;
/* desc2 continues the hash: no re-init, first desc must not pad/notify */
1856 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1857 desc2->hdr1 = desc2->hdr;
1858 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1859 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1860 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1862 if (desc->ptr[1].ptr)
1863 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1866 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1867 req_ctx->hw_context_size,
1868 req_ctx->hw_context,
1870 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1871 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1872 &desc2->ptr[3], sg_count, 0, 0);
1875 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
/* first descriptor writes the intermediate context, not the result */
1877 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1878 req_ctx->hw_context_size,
1879 req_ctx->hw_context,
1882 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1884 desc->next_desc = cpu_to_be32(next_desc);
/* flush link table to device before the engine walks it (guard elided) */
1888 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1889 edesc->dma_len, DMA_BIDIRECTIONAL);
1891 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1892 if (ret != -EINPROGRESS) {
1893 common_nonsnoop_hash_unmap(dev, edesc, areq);
/*
 * ahash_edesc_alloc() - ahash front-end for talitos_edesc_alloc(): hashes
 * have no dst/IV/ICV.  On SEC1 the already-buffered bytes are subtracted
 * from nbytes (condition elided in this view) since they are fed through a
 * separately mapped pointer.
 */
1899 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1900 unsigned int nbytes)
1902 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1903 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1904 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1905 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1906 bool is_sec1 = has_ftr_sec1(priv);
1909 nbytes -= req_ctx->nbuf;
1911 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1912 nbytes, 0, 0, 0, areq->base.flags, false);
/*
 * ahash_init() - initialize the per-request hash state: reset buffering,
 * mark the request as "first" so the engine initializes its own context,
 * pick the context size from the digest size, and do a map/unmap round
 * trip on hw_context (presumably to flush CPU caches before the engine
 * writes it - TODO confirm against full source).
 */
1915 static int ahash_init(struct ahash_request *areq)
1917 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1918 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1919 struct device *dev = ctx->dev;
1920 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1924 /* Initialize the context */
1925 req_ctx->buf_idx = 0;
1927 req_ctx->first = 1; /* first indicates h/w must init its context */
1928 req_ctx->swinit = 0; /* assume h/w init of context */
/* MD5/SHA1/SHA256 share one context size; SHA384/512 need the larger one */
1929 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1930 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1931 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1932 req_ctx->hw_context_size = size;
1934 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1936 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1942 * on h/w without explicit sha224 support, we initialize h/w context
1943 * manually with sha224 constants, and tell it to run sha256.
/*
 * ahash_init_sha224_swinit() - software-init variant for hardware without
 * native sha224: seed hw_context with the SHA-224 initial hash values and
 * a zeroed 64-bit length, then set swinit so the engine runs sha256 over
 * this pre-loaded context instead of initializing its own.
 */
1945 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1947 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1949 req_ctx->hw_context[0] = SHA224_H0;
1950 req_ctx->hw_context[1] = SHA224_H1;
1951 req_ctx->hw_context[2] = SHA224_H2;
1952 req_ctx->hw_context[3] = SHA224_H3;
1953 req_ctx->hw_context[4] = SHA224_H4;
1954 req_ctx->hw_context[5] = SHA224_H5;
1955 req_ctx->hw_context[6] = SHA224_H6;
1956 req_ctx->hw_context[7] = SHA224_H7;
1958 /* init 64-bit count */
1959 req_ctx->hw_context[8] = 0;
1960 req_ctx->hw_context[9] = 0;
1963 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
/*
 * ahash_process_req() - core update/final/finup worker.  Buffers input
 * until more than one block is available, decides how many whole blocks to
 * hash now vs. keep for later, chains previously buffered bytes in front
 * of the new data (scatterlist chain on SEC2+, copy into the block buffer
 * on SEC1), saves the tail into the alternate buffer, then builds and
 * submits the descriptor via common_nonsnoop_hash().
 * NOTE(review): extract is elided - some returns/braces between the
 * visible lines are missing from this view.
 */
1968 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1970 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1971 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1972 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1973 struct talitos_edesc *edesc;
1974 unsigned int blocksize =
1975 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1976 unsigned int nbytes_to_hash;
1977 unsigned int to_hash_later;
1980 struct device *dev = ctx->dev;
1981 struct talitos_private *priv = dev_get_drvdata(dev);
1982 bool is_sec1 = has_ftr_sec1(priv);
1983 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1985 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1986 /* Buffer up to one whole block */
1987 nents = sg_nents_for_len(areq->src, nbytes);
1989 dev_err(ctx->dev, "Invalid number of src SG.\n");
1992 sg_copy_to_buffer(areq->src, nents,
1993 ctx_buf + req_ctx->nbuf, nbytes);
1994 req_ctx->nbuf += nbytes;
1998 /* At least (blocksize + 1) bytes are available to hash */
1999 nbytes_to_hash = nbytes + req_ctx->nbuf;
2000 to_hash_later = nbytes_to_hash & (blocksize - 1);
2004 else if (to_hash_later)
2005 /* There is a partial block. Hash the full block(s) now */
2006 nbytes_to_hash -= to_hash_later;
2008 /* Keep one block buffered */
2009 nbytes_to_hash -= blocksize;
2010 to_hash_later = blocksize;
2013 /* Chain in any previously buffered data */
2014 if (!is_sec1 && req_ctx->nbuf) {
2015 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2016 sg_init_table(req_ctx->bufsl, nsg);
2017 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2019 sg_chain(req_ctx->bufsl, 2, areq->src);
2020 req_ctx->psrc = req_ctx->bufsl;
2021 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
/* SEC1: top the block buffer up from src instead of chaining SGs */
2024 if (nbytes_to_hash > blocksize)
2025 offset = blocksize - req_ctx->nbuf;
2027 offset = nbytes_to_hash - req_ctx->nbuf;
2028 nents = sg_nents_for_len(areq->src, offset);
2030 dev_err(ctx->dev, "Invalid number of src SG.\n");
2033 sg_copy_to_buffer(areq->src, nents,
2034 ctx_buf + req_ctx->nbuf, offset);
2035 req_ctx->nbuf += offset;
2036 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2039 req_ctx->psrc = areq->src;
/* stash the tail into the other half of the double buffer */
2041 if (to_hash_later) {
2042 nents = sg_nents_for_len(areq->src, nbytes);
2044 dev_err(ctx->dev, "Invalid number of src SG.\n");
2047 sg_pcopy_to_buffer(areq->src, nents,
2048 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2050 nbytes - to_hash_later);
2052 req_ctx->to_hash_later = to_hash_later;
2054 /* Allocate extended descriptor */
2055 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2057 return PTR_ERR(edesc);
2059 edesc->desc.hdr = ctx->desc_hdr_template;
2061 /* On last one, request SEC to pad; otherwise continue */
2063 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2065 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2067 /* request SEC to INIT hash. */
2068 if (req_ctx->first && !req_ctx->swinit)
2069 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2071 /* When the tfm context has a keylen, it's an HMAC.
2072 * A first or last (ie. not middle) descriptor must request HMAC.
2074 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2075 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2077 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
/* ahash_update() - hash more data; 'last' flag handling is elided here. */
2080 static int ahash_update(struct ahash_request *areq)
2082 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2086 return ahash_process_req(areq, areq->nbytes);
/* ahash_final() - finish the hash with no new data (nbytes == 0). */
2089 static int ahash_final(struct ahash_request *areq)
2091 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2095 return ahash_process_req(areq, 0);
/* ahash_finup() - hash the remaining data and finish in one call. */
2098 static int ahash_finup(struct ahash_request *areq)
2100 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2104 return ahash_process_req(areq, areq->nbytes);
/* ahash_digest() - one-shot init + finup over the whole request. */
2107 static int ahash_digest(struct ahash_request *areq)
2109 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2110 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2115 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash_export() - serialize the in-progress hash state into a
 * talitos_export_state so it can be resumed later via ahash_import().
 * The hw_context map/unmap round trip presumably synchronizes device
 * writes back to the CPU before the memcpy - TODO confirm.
 */
2118 static int ahash_export(struct ahash_request *areq, void *out)
2120 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2121 struct talitos_export_state *export = out;
2122 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2123 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2124 struct device *dev = ctx->dev;
2127 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2129 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2131 memcpy(export->hw_context, req_ctx->hw_context,
2132 req_ctx->hw_context_size);
2133 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2134 export->swinit = req_ctx->swinit;
2135 export->first = req_ctx->first;
2136 export->last = req_ctx->last;
2137 export->to_hash_later = req_ctx->to_hash_later;
2138 export->nbuf = req_ctx->nbuf;
/*
 * ahash_import() - restore a hash state previously saved by ahash_export():
 * rebuild the request context from the export blob (context size is
 * re-derived from the digest size, matching ahash_init), then do a
 * map/unmap round trip to push hw_context toward the device.
 */
2143 static int ahash_import(struct ahash_request *areq, const void *in)
2145 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2146 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2147 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2148 struct device *dev = ctx->dev;
2149 const struct talitos_export_state *export = in;
2153 memset(req_ctx, 0, sizeof(*req_ctx));
2154 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2155 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2156 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2157 req_ctx->hw_context_size = size;
2158 memcpy(req_ctx->hw_context, export->hw_context, size);
2159 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2160 req_ctx->swinit = export->swinit;
2161 req_ctx->first = export->first;
2162 req_ctx->last = export->last;
2163 req_ctx->to_hash_later = export->to_hash_later;
2164 req_ctx->nbuf = export->nbuf;
2166 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2168 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
/*
 * keyhash() - synchronously digest an over-long HMAC key with this same
 * tfm, using crypto_wait_req() to block until the async digest completes.
 * Result lands in 'hash'.
 */
2173 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2176 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2178 struct scatterlist sg[1];
2179 struct ahash_request *req;
2180 struct crypto_wait wait;
2183 crypto_init_wait(&wait);
2185 req = ahash_request_alloc(tfm, GFP_KERNEL);
2189 /* Keep tfm keylen == 0 during hash of the long key */
2191 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2192 crypto_req_done, &wait);
2194 sg_init_one(&sg[0], key, keylen);
2196 ahash_request_set_crypt(req, sg, hash, keylen);
/* block here until the hardware completes the digest */
2197 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2199 ahash_request_free(req);
/*
 * ahash_setkey() - set the HMAC key: keys up to one block are stored
 * directly, longer keys are first hashed down to digestsize via keyhash()
 * (standard HMAC long-key handling); the stored key is then (re)mapped
 * for DMA.
 */
2204 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2205 unsigned int keylen)
2207 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2208 struct device *dev = ctx->dev;
2209 unsigned int blocksize =
2210 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2211 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2212 unsigned int keysize = keylen;
2213 u8 hash[SHA512_DIGEST_SIZE];
2216 if (keylen <= blocksize)
2217 memcpy(ctx->key, key, keysize);
2219 /* Must get the hash of the long key */
2220 ret = keyhash(tfm, key, keylen, hash);
2223 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2227 keysize = digestsize;
2228 memcpy(ctx->key, hash, digestsize);
/* unmap any previous key mapping before replacing it (guard elided) */
2232 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2234 ctx->keylen = keysize;
2235 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
/*
 * talitos_alg_template - one entry of the driver_algs[] registration table:
 * the crypto_alg/ahash_alg/aead_alg description (presumably a union -
 * enclosing keyword elided from this view) plus the descriptor header
 * template selecting the execution units and modes for this algorithm.
 */
2241 struct talitos_alg_template {
2245 struct crypto_alg crypto;
2246 struct ahash_alg hash;
2247 struct aead_alg aead;
2249 __be32 desc_hdr_template;
2252 static struct talitos_alg_template driver_algs[] = {
2253 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2254 { .type = CRYPTO_ALG_TYPE_AEAD,
2257 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2258 .cra_driver_name = "authenc-hmac-sha1-"
2260 .cra_blocksize = AES_BLOCK_SIZE,
2261 .cra_flags = CRYPTO_ALG_ASYNC,
2263 .ivsize = AES_BLOCK_SIZE,
2264 .maxauthsize = SHA1_DIGEST_SIZE,
2266 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2267 DESC_HDR_SEL0_AESU |
2268 DESC_HDR_MODE0_AESU_CBC |
2269 DESC_HDR_SEL1_MDEUA |
2270 DESC_HDR_MODE1_MDEU_INIT |
2271 DESC_HDR_MODE1_MDEU_PAD |
2272 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2274 { .type = CRYPTO_ALG_TYPE_AEAD,
2275 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2278 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2279 .cra_driver_name = "authenc-hmac-sha1-"
2280 "cbc-aes-talitos-hsna",
2281 .cra_blocksize = AES_BLOCK_SIZE,
2282 .cra_flags = CRYPTO_ALG_ASYNC,
2284 .ivsize = AES_BLOCK_SIZE,
2285 .maxauthsize = SHA1_DIGEST_SIZE,
2287 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2288 DESC_HDR_SEL0_AESU |
2289 DESC_HDR_MODE0_AESU_CBC |
2290 DESC_HDR_SEL1_MDEUA |
2291 DESC_HDR_MODE1_MDEU_INIT |
2292 DESC_HDR_MODE1_MDEU_PAD |
2293 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2295 { .type = CRYPTO_ALG_TYPE_AEAD,
2298 .cra_name = "authenc(hmac(sha1),"
2300 .cra_driver_name = "authenc-hmac-sha1-"
2302 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2303 .cra_flags = CRYPTO_ALG_ASYNC,
2305 .ivsize = DES3_EDE_BLOCK_SIZE,
2306 .maxauthsize = SHA1_DIGEST_SIZE,
2308 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2310 DESC_HDR_MODE0_DEU_CBC |
2311 DESC_HDR_MODE0_DEU_3DES |
2312 DESC_HDR_SEL1_MDEUA |
2313 DESC_HDR_MODE1_MDEU_INIT |
2314 DESC_HDR_MODE1_MDEU_PAD |
2315 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2317 { .type = CRYPTO_ALG_TYPE_AEAD,
2318 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2321 .cra_name = "authenc(hmac(sha1),"
2323 .cra_driver_name = "authenc-hmac-sha1-"
2324 "cbc-3des-talitos-hsna",
2325 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2326 .cra_flags = CRYPTO_ALG_ASYNC,
2328 .ivsize = DES3_EDE_BLOCK_SIZE,
2329 .maxauthsize = SHA1_DIGEST_SIZE,
2331 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2333 DESC_HDR_MODE0_DEU_CBC |
2334 DESC_HDR_MODE0_DEU_3DES |
2335 DESC_HDR_SEL1_MDEUA |
2336 DESC_HDR_MODE1_MDEU_INIT |
2337 DESC_HDR_MODE1_MDEU_PAD |
2338 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2340 { .type = CRYPTO_ALG_TYPE_AEAD,
2343 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2344 .cra_driver_name = "authenc-hmac-sha224-"
2346 .cra_blocksize = AES_BLOCK_SIZE,
2347 .cra_flags = CRYPTO_ALG_ASYNC,
2349 .ivsize = AES_BLOCK_SIZE,
2350 .maxauthsize = SHA224_DIGEST_SIZE,
2352 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2353 DESC_HDR_SEL0_AESU |
2354 DESC_HDR_MODE0_AESU_CBC |
2355 DESC_HDR_SEL1_MDEUA |
2356 DESC_HDR_MODE1_MDEU_INIT |
2357 DESC_HDR_MODE1_MDEU_PAD |
2358 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2360 { .type = CRYPTO_ALG_TYPE_AEAD,
2361 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2364 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2365 .cra_driver_name = "authenc-hmac-sha224-"
2366 "cbc-aes-talitos-hsna",
2367 .cra_blocksize = AES_BLOCK_SIZE,
2368 .cra_flags = CRYPTO_ALG_ASYNC,
2370 .ivsize = AES_BLOCK_SIZE,
2371 .maxauthsize = SHA224_DIGEST_SIZE,
2373 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2374 DESC_HDR_SEL0_AESU |
2375 DESC_HDR_MODE0_AESU_CBC |
2376 DESC_HDR_SEL1_MDEUA |
2377 DESC_HDR_MODE1_MDEU_INIT |
2378 DESC_HDR_MODE1_MDEU_PAD |
2379 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2381 { .type = CRYPTO_ALG_TYPE_AEAD,
2384 .cra_name = "authenc(hmac(sha224),"
2386 .cra_driver_name = "authenc-hmac-sha224-"
2388 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2389 .cra_flags = CRYPTO_ALG_ASYNC,
2391 .ivsize = DES3_EDE_BLOCK_SIZE,
2392 .maxauthsize = SHA224_DIGEST_SIZE,
2394 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2396 DESC_HDR_MODE0_DEU_CBC |
2397 DESC_HDR_MODE0_DEU_3DES |
2398 DESC_HDR_SEL1_MDEUA |
2399 DESC_HDR_MODE1_MDEU_INIT |
2400 DESC_HDR_MODE1_MDEU_PAD |
2401 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2403 { .type = CRYPTO_ALG_TYPE_AEAD,
2404 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2407 .cra_name = "authenc(hmac(sha224),"
2409 .cra_driver_name = "authenc-hmac-sha224-"
2410 "cbc-3des-talitos-hsna",
2411 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2412 .cra_flags = CRYPTO_ALG_ASYNC,
2414 .ivsize = DES3_EDE_BLOCK_SIZE,
2415 .maxauthsize = SHA224_DIGEST_SIZE,
2417 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2419 DESC_HDR_MODE0_DEU_CBC |
2420 DESC_HDR_MODE0_DEU_3DES |
2421 DESC_HDR_SEL1_MDEUA |
2422 DESC_HDR_MODE1_MDEU_INIT |
2423 DESC_HDR_MODE1_MDEU_PAD |
2424 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2426 { .type = CRYPTO_ALG_TYPE_AEAD,
2429 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2430 .cra_driver_name = "authenc-hmac-sha256-"
2432 .cra_blocksize = AES_BLOCK_SIZE,
2433 .cra_flags = CRYPTO_ALG_ASYNC,
2435 .ivsize = AES_BLOCK_SIZE,
2436 .maxauthsize = SHA256_DIGEST_SIZE,
2438 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2439 DESC_HDR_SEL0_AESU |
2440 DESC_HDR_MODE0_AESU_CBC |
2441 DESC_HDR_SEL1_MDEUA |
2442 DESC_HDR_MODE1_MDEU_INIT |
2443 DESC_HDR_MODE1_MDEU_PAD |
2444 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2446 { .type = CRYPTO_ALG_TYPE_AEAD,
2447 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2450 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2451 .cra_driver_name = "authenc-hmac-sha256-"
2452 "cbc-aes-talitos-hsna",
2453 .cra_blocksize = AES_BLOCK_SIZE,
2454 .cra_flags = CRYPTO_ALG_ASYNC,
2456 .ivsize = AES_BLOCK_SIZE,
2457 .maxauthsize = SHA256_DIGEST_SIZE,
2459 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2460 DESC_HDR_SEL0_AESU |
2461 DESC_HDR_MODE0_AESU_CBC |
2462 DESC_HDR_SEL1_MDEUA |
2463 DESC_HDR_MODE1_MDEU_INIT |
2464 DESC_HDR_MODE1_MDEU_PAD |
2465 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2467 { .type = CRYPTO_ALG_TYPE_AEAD,
2470 .cra_name = "authenc(hmac(sha256),"
2472 .cra_driver_name = "authenc-hmac-sha256-"
2474 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2475 .cra_flags = CRYPTO_ALG_ASYNC,
2477 .ivsize = DES3_EDE_BLOCK_SIZE,
2478 .maxauthsize = SHA256_DIGEST_SIZE,
2480 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2482 DESC_HDR_MODE0_DEU_CBC |
2483 DESC_HDR_MODE0_DEU_3DES |
2484 DESC_HDR_SEL1_MDEUA |
2485 DESC_HDR_MODE1_MDEU_INIT |
2486 DESC_HDR_MODE1_MDEU_PAD |
2487 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2489 { .type = CRYPTO_ALG_TYPE_AEAD,
2490 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2493 .cra_name = "authenc(hmac(sha256),"
2495 .cra_driver_name = "authenc-hmac-sha256-"
2496 "cbc-3des-talitos-hsna",
2497 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2498 .cra_flags = CRYPTO_ALG_ASYNC,
2500 .ivsize = DES3_EDE_BLOCK_SIZE,
2501 .maxauthsize = SHA256_DIGEST_SIZE,
2503 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2505 DESC_HDR_MODE0_DEU_CBC |
2506 DESC_HDR_MODE0_DEU_3DES |
2507 DESC_HDR_SEL1_MDEUA |
2508 DESC_HDR_MODE1_MDEU_INIT |
2509 DESC_HDR_MODE1_MDEU_PAD |
2510 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2512 { .type = CRYPTO_ALG_TYPE_AEAD,
2515 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2516 .cra_driver_name = "authenc-hmac-sha384-"
2518 .cra_blocksize = AES_BLOCK_SIZE,
2519 .cra_flags = CRYPTO_ALG_ASYNC,
2521 .ivsize = AES_BLOCK_SIZE,
2522 .maxauthsize = SHA384_DIGEST_SIZE,
2524 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2525 DESC_HDR_SEL0_AESU |
2526 DESC_HDR_MODE0_AESU_CBC |
2527 DESC_HDR_SEL1_MDEUB |
2528 DESC_HDR_MODE1_MDEU_INIT |
2529 DESC_HDR_MODE1_MDEU_PAD |
2530 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2532 { .type = CRYPTO_ALG_TYPE_AEAD,
2535 .cra_name = "authenc(hmac(sha384),"
2537 .cra_driver_name = "authenc-hmac-sha384-"
2539 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2540 .cra_flags = CRYPTO_ALG_ASYNC,
2542 .ivsize = DES3_EDE_BLOCK_SIZE,
2543 .maxauthsize = SHA384_DIGEST_SIZE,
2545 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2547 DESC_HDR_MODE0_DEU_CBC |
2548 DESC_HDR_MODE0_DEU_3DES |
2549 DESC_HDR_SEL1_MDEUB |
2550 DESC_HDR_MODE1_MDEU_INIT |
2551 DESC_HDR_MODE1_MDEU_PAD |
2552 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2554 { .type = CRYPTO_ALG_TYPE_AEAD,
2557 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2558 .cra_driver_name = "authenc-hmac-sha512-"
2560 .cra_blocksize = AES_BLOCK_SIZE,
2561 .cra_flags = CRYPTO_ALG_ASYNC,
2563 .ivsize = AES_BLOCK_SIZE,
2564 .maxauthsize = SHA512_DIGEST_SIZE,
2566 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2567 DESC_HDR_SEL0_AESU |
2568 DESC_HDR_MODE0_AESU_CBC |
2569 DESC_HDR_SEL1_MDEUB |
2570 DESC_HDR_MODE1_MDEU_INIT |
2571 DESC_HDR_MODE1_MDEU_PAD |
2572 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2574 { .type = CRYPTO_ALG_TYPE_AEAD,
2577 .cra_name = "authenc(hmac(sha512),"
2579 .cra_driver_name = "authenc-hmac-sha512-"
2581 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2582 .cra_flags = CRYPTO_ALG_ASYNC,
2584 .ivsize = DES3_EDE_BLOCK_SIZE,
2585 .maxauthsize = SHA512_DIGEST_SIZE,
2587 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2589 DESC_HDR_MODE0_DEU_CBC |
2590 DESC_HDR_MODE0_DEU_3DES |
2591 DESC_HDR_SEL1_MDEUB |
2592 DESC_HDR_MODE1_MDEU_INIT |
2593 DESC_HDR_MODE1_MDEU_PAD |
2594 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2596 { .type = CRYPTO_ALG_TYPE_AEAD,
2599 .cra_name = "authenc(hmac(md5),cbc(aes))",
2600 .cra_driver_name = "authenc-hmac-md5-"
2602 .cra_blocksize = AES_BLOCK_SIZE,
2603 .cra_flags = CRYPTO_ALG_ASYNC,
2605 .ivsize = AES_BLOCK_SIZE,
2606 .maxauthsize = MD5_DIGEST_SIZE,
2608 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2609 DESC_HDR_SEL0_AESU |
2610 DESC_HDR_MODE0_AESU_CBC |
2611 DESC_HDR_SEL1_MDEUA |
2612 DESC_HDR_MODE1_MDEU_INIT |
2613 DESC_HDR_MODE1_MDEU_PAD |
2614 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2616 { .type = CRYPTO_ALG_TYPE_AEAD,
2617 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2620 .cra_name = "authenc(hmac(md5),cbc(aes))",
2621 .cra_driver_name = "authenc-hmac-md5-"
2622 "cbc-aes-talitos-hsna",
2623 .cra_blocksize = AES_BLOCK_SIZE,
2624 .cra_flags = CRYPTO_ALG_ASYNC,
2626 .ivsize = AES_BLOCK_SIZE,
2627 .maxauthsize = MD5_DIGEST_SIZE,
2629 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2630 DESC_HDR_SEL0_AESU |
2631 DESC_HDR_MODE0_AESU_CBC |
2632 DESC_HDR_SEL1_MDEUA |
2633 DESC_HDR_MODE1_MDEU_INIT |
2634 DESC_HDR_MODE1_MDEU_PAD |
2635 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2637 { .type = CRYPTO_ALG_TYPE_AEAD,
2640 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2641 .cra_driver_name = "authenc-hmac-md5-"
2643 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2644 .cra_flags = CRYPTO_ALG_ASYNC,
2646 .ivsize = DES3_EDE_BLOCK_SIZE,
2647 .maxauthsize = MD5_DIGEST_SIZE,
2649 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2651 DESC_HDR_MODE0_DEU_CBC |
2652 DESC_HDR_MODE0_DEU_3DES |
2653 DESC_HDR_SEL1_MDEUA |
2654 DESC_HDR_MODE1_MDEU_INIT |
2655 DESC_HDR_MODE1_MDEU_PAD |
2656 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2658 { .type = CRYPTO_ALG_TYPE_AEAD,
2659 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2662 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2663 .cra_driver_name = "authenc-hmac-md5-"
2664 "cbc-3des-talitos-hsna",
2665 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2666 .cra_flags = CRYPTO_ALG_ASYNC,
2668 .ivsize = DES3_EDE_BLOCK_SIZE,
2669 .maxauthsize = MD5_DIGEST_SIZE,
2671 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2673 DESC_HDR_MODE0_DEU_CBC |
2674 DESC_HDR_MODE0_DEU_3DES |
2675 DESC_HDR_SEL1_MDEUA |
2676 DESC_HDR_MODE1_MDEU_INIT |
2677 DESC_HDR_MODE1_MDEU_PAD |
2678 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2680 /* ABLKCIPHER algorithms. */
2681 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2683 .cra_name = "ecb(aes)",
2684 .cra_driver_name = "ecb-aes-talitos",
2685 .cra_blocksize = AES_BLOCK_SIZE,
2686 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2689 .min_keysize = AES_MIN_KEY_SIZE,
2690 .max_keysize = AES_MAX_KEY_SIZE,
2691 .ivsize = AES_BLOCK_SIZE,
2694 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2697 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2699 .cra_name = "cbc(aes)",
2700 .cra_driver_name = "cbc-aes-talitos",
2701 .cra_blocksize = AES_BLOCK_SIZE,
2702 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2705 .min_keysize = AES_MIN_KEY_SIZE,
2706 .max_keysize = AES_MAX_KEY_SIZE,
2707 .ivsize = AES_BLOCK_SIZE,
2710 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2711 DESC_HDR_SEL0_AESU |
2712 DESC_HDR_MODE0_AESU_CBC,
2714 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2716 .cra_name = "ctr(aes)",
2717 .cra_driver_name = "ctr-aes-talitos",
2718 .cra_blocksize = AES_BLOCK_SIZE,
2719 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2722 .min_keysize = AES_MIN_KEY_SIZE,
2723 .max_keysize = AES_MAX_KEY_SIZE,
2724 .ivsize = AES_BLOCK_SIZE,
2727 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2728 DESC_HDR_SEL0_AESU |
2729 DESC_HDR_MODE0_AESU_CTR,
2731 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2733 .cra_name = "ecb(des)",
2734 .cra_driver_name = "ecb-des-talitos",
2735 .cra_blocksize = DES_BLOCK_SIZE,
2736 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2739 .min_keysize = DES_KEY_SIZE,
2740 .max_keysize = DES_KEY_SIZE,
2741 .ivsize = DES_BLOCK_SIZE,
2744 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2747 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2749 .cra_name = "cbc(des)",
2750 .cra_driver_name = "cbc-des-talitos",
2751 .cra_blocksize = DES_BLOCK_SIZE,
2752 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2755 .min_keysize = DES_KEY_SIZE,
2756 .max_keysize = DES_KEY_SIZE,
2757 .ivsize = DES_BLOCK_SIZE,
2760 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2762 DESC_HDR_MODE0_DEU_CBC,
2764 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2766 .cra_name = "ecb(des3_ede)",
2767 .cra_driver_name = "ecb-3des-talitos",
2768 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2769 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2772 .min_keysize = DES3_EDE_KEY_SIZE,
2773 .max_keysize = DES3_EDE_KEY_SIZE,
2774 .ivsize = DES3_EDE_BLOCK_SIZE,
2777 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2779 DESC_HDR_MODE0_DEU_3DES,
2781 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2783 .cra_name = "cbc(des3_ede)",
2784 .cra_driver_name = "cbc-3des-talitos",
2785 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2786 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2789 .min_keysize = DES3_EDE_KEY_SIZE,
2790 .max_keysize = DES3_EDE_KEY_SIZE,
2791 .ivsize = DES3_EDE_BLOCK_SIZE,
2794 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2796 DESC_HDR_MODE0_DEU_CBC |
2797 DESC_HDR_MODE0_DEU_3DES,
2799 /* AHASH algorithms. */
2800 { .type = CRYPTO_ALG_TYPE_AHASH,
2802 .halg.digestsize = MD5_DIGEST_SIZE,
2803 .halg.statesize = sizeof(struct talitos_export_state),
2806 .cra_driver_name = "md5-talitos",
2807 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2808 .cra_flags = CRYPTO_ALG_ASYNC,
2811 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812 DESC_HDR_SEL0_MDEUA |
2813 DESC_HDR_MODE0_MDEU_MD5,
2815 { .type = CRYPTO_ALG_TYPE_AHASH,
2817 .halg.digestsize = SHA1_DIGEST_SIZE,
2818 .halg.statesize = sizeof(struct talitos_export_state),
2821 .cra_driver_name = "sha1-talitos",
2822 .cra_blocksize = SHA1_BLOCK_SIZE,
2823 .cra_flags = CRYPTO_ALG_ASYNC,
2826 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2827 DESC_HDR_SEL0_MDEUA |
2828 DESC_HDR_MODE0_MDEU_SHA1,
2830 { .type = CRYPTO_ALG_TYPE_AHASH,
2832 .halg.digestsize = SHA224_DIGEST_SIZE,
2833 .halg.statesize = sizeof(struct talitos_export_state),
2835 .cra_name = "sha224",
2836 .cra_driver_name = "sha224-talitos",
2837 .cra_blocksize = SHA224_BLOCK_SIZE,
2838 .cra_flags = CRYPTO_ALG_ASYNC,
2841 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2842 DESC_HDR_SEL0_MDEUA |
2843 DESC_HDR_MODE0_MDEU_SHA224,
2845 { .type = CRYPTO_ALG_TYPE_AHASH,
2847 .halg.digestsize = SHA256_DIGEST_SIZE,
2848 .halg.statesize = sizeof(struct talitos_export_state),
2850 .cra_name = "sha256",
2851 .cra_driver_name = "sha256-talitos",
2852 .cra_blocksize = SHA256_BLOCK_SIZE,
2853 .cra_flags = CRYPTO_ALG_ASYNC,
2856 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2857 DESC_HDR_SEL0_MDEUA |
2858 DESC_HDR_MODE0_MDEU_SHA256,
2860 { .type = CRYPTO_ALG_TYPE_AHASH,
2862 .halg.digestsize = SHA384_DIGEST_SIZE,
2863 .halg.statesize = sizeof(struct talitos_export_state),
2865 .cra_name = "sha384",
2866 .cra_driver_name = "sha384-talitos",
2867 .cra_blocksize = SHA384_BLOCK_SIZE,
2868 .cra_flags = CRYPTO_ALG_ASYNC,
2871 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2872 DESC_HDR_SEL0_MDEUB |
2873 DESC_HDR_MODE0_MDEUB_SHA384,
2875 { .type = CRYPTO_ALG_TYPE_AHASH,
2877 .halg.digestsize = SHA512_DIGEST_SIZE,
2878 .halg.statesize = sizeof(struct talitos_export_state),
2880 .cra_name = "sha512",
2881 .cra_driver_name = "sha512-talitos",
2882 .cra_blocksize = SHA512_BLOCK_SIZE,
2883 .cra_flags = CRYPTO_ALG_ASYNC,
2886 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2887 DESC_HDR_SEL0_MDEUB |
2888 DESC_HDR_MODE0_MDEUB_SHA512,
2890 { .type = CRYPTO_ALG_TYPE_AHASH,
2892 .halg.digestsize = MD5_DIGEST_SIZE,
2893 .halg.statesize = sizeof(struct talitos_export_state),
2895 .cra_name = "hmac(md5)",
2896 .cra_driver_name = "hmac-md5-talitos",
2897 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2898 .cra_flags = CRYPTO_ALG_ASYNC,
2901 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2902 DESC_HDR_SEL0_MDEUA |
2903 DESC_HDR_MODE0_MDEU_MD5,
2905 { .type = CRYPTO_ALG_TYPE_AHASH,
2907 .halg.digestsize = SHA1_DIGEST_SIZE,
2908 .halg.statesize = sizeof(struct talitos_export_state),
2910 .cra_name = "hmac(sha1)",
2911 .cra_driver_name = "hmac-sha1-talitos",
2912 .cra_blocksize = SHA1_BLOCK_SIZE,
2913 .cra_flags = CRYPTO_ALG_ASYNC,
2916 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2917 DESC_HDR_SEL0_MDEUA |
2918 DESC_HDR_MODE0_MDEU_SHA1,
2920 { .type = CRYPTO_ALG_TYPE_AHASH,
2922 .halg.digestsize = SHA224_DIGEST_SIZE,
2923 .halg.statesize = sizeof(struct talitos_export_state),
2925 .cra_name = "hmac(sha224)",
2926 .cra_driver_name = "hmac-sha224-talitos",
2927 .cra_blocksize = SHA224_BLOCK_SIZE,
2928 .cra_flags = CRYPTO_ALG_ASYNC,
2931 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2932 DESC_HDR_SEL0_MDEUA |
2933 DESC_HDR_MODE0_MDEU_SHA224,
2935 { .type = CRYPTO_ALG_TYPE_AHASH,
2937 .halg.digestsize = SHA256_DIGEST_SIZE,
2938 .halg.statesize = sizeof(struct talitos_export_state),
2940 .cra_name = "hmac(sha256)",
2941 .cra_driver_name = "hmac-sha256-talitos",
2942 .cra_blocksize = SHA256_BLOCK_SIZE,
2943 .cra_flags = CRYPTO_ALG_ASYNC,
2946 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2947 DESC_HDR_SEL0_MDEUA |
2948 DESC_HDR_MODE0_MDEU_SHA256,
2950 { .type = CRYPTO_ALG_TYPE_AHASH,
2952 .halg.digestsize = SHA384_DIGEST_SIZE,
2953 .halg.statesize = sizeof(struct talitos_export_state),
2955 .cra_name = "hmac(sha384)",
2956 .cra_driver_name = "hmac-sha384-talitos",
2957 .cra_blocksize = SHA384_BLOCK_SIZE,
2958 .cra_flags = CRYPTO_ALG_ASYNC,
2961 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2962 DESC_HDR_SEL0_MDEUB |
2963 DESC_HDR_MODE0_MDEUB_SHA384,
2965 { .type = CRYPTO_ALG_TYPE_AHASH,
2967 .halg.digestsize = SHA512_DIGEST_SIZE,
2968 .halg.statesize = sizeof(struct talitos_export_state),
2970 .cra_name = "hmac(sha512)",
2971 .cra_driver_name = "hmac-sha512-talitos",
2972 .cra_blocksize = SHA512_BLOCK_SIZE,
2973 .cra_flags = CRYPTO_ALG_ASYNC,
2976 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2977 DESC_HDR_SEL0_MDEUB |
2978 DESC_HDR_MODE0_MDEUB_SHA512,
/*
 * Per-registered-algorithm bookkeeping record: ties one algorithm template
 * to the list of algorithms registered by this driver instance.
 * NOTE(review): this chunk is missing lines (e.g. the device pointer member
 * referenced as talitos_alg->dev below, and the closing brace) -- confirm
 * against the complete source file.
 */
2982 struct talitos_crypto_alg {
2983 struct list_head entry;
2985 struct talitos_alg_template algt;
/*
 * Common transform-context initialisation shared by all algorithm types:
 * records the owning device, picks a SEC channel, and seeds the context's
 * descriptor header template from the algorithm template.
 * NOTE(review): lines are missing from this chunk (opening brace, the
 * final return) -- the visible statements match the upstream flow.
 */
2988 static int talitos_init_common(struct talitos_ctx *ctx,
2989 struct talitos_crypto_alg *talitos_alg)
2991 	struct talitos_private *priv;
2993 	/* update context with ptr to dev */
2994 	ctx->dev = talitos_alg->dev;
2996 	/* assign SEC channel to tfm in round-robin fashion */
2997 	priv = dev_get_drvdata(ctx->dev);
	/* masking works because num_channels is validated as a power of two */
2998 	ctx->ch = atomic_inc_return(&priv->last_chan) &
2999 	(priv->num_channels - 1);
3001 	/* copy descriptor header template value */
3002 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3004 	/* select done notification */
3005 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
/*
 * crypto_tfm ->cra_init hook: recover the driver's talitos_crypto_alg
 * wrapper from the generic crypto_alg (the container_of offset differs for
 * AHASH algs, whose crypto_alg is embedded deeper) and run the common init.
 * NOTE(review): the container_of member arguments are on lines dropped from
 * this chunk.
 */
3010 static int talitos_cra_init(struct crypto_tfm *tfm)
3012 	struct crypto_alg *alg = tfm->__crt_alg;
3013 	struct talitos_crypto_alg *talitos_alg;
3014 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3016 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3017 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3018 					   struct talitos_crypto_alg,
3021 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3024 	return talitos_init_common(ctx, talitos_alg);
/*
 * AEAD ->init hook: same as talitos_cra_init but starting from the
 * aead_alg embedded in the template, then defers to the common init.
 */
3027 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3029 	struct aead_alg *alg = crypto_aead_alg(tfm);
3030 	struct talitos_crypto_alg *talitos_alg;
3031 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3033 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3036 	return talitos_init_common(ctx, talitos_alg);
/*
 * AHASH init hook: run the generic init, then size the per-request
 * context so the ahash core allocates room for talitos_ahash_req_ctx.
 */
3039 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3041 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3043 	talitos_cra_init(tfm);
3046 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3047 				 sizeof(struct talitos_ahash_req_ctx));
/*
 * Transform teardown: release the DMA mapping of the key that was set up
 * at setkey time.  dma_unmap_single() with the recorded handle/length is
 * the only cleanup visible here.
 */
3052 static void talitos_cra_exit(struct crypto_tfm *tfm)
3054 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3055 	struct device *dev = ctx->dev;
3058 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
/*
3062  * given the alg's descriptor header template, determine whether descriptor
3063  * type and primary/secondary execution units required match the hw
3064  * capabilities description provided in the device tree node.
 */
3066 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3068 	struct talitos_private *priv = dev_get_drvdata(dev);
	/* both the descriptor type and primary EU bits must be advertised */
3071 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3072 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
	/* a secondary EU (e.g. MDEU for IPSEC_ESP) is only checked if used */
3074 	if (SECONDARY_EU(desc_hdr_template))
3075 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3076 			      & priv->exec_units);
/*
 * Device teardown: unregister every crypto algorithm this instance
 * registered, drop the RNG if it was offered, release both IRQ lines,
 * and kill the completion tasklets.
 * NOTE(review): the switch below appears to fall through between cases
 * only because the break; lines were dropped from this chunk -- the
 * upstream source has a break after each unregister call.
 */
3081 static int talitos_remove(struct platform_device *ofdev)
3083 	struct device *dev = &ofdev->dev;
3084 	struct talitos_private *priv = dev_get_drvdata(dev);
3085 	struct talitos_crypto_alg *t_alg, *n;
3088 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3089 		switch (t_alg->algt.type) {
3090 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
3092 		case CRYPTO_ALG_TYPE_AEAD:
3093 			crypto_unregister_aead(&t_alg->algt.alg.aead);
3094 		case CRYPTO_ALG_TYPE_AHASH:
3095 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3098 		list_del(&t_alg->entry);
3101 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3102 		talitos_unregister_rng(dev);
	/* up to two IRQ lines may have been mapped in talitos_probe_irq() */
3104 	for (i = 0; i < 2; i++)
3106 			free_irq(priv->irq[i], dev);
3107 			irq_dispose_mapping(priv->irq[i]);
3110 	tasklet_kill(&priv->done_task[0]);
3112 		tasklet_kill(&priv->done_task[1]);
/*
 * Allocate and populate a talitos_crypto_alg from a driver template:
 * copies the template, wires in the per-type callbacks (ablkcipher /
 * aead / ahash), rejects algorithms the hardware revision cannot do
 * (sha224 without HWINIT, hmac without HMAC_OK), and fills in the
 * generic crypto_alg fields (priority, alignmask, ctxsize, flags).
 * Returns the new object or an ERR_PTR on failure.
 */
3117 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3118 struct talitos_alg_template
3121 	struct talitos_private *priv = dev_get_drvdata(dev);
3122 	struct talitos_crypto_alg *t_alg;
3123 	struct crypto_alg *alg;
	/* devm allocation: freed automatically on device detach */
3125 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3128 		return ERR_PTR(-ENOMEM);
3130 	t_alg->algt = *template;
3132 	switch (t_alg->algt.type) {
3133 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3134 		alg = &t_alg->algt.alg.crypto;
3135 		alg->cra_init = talitos_cra_init;
3136 		alg->cra_exit = talitos_cra_exit;
3137 		alg->cra_type = &crypto_ablkcipher_type;
3138 		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3139 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3140 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3141 		alg->cra_ablkcipher.geniv = "eseqiv";
3143 	case CRYPTO_ALG_TYPE_AEAD:
3144 		alg = &t_alg->algt.alg.aead.base;
3145 		alg->cra_exit = talitos_cra_exit;
3146 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3147 		t_alg->algt.alg.aead.setkey = aead_setkey;
3148 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3149 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		/* sha224 AEADs need hardware sha224 init support */
3150 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3151 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3152 			devm_kfree(dev, t_alg);
3153 			return ERR_PTR(-ENOTSUPP);
3156 	case CRYPTO_ALG_TYPE_AHASH:
3157 		alg = &t_alg->algt.alg.hash.halg.base;
3158 		alg->cra_init = talitos_cra_init_ahash;
3159 		alg->cra_exit = talitos_cra_exit;
3160 		t_alg->algt.alg.hash.init = ahash_init;
3161 		t_alg->algt.alg.hash.update = ahash_update;
3162 		t_alg->algt.alg.hash.final = ahash_final;
3163 		t_alg->algt.alg.hash.finup = ahash_finup;
3164 		t_alg->algt.alg.hash.digest = ahash_digest;
		/* only keyed (hmac) hashes get a setkey hook */
3165 		if (!strncmp(alg->cra_name, "hmac", 4))
3166 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3167 		t_alg->algt.alg.hash.import = ahash_import;
3168 		t_alg->algt.alg.hash.export = ahash_export;
3170 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3171 		    !strncmp(alg->cra_name, "hmac", 4)) {
3172 			devm_kfree(dev, t_alg);
3173 			return ERR_PTR(-ENOTSUPP);
		/*
		 * without hardware sha224 init, fall back to a software
		 * initial state and run the engine in sha256 mode
		 */
3175 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3176 		    (!strcmp(alg->cra_name, "sha224") ||
3177 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3178 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3179 			t_alg->algt.desc_hdr_template =
3180 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3181 					DESC_HDR_SEL0_MDEUA |
3182 					DESC_HDR_MODE0_MDEU_SHA256;
3186 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3187 		devm_kfree(dev, t_alg);
3188 		return ERR_PTR(-EINVAL);
3191 	alg->cra_module = THIS_MODULE;
	/* template may override the default driver priority */
3192 	if (t_alg->algt.priority)
3193 		alg->cra_priority = t_alg->algt.priority;
3195 		alg->cra_priority = TALITOS_CRA_PRIORITY;
	/* SEC1 requires 4-byte aligned buffers; SEC2+ has no such need */
3196 	if (has_ftr_sec1(priv))
3197 		alg->cra_alignmask = 3;
3199 		alg->cra_alignmask = 0;
3200 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3201 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
/*
 * Map and request the device's interrupt line(s) from the device tree.
 * SEC1 parts use a single IRQ; SEC2+ parts may expose one combined IRQ
 * (all four channels) or two IRQs (channels 0/2 and 1/3 split).
 * NOTE(review): several control-flow lines (braces, gotos, the is_sec1
 * branch) are missing from this chunk; the visible calls match the
 * upstream handler assignment per topology.
 */
3208 static int talitos_probe_irq(struct platform_device *ofdev)
3210 	struct device *dev = &ofdev->dev;
3211 	struct device_node *np = ofdev->dev.of_node;
3212 	struct talitos_private *priv = dev_get_drvdata(dev);
3214 	bool is_sec1 = has_ftr_sec1(priv);
3216 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3217 	if (!priv->irq[0]) {
3218 		dev_err(dev, "failed to map irq\n");
	/* SEC1: single handler covering all four channels */
3222 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3223 				  dev_driver_string(dev), dev);
3227 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3229 	/* get the primary irq line */
3230 	if (!priv->irq[1]) {
		/* single-IRQ SEC2: one handler for all channels */
3231 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3232 				  dev_driver_string(dev), dev);
		/* dual-IRQ SEC2: first line serves channels 0 and 2 */
3236 		err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3237 				  dev_driver_string(dev), dev);
3241 	/* get the secondary irq line */
3242 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3243 			  dev_driver_string(dev), dev);
3245 		dev_err(dev, "failed to request secondary irq\n");
3246 		irq_dispose_mapping(priv->irq[1]);
3254 	dev_err(dev, "failed to request primary irq\n");
3255 	irq_dispose_mapping(priv->irq[0]);
/*
 * Platform-driver probe: map registers, read SEC capabilities from the
 * device tree, derive feature flags and EU register offsets per SEC
 * revision, set up IRQs and done-tasklets, allocate per-channel state,
 * reset the hardware, then register the RNG and every supported
 * algorithm from driver_algs[].  On failure it unwinds via
 * talitos_remove().
 * NOTE(review): many error-path lines (goto err_out, braces, returns)
 * are missing from this chunk.
 */
3262 static int talitos_probe(struct platform_device *ofdev)
3264 	struct device *dev = &ofdev->dev;
3265 	struct device_node *np = ofdev->dev.of_node;
3266 	struct talitos_private *priv;
3269 	struct resource *res;
3271 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3275 	INIT_LIST_HEAD(&priv->alg_list);
3277 	dev_set_drvdata(dev, priv);
3279 	priv->ofdev = ofdev;
3281 	spin_lock_init(&priv->reg_lock);
3283 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3286 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3288 		dev_err(dev, "failed to of_iomap\n");
3293 	/* get SEC version capabilities from device tree */
3294 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3295 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3296 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3297 	of_property_read_u32(np, "fsl,descriptor-types-mask",
	/* num_channels must be a power of two: channel selection uses a mask */
3300 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3301 	    !priv->exec_units || !priv->desc_types) {
3302 		dev_err(dev, "invalid property data in device tree node\n");
	/* feature flags keyed off the compatible string / SEC revision */
3307 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3308 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3310 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3311 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3312 				  TALITOS_FTR_SHA224_HWINIT |
3313 				  TALITOS_FTR_HMAC_OK;
3315 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3316 		priv->features |= TALITOS_FTR_SEC1;
	/* per-revision execution-unit register offsets and channel stride */
3318 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3319 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3320 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3321 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3322 		stride = TALITOS1_CH_STRIDE;
3323 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3324 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3325 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3326 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3327 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3328 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3329 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3330 		stride = TALITOS1_CH_STRIDE;
3332 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3333 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3334 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3335 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3336 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3337 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3338 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3339 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3340 		stride = TALITOS2_CH_STRIDE;
3343 	err = talitos_probe_irq(ofdev);
	/* choose done-tasklets matching the SEC revision and IRQ topology */
3347 	if (of_device_is_compatible(np, "fsl,sec1.0")) {
3348 		if (priv->num_channels == 1)
3349 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3350 				     (unsigned long)dev);
3352 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3353 				     (unsigned long)dev);
3356 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3357 				     (unsigned long)dev);
3358 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3359 				     (unsigned long)dev);
3360 	} else if (priv->num_channels == 1) {
3361 		tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3362 			     (unsigned long)dev);
3364 		tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3365 			     (unsigned long)dev);
	/* per-channel management structures and request FIFOs */
3369 	priv->chan = devm_kcalloc(dev,
3371 				  sizeof(struct talitos_channel),
3374 		dev_err(dev, "failed to allocate channel management space\n");
3379 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3381 	for (i = 0; i < priv->num_channels; i++) {
3382 		priv->chan[i].reg = priv->reg + stride * (i + 1);
3383 		if (!priv->irq[1] || !(i & 1))
3384 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3386 		spin_lock_init(&priv->chan[i].head_lock);
3387 		spin_lock_init(&priv->chan[i].tail_lock);
3389 		priv->chan[i].fifo = devm_kcalloc(dev,
3391 						  sizeof(struct talitos_request),
3393 		if (!priv->chan[i].fifo) {
3394 			dev_err(dev, "failed to allocate request fifo %d\n", i);
3399 		atomic_set(&priv->chan[i].submit_count,
3400 			   -(priv->chfifo_len - 1));
3403 	dma_set_mask(dev, DMA_BIT_MASK(36));
3405 	/* reset and initialize the h/w */
3406 	err = init_device(dev);
3408 		dev_err(dev, "failed to initialize device\n");
3412 	/* register the RNG, if available */
3413 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3414 		err = talitos_register_rng(dev);
3416 			dev_err(dev, "failed to register hwrng: %d\n", err);
3419 			dev_info(dev, "hwrng\n");
3422 	/* register crypto algorithms the device supports */
3423 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3424 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3425 			struct talitos_crypto_alg *t_alg;
3426 			struct crypto_alg *alg = NULL;
3428 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3429 			if (IS_ERR(t_alg)) {
3430 				err = PTR_ERR(t_alg);
				/* -ENOTSUPP just skips this alg, not fatal */
3431 				if (err == -ENOTSUPP)
3436 			switch (t_alg->algt.type) {
3437 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3438 				err = crypto_register_alg(
3439 						&t_alg->algt.alg.crypto);
3440 				alg = &t_alg->algt.alg.crypto;
3443 			case CRYPTO_ALG_TYPE_AEAD:
3444 				err = crypto_register_aead(
3445 					&t_alg->algt.alg.aead);
3446 				alg = &t_alg->algt.alg.aead.base;
3449 			case CRYPTO_ALG_TYPE_AHASH:
3450 				err = crypto_register_ahash(
3451 						&t_alg->algt.alg.hash);
3452 				alg = &t_alg->algt.alg.hash.halg.base;
3456 				dev_err(dev, "%s alg registration failed\n",
3457 					alg->cra_driver_name);
3458 				devm_kfree(dev, t_alg);
3460 				list_add_tail(&t_alg->entry, &priv->alg_list);
3463 	if (!list_empty(&priv->alg_list))
3464 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3465 			 (char *)of_get_property(np, "compatible", NULL));
	/* error path: shared with device removal */
3470 	talitos_remove(ofdev);
/*
 * Device-tree match table: SEC1 and SEC2 family entries are compiled in
 * only when the corresponding Kconfig option is enabled.
 */
3475 static const struct of_device_id talitos_match[] = {
3476 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3478 		.compatible = "fsl,sec1.0",
3481 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3483 		.compatible = "fsl,sec2.0",
3488 MODULE_DEVICE_TABLE(of, talitos_match);
/* Platform driver glue: probe/remove bound to the OF match table above. */
3490 static struct platform_driver talitos_driver = {
3493 		.of_match_table = talitos_match,
3495 	.probe = talitos_probe,
3496 	.remove = talitos_remove,
/* Standard module boilerplate: registration helper plus metadata. */
3499 module_platform_driver(talitos_driver);
3501 MODULE_LICENSE("GPL");
3502 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3503 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");