1 /*
2  * talitos - Freescale Integrated Security Engine (SEC) device driver
3  *
4  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5  *
6  * Scatterlist Crypto API glue code copied from files with the following:
7  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * Crypto algorithm registration code copied from hifn driver:
10  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11  * All rights reserved.
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  *
23  * You should have received a copy of the GNU General Public License
24  * along with this program; if not, write to the Free Software
25  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26  */
27
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/io.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
43
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
55
56 #include "talitos.h"
57
58 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59                            unsigned int len, bool is_sec1)
60 {
61         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62         if (is_sec1) {
63                 ptr->len1 = cpu_to_be16(len);
64         } else {
65                 ptr->len = cpu_to_be16(len);
66                 ptr->eptr = upper_32_bits(dma_addr);
67         }
68 }
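/*
 * Worked example (illustrative values): on a SEC2+ device a 36-bit bus
 * address such as 0x9_89ab_cdef is split by to_talitos_ptr() into
 * ptr = 0x89abcdef and eptr = 0x9, with len carrying the byte count;
 * on SEC1 only the low 32 bits and the len1 field are used.
 */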
69
70 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
71                              struct talitos_ptr *src_ptr, bool is_sec1)
72 {
73         dst_ptr->ptr = src_ptr->ptr;
74         if (is_sec1) {
75                 dst_ptr->len1 = src_ptr->len1;
76         } else {
77                 dst_ptr->len = src_ptr->len;
78                 dst_ptr->eptr = src_ptr->eptr;
79         }
80 }
81
82 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
83                                            bool is_sec1)
84 {
85         if (is_sec1)
86                 return be16_to_cpu(ptr->len1);
87         else
88                 return be16_to_cpu(ptr->len);
89 }
90
91 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
92                                    bool is_sec1)
93 {
94         if (!is_sec1)
95                 ptr->j_extent = val;
96 }
97
98 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
99 {
100         if (!is_sec1)
101                 ptr->j_extent |= val;
102 }
103
104 /*
105  * map virtual single (contiguous) pointer to h/w descriptor pointer
106  */
107 static void __map_single_talitos_ptr(struct device *dev,
108                                      struct talitos_ptr *ptr,
109                                      unsigned int len, void *data,
110                                      enum dma_data_direction dir,
111                                      unsigned long attrs)
112 {
113         dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
114         struct talitos_private *priv = dev_get_drvdata(dev);
115         bool is_sec1 = has_ftr_sec1(priv);
116
117         to_talitos_ptr(ptr, dma_addr, len, is_sec1);
118 }
119
120 static void map_single_talitos_ptr(struct device *dev,
121                                    struct talitos_ptr *ptr,
122                                    unsigned int len, void *data,
123                                    enum dma_data_direction dir)
124 {
125         __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
126 }
127
128 static void map_single_talitos_ptr_nosync(struct device *dev,
129                                           struct talitos_ptr *ptr,
130                                           unsigned int len, void *data,
131                                           enum dma_data_direction dir)
132 {
133         __map_single_talitos_ptr(dev, ptr, len, data, dir,
134                                  DMA_ATTR_SKIP_CPU_SYNC);
135 }
136
137 /*
138  * unmap bus single (contiguous) h/w descriptor pointer
139  */
140 static void unmap_single_talitos_ptr(struct device *dev,
141                                      struct talitos_ptr *ptr,
142                                      enum dma_data_direction dir)
143 {
144         struct talitos_private *priv = dev_get_drvdata(dev);
145         bool is_sec1 = has_ftr_sec1(priv);
146
147         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
148                          from_talitos_ptr_len(ptr, is_sec1), dir);
149 }
150
151 static int reset_channel(struct device *dev, int ch)
152 {
153         struct talitos_private *priv = dev_get_drvdata(dev);
154         unsigned int timeout = TALITOS_TIMEOUT;
155         bool is_sec1 = has_ftr_sec1(priv);
156
157         if (is_sec1) {
158                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
159                           TALITOS1_CCCR_LO_RESET);
160
161                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
162                         TALITOS1_CCCR_LO_RESET) && --timeout)
163                         cpu_relax();
164         } else {
165                 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
166                           TALITOS2_CCCR_RESET);
167
168                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
169                         TALITOS2_CCCR_RESET) && --timeout)
170                         cpu_relax();
171         }
172
173         if (timeout == 0) {
174                 dev_err(dev, "failed to reset channel %d\n", ch);
175                 return -EIO;
176         }
177
178         /* set 36-bit addressing, done writeback enable and done IRQ enable */
179         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
180                   TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
181         /* enable chaining descriptors */
182         if (is_sec1)
183                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
184                           TALITOS_CCCR_LO_NE);
185
186         /* and ICCR writeback, if available */
187         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
188                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
189                           TALITOS_CCCR_LO_IWSE);
190
191         return 0;
192 }
193
194 static int reset_device(struct device *dev)
195 {
196         struct talitos_private *priv = dev_get_drvdata(dev);
197         unsigned int timeout = TALITOS_TIMEOUT;
198         bool is_sec1 = has_ftr_sec1(priv);
199         u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
200
201         setbits32(priv->reg + TALITOS_MCR, mcr);
202
203         while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
204                && --timeout)
205                 cpu_relax();
206
207         if (priv->irq[1]) {
208                 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
209                 setbits32(priv->reg + TALITOS_MCR, mcr);
210         }
211
212         if (timeout == 0) {
213                 dev_err(dev, "failed to reset device\n");
214                 return -EIO;
215         }
216
217         return 0;
218 }
219
220 /*
221  * Reset and initialize the device
222  */
223 static int init_device(struct device *dev)
224 {
225         struct talitos_private *priv = dev_get_drvdata(dev);
226         int ch, err;
227         bool is_sec1 = has_ftr_sec1(priv);
228
229         /*
230          * Master reset
231          * Per the errata documentation, certain SEC interrupts are
232          * not fully cleared by writing the MCR:SWR bit once, so the
233          * bit is set twice to completely reset the device.
234          */
235         err = reset_device(dev);
236         if (err)
237                 return err;
238
239         err = reset_device(dev);
240         if (err)
241                 return err;
242
243         /* reset channels */
244         for (ch = 0; ch < priv->num_channels; ch++) {
245                 err = reset_channel(dev, ch);
246                 if (err)
247                         return err;
248         }
249
250         /* enable channel done and error interrupts */
251         if (is_sec1) {
252                 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
253                 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
254                 /* disable parity error check in DEU (erroneous? test vect.) */
255                 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
256         } else {
257                 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
258                 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
259         }
260
261         /* disable integrity check error interrupts (use writeback instead) */
262         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
263                 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
264                           TALITOS_MDEUICR_LO_ICE);
265
266         return 0;
267 }
268
269 /**
270  * talitos_submit - submits a descriptor to the device for processing
271  * @dev:        the SEC device to be used
272  * @ch:         the SEC device channel to be used
273  * @desc:       the descriptor to be processed by the device
274  * @callback:   whom to call when processing is complete
275  * @context:    a handle for use by caller (optional)
276  *
277  * desc must contain valid dma-mapped (bus physical) address pointers.
278  * callback must check err and the feedback field in the descriptor
279  * header for device processing status.
280  */
281 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
282                    void (*callback)(struct device *dev,
283                                     struct talitos_desc *desc,
284                                     void *context, int error),
285                    void *context)
286 {
287         struct talitos_private *priv = dev_get_drvdata(dev);
288         struct talitos_request *request;
289         unsigned long flags;
290         int head;
291         bool is_sec1 = has_ftr_sec1(priv);
292
293         spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
294
295         if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
296                 /* h/w fifo is full */
297                 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
298                 return -EAGAIN;
299         }
300
301         head = priv->chan[ch].head;
302         request = &priv->chan[ch].fifo[head];
303
304         /* map descriptor and save caller data */
305         if (is_sec1) {
306                 desc->hdr1 = desc->hdr;
307                 request->dma_desc = dma_map_single(dev, &desc->hdr1,
308                                                    TALITOS_DESC_SIZE,
309                                                    DMA_BIDIRECTIONAL);
310         } else {
311                 request->dma_desc = dma_map_single(dev, desc,
312                                                    TALITOS_DESC_SIZE,
313                                                    DMA_BIDIRECTIONAL);
314         }
315         request->callback = callback;
316         request->context = context;
317
318         /* increment fifo head */
319         priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
320
321         smp_wmb();
322         request->desc = desc;
323
324         /* GO! */
325         wmb();
326         out_be32(priv->chan[ch].reg + TALITOS_FF,
327                  upper_32_bits(request->dma_desc));
328         out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
329                  lower_32_bits(request->dma_desc));
330
331         spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
332
333         return -EINPROGRESS;
334 }
335 EXPORT_SYMBOL(talitos_submit);
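/*
 * Usage sketch (hypothetical caller, for illustration only; my_done(),
 * my_req, complete_my_request() and unmap_descriptor() are placeholders,
 * not part of this driver): the caller provides a completion callback
 * and treats -EINPROGRESS as success, while any other return value
 * (e.g. -EAGAIN when the channel fifo is full) means the descriptor was
 * not queued and must be cleaned up by the caller.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *req = context;
 *
 *		complete_my_request(req, error);
 *	}
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, my_done, req);
 *	if (ret != -EINPROGRESS) {
 *		unmap_descriptor(edesc);
 *		kfree(edesc);
 *	}
 */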
336
337 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
338 {
339         struct talitos_edesc *edesc;
340
341         if (!is_sec1)
342                 return request->desc->hdr;
343
344         if (!request->desc->next_desc)
345                 return request->desc->hdr1;
346
347         edesc = container_of(request->desc, struct talitos_edesc, desc);
348
349         return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
350 }
351
352 /*
353  * process what was done, notify callback of error if not
354  */
355 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
356 {
357         struct talitos_private *priv = dev_get_drvdata(dev);
358         struct talitos_request *request, saved_req;
359         unsigned long flags;
360         int tail, status;
361         bool is_sec1 = has_ftr_sec1(priv);
362
363         spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
364
365         tail = priv->chan[ch].tail;
366         while (priv->chan[ch].fifo[tail].desc) {
367                 __be32 hdr;
368
369                 request = &priv->chan[ch].fifo[tail];
370
371                 /* descriptors with their done bits set don't get the error */
372                 rmb();
373                 hdr = get_request_hdr(request, is_sec1);
374
375                 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
376                         status = 0;
377                 else
378                         if (!error)
379                                 break;
380                         else
381                                 status = error;
382
383                 dma_unmap_single(dev, request->dma_desc,
384                                  TALITOS_DESC_SIZE,
385                                  DMA_BIDIRECTIONAL);
386
387                 /* copy entries so we can call callback outside lock */
388                 saved_req.desc = request->desc;
389                 saved_req.callback = request->callback;
390                 saved_req.context = request->context;
391
392                 /* release request entry in fifo */
393                 smp_wmb();
394                 request->desc = NULL;
395
396                 /* increment fifo tail */
397                 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
398
399                 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
400
401                 atomic_dec(&priv->chan[ch].submit_count);
402
403                 saved_req.callback(dev, saved_req.desc, saved_req.context,
404                                    status);
405                 /* channel may resume processing in single desc error case */
406                 if (error && !reset_ch && status == error)
407                         return;
408                 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
409                 tail = priv->chan[ch].tail;
410         }
411
412         spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
413 }
414
415 /*
416  * process completed requests for channels that have done status
417  */
418 #define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
419 static void talitos1_done_##name(unsigned long data)                    \
420 {                                                                       \
421         struct device *dev = (struct device *)data;                     \
422         struct talitos_private *priv = dev_get_drvdata(dev);            \
423         unsigned long flags;                                            \
424                                                                         \
425         if (ch_done_mask & 0x10000000)                                  \
426                 flush_channel(dev, 0, 0, 0);                    \
427         if (ch_done_mask & 0x40000000)                                  \
428                 flush_channel(dev, 1, 0, 0);                    \
429         if (ch_done_mask & 0x00010000)                                  \
430                 flush_channel(dev, 2, 0, 0);                    \
431         if (ch_done_mask & 0x00040000)                                  \
432                 flush_channel(dev, 3, 0, 0);                    \
433                                                                         \
434         /* At this point, all completed channels have been processed */ \
435         /* Unmask done interrupts for channels completed later on. */   \
436         spin_lock_irqsave(&priv->reg_lock, flags);                      \
437         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
438         clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
439         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
440 }
441
442 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
443 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
444
445 #define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
446 static void talitos2_done_##name(unsigned long data)                    \
447 {                                                                       \
448         struct device *dev = (struct device *)data;                     \
449         struct talitos_private *priv = dev_get_drvdata(dev);            \
450         unsigned long flags;                                            \
451                                                                         \
452         if (ch_done_mask & 1)                                           \
453                 flush_channel(dev, 0, 0, 0);                            \
454         if (ch_done_mask & (1 << 2))                                    \
455                 flush_channel(dev, 1, 0, 0);                            \
456         if (ch_done_mask & (1 << 4))                                    \
457                 flush_channel(dev, 2, 0, 0);                            \
458         if (ch_done_mask & (1 << 6))                                    \
459                 flush_channel(dev, 3, 0, 0);                            \
460                                                                         \
461         /* At this point, all completed channels have been processed */ \
462         /* Unmask done interrupts for channels completed later on. */   \
463         spin_lock_irqsave(&priv->reg_lock, flags);                      \
464         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
465         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
466         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
467 }
468
469 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
470 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
471 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
472 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
473
474 /*
475  * locate current (offending) descriptor
476  */
477 static u32 current_desc_hdr(struct device *dev, int ch)
478 {
479         struct talitos_private *priv = dev_get_drvdata(dev);
480         int tail, iter;
481         dma_addr_t cur_desc;
482
483         cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
484         cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
485
486         if (!cur_desc) {
487                 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
488                 return 0;
489         }
490
491         tail = priv->chan[ch].tail;
492
493         iter = tail;
494         while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
495                priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
496                 iter = (iter + 1) & (priv->fifo_len - 1);
497                 if (iter == tail) {
498                         dev_err(dev, "couldn't locate current descriptor\n");
499                         return 0;
500                 }
501         }
502
503         if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
504                 struct talitos_edesc *edesc;
505
506                 edesc = container_of(priv->chan[ch].fifo[iter].desc,
507                                      struct talitos_edesc, desc);
508                 return ((struct talitos_desc *)
509                         (edesc->buf + edesc->dma_len))->hdr;
510         }
511
512         return priv->chan[ch].fifo[iter].desc->hdr;
513 }
514
515 /*
516  * user diagnostics; report root cause of error based on execution unit status
517  */
518 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
519 {
520         struct talitos_private *priv = dev_get_drvdata(dev);
521         int i;
522
523         if (!desc_hdr)
524                 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
525
526         switch (desc_hdr & DESC_HDR_SEL0_MASK) {
527         case DESC_HDR_SEL0_AFEU:
528                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
529                         in_be32(priv->reg_afeu + TALITOS_EUISR),
530                         in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
531                 break;
532         case DESC_HDR_SEL0_DEU:
533                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
534                         in_be32(priv->reg_deu + TALITOS_EUISR),
535                         in_be32(priv->reg_deu + TALITOS_EUISR_LO));
536                 break;
537         case DESC_HDR_SEL0_MDEUA:
538         case DESC_HDR_SEL0_MDEUB:
539                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
540                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
541                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
542                 break;
543         case DESC_HDR_SEL0_RNG:
544                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
545                         in_be32(priv->reg_rngu + TALITOS_ISR),
546                         in_be32(priv->reg_rngu + TALITOS_ISR_LO));
547                 break;
548         case DESC_HDR_SEL0_PKEU:
549                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
550                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
551                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
552                 break;
553         case DESC_HDR_SEL0_AESU:
554                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
555                         in_be32(priv->reg_aesu + TALITOS_EUISR),
556                         in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
557                 break;
558         case DESC_HDR_SEL0_CRCU:
559                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
560                         in_be32(priv->reg_crcu + TALITOS_EUISR),
561                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
562                 break;
563         case DESC_HDR_SEL0_KEU:
564                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
565                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
566                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
567                 break;
568         }
569
570         switch (desc_hdr & DESC_HDR_SEL1_MASK) {
571         case DESC_HDR_SEL1_MDEUA:
572         case DESC_HDR_SEL1_MDEUB:
573                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
574                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
575                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
576                 break;
577         case DESC_HDR_SEL1_CRCU:
578                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
579                         in_be32(priv->reg_crcu + TALITOS_EUISR),
580                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
581                 break;
582         }
583
584         for (i = 0; i < 8; i++)
585                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
586                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
587                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
588 }
589
590 /*
591  * recover from error interrupts
592  */
593 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
594 {
595         struct talitos_private *priv = dev_get_drvdata(dev);
596         unsigned int timeout = TALITOS_TIMEOUT;
597         int ch, error, reset_dev = 0;
598         u32 v_lo;
599         bool is_sec1 = has_ftr_sec1(priv);
600         int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
601
602         for (ch = 0; ch < priv->num_channels; ch++) {
603                 /* skip channels without errors */
604                 if (is_sec1) {
605                         /* bits 29, 31, 17, 19 */
606                         if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
607                                 continue;
608                 } else {
609                         if (!(isr & (1 << (ch * 2 + 1))))
610                                 continue;
611                 }
612
613                 error = -EINVAL;
614
615                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
616
617                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
618                         dev_err(dev, "double fetch fifo overflow error\n");
619                         error = -EAGAIN;
620                         reset_ch = 1;
621                 }
622                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
623                         /* h/w dropped descriptor */
624                         dev_err(dev, "single fetch fifo overflow error\n");
625                         error = -EAGAIN;
626                 }
627                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
628                         dev_err(dev, "master data transfer error\n");
629                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
630                         dev_err(dev, is_sec1 ? "pointer not complete error\n"
631                                              : "s/g data length zero error\n");
632                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
633                         dev_err(dev, is_sec1 ? "parity error\n"
634                                              : "fetch pointer zero error\n");
635                 if (v_lo & TALITOS_CCPSR_LO_IDH)
636                         dev_err(dev, "illegal descriptor header error\n");
637                 if (v_lo & TALITOS_CCPSR_LO_IEU)
638                         dev_err(dev, is_sec1 ? "static assignment error\n"
639                                              : "invalid exec unit error\n");
640                 if (v_lo & TALITOS_CCPSR_LO_EU)
641                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
642                 if (!is_sec1) {
643                         if (v_lo & TALITOS_CCPSR_LO_GB)
644                                 dev_err(dev, "gather boundary error\n");
645                         if (v_lo & TALITOS_CCPSR_LO_GRL)
646                                 dev_err(dev, "gather return/length error\n");
647                         if (v_lo & TALITOS_CCPSR_LO_SB)
648                                 dev_err(dev, "scatter boundary error\n");
649                         if (v_lo & TALITOS_CCPSR_LO_SRL)
650                                 dev_err(dev, "scatter return/length error\n");
651                 }
652
653                 flush_channel(dev, ch, error, reset_ch);
654
655                 if (reset_ch) {
656                         reset_channel(dev, ch);
657                 } else {
658                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
659                                   TALITOS2_CCCR_CONT);
660                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
661                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
662                                TALITOS2_CCCR_CONT) && --timeout)
663                                 cpu_relax();
664                         if (timeout == 0) {
665                                 dev_err(dev, "failed to restart channel %d\n",
666                                         ch);
667                                 reset_dev = 1;
668                         }
669                 }
670         }
671         if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
672             (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
673                 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
674                         dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
675                                 isr, isr_lo);
676                 else
677                         dev_err(dev, "done overflow, internal time out, or "
678                                 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
679
680                 /* purge request queues */
681                 for (ch = 0; ch < priv->num_channels; ch++)
682                         flush_channel(dev, ch, -EIO, 1);
683
684                 /* reset and reinitialize the device */
685                 init_device(dev);
686         }
687 }
688
689 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
690 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
691 {                                                                              \
692         struct device *dev = data;                                             \
693         struct talitos_private *priv = dev_get_drvdata(dev);                   \
694         u32 isr, isr_lo;                                                       \
695         unsigned long flags;                                                   \
696                                                                                \
697         spin_lock_irqsave(&priv->reg_lock, flags);                             \
698         isr = in_be32(priv->reg + TALITOS_ISR);                                \
699         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
700         /* Acknowledge interrupt */                                            \
701         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
702         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
703                                                                                \
704         if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
705                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
706                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
707         }                                                                      \
708         else {                                                                 \
709                 if (likely(isr & ch_done_mask)) {                              \
710                         /* mask further done interrupts. */                    \
711                         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
712                         /* done_task will unmask done interrupts at exit */    \
713                         tasklet_schedule(&priv->done_task[tlet]);              \
714                 }                                                              \
715                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
716         }                                                                      \
717                                                                                \
718         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
719                                                                 IRQ_NONE;      \
720 }
721
722 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
723
724 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
725 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
726 {                                                                              \
727         struct device *dev = data;                                             \
728         struct talitos_private *priv = dev_get_drvdata(dev);                   \
729         u32 isr, isr_lo;                                                       \
730         unsigned long flags;                                                   \
731                                                                                \
732         spin_lock_irqsave(&priv->reg_lock, flags);                             \
733         isr = in_be32(priv->reg + TALITOS_ISR);                                \
734         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
735         /* Acknowledge interrupt */                                            \
736         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
737         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
738                                                                                \
739         if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
740                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
741                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
742         }                                                                      \
743         else {                                                                 \
744                 if (likely(isr & ch_done_mask)) {                              \
745                         /* mask further done interrupts. */                    \
746                         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
747                         /* done_task will unmask done interrupts at exit */    \
748                         tasklet_schedule(&priv->done_task[tlet]);              \
749                 }                                                              \
750                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
751         }                                                                      \
752                                                                                \
753         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
754                                                                 IRQ_NONE;      \
755 }
756
757 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
758 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
759                        0)
760 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
761                        1)
762
763 /*
764  * hwrng
765  */
766 static int talitos_rng_data_present(struct hwrng *rng, int wait)
767 {
768         struct device *dev = (struct device *)rng->priv;
769         struct talitos_private *priv = dev_get_drvdata(dev);
770         u32 ofl;
771         int i;
772
773         for (i = 0; i < 20; i++) {
774                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
775                       TALITOS_RNGUSR_LO_OFL;
776                 if (ofl || !wait)
777                         break;
778                 udelay(10);
779         }
780
781         return !!ofl;
782 }
783
784 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
785 {
786         struct device *dev = (struct device *)rng->priv;
787         struct talitos_private *priv = dev_get_drvdata(dev);
788
789         /* rng fifo requires 64-bit accesses */
790         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
791         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
792
793         return sizeof(u32);
794 }
795
796 static int talitos_rng_init(struct hwrng *rng)
797 {
798         struct device *dev = (struct device *)rng->priv;
799         struct talitos_private *priv = dev_get_drvdata(dev);
800         unsigned int timeout = TALITOS_TIMEOUT;
801
802         setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
803         while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
804                  & TALITOS_RNGUSR_LO_RD)
805                && --timeout)
806                 cpu_relax();
807         if (timeout == 0) {
808                 dev_err(dev, "failed to reset rng hw\n");
809                 return -ENODEV;
810         }
811
812         /* start generating */
813         setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
814
815         return 0;
816 }
817
818 static int talitos_register_rng(struct device *dev)
819 {
820         struct talitos_private *priv = dev_get_drvdata(dev);
821         int err;
822
823         priv->rng.name          = dev_driver_string(dev);
824         priv->rng.init          = talitos_rng_init;
825         priv->rng.data_present  = talitos_rng_data_present;
826         priv->rng.data_read     = talitos_rng_data_read;
827         priv->rng.priv          = (unsigned long)dev;
828
829         err = hwrng_register(&priv->rng);
830         if (!err)
831                 priv->rng_registered = true;
832
833         return err;
834 }
835
836 static void talitos_unregister_rng(struct device *dev)
837 {
838         struct talitos_private *priv = dev_get_drvdata(dev);
839
840         if (!priv->rng_registered)
841                 return;
842
843         hwrng_unregister(&priv->rng);
844         priv->rng_registered = false;
845 }
846
847 /*
848  * crypto alg
849  */
850 #define TALITOS_CRA_PRIORITY            3000
851 /*
852  * Defines a priority for doing AEAD with descriptors type
853  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
854  */
855 #define TALITOS_CRA_PRIORITY_AEAD_HSNA  (TALITOS_CRA_PRIORITY - 1)
856 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
857 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
858
859 struct talitos_ctx {
860         struct device *dev;
861         int ch;
862         __be32 desc_hdr_template;
863         u8 key[TALITOS_MAX_KEY_SIZE];
864         u8 iv[TALITOS_MAX_IV_LENGTH];
865         dma_addr_t dma_key;
866         unsigned int keylen;
867         unsigned int enckeylen;
868         unsigned int authkeylen;
869 };
870
871 #define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
872 #define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
873
874 struct talitos_ahash_req_ctx {
875         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
876         unsigned int hw_context_size;
877         u8 buf[2][HASH_MAX_BLOCK_SIZE];
878         int buf_idx;
879         unsigned int swinit;
880         unsigned int first;
881         unsigned int last;
882         unsigned int to_hash_later;
883         unsigned int nbuf;
884         struct scatterlist bufsl[2];
885         struct scatterlist *psrc;
886 };
887
888 struct talitos_export_state {
889         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
890         u8 buf[HASH_MAX_BLOCK_SIZE];
891         unsigned int swinit;
892         unsigned int first;
893         unsigned int last;
894         unsigned int to_hash_later;
895         unsigned int nbuf;
896 };
897
898 static int aead_setkey(struct crypto_aead *authenc,
899                        const u8 *key, unsigned int keylen)
900 {
901         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
902         struct device *dev = ctx->dev;
903         struct crypto_authenc_keys keys;
904
905         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
906                 goto badkey;
907
908         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
909                 goto badkey;
910
911         if (ctx->keylen)
912                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
913
914         memcpy(ctx->key, keys.authkey, keys.authkeylen);
915         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
916
917         ctx->keylen = keys.authkeylen + keys.enckeylen;
918         ctx->enckeylen = keys.enckeylen;
919         ctx->authkeylen = keys.authkeylen;
920         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
921                                       DMA_TO_DEVICE);
922
923         memzero_explicit(&keys, sizeof(keys));
924         return 0;
925
926 badkey:
927         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
928         memzero_explicit(&keys, sizeof(keys));
929         return -EINVAL;
930 }
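/*
 * Resulting key layout (summary of aead_setkey() above): the split
 * authenc key is stored as one contiguous, DMA-mapped blob,
 *
 *	ctx->key: [ authkey (authkeylen bytes) | enckey (enckeylen bytes) ]
 *
 * so that ipsec_esp() can point the HMAC key pointer at ctx->dma_key
 * and the cipher key pointer at ctx->dma_key + ctx->authkeylen.
 */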
931
932 static void talitos_sg_unmap(struct device *dev,
933                              struct talitos_edesc *edesc,
934                              struct scatterlist *src,
935                              struct scatterlist *dst,
936                              unsigned int len, unsigned int offset)
937 {
938         struct talitos_private *priv = dev_get_drvdata(dev);
939         bool is_sec1 = has_ftr_sec1(priv);
940         unsigned int src_nents = edesc->src_nents ? : 1;
941         unsigned int dst_nents = edesc->dst_nents ? : 1;
942
943         if (is_sec1 && dst && dst_nents > 1) {
944                 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
945                                            len, DMA_FROM_DEVICE);
946                 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
947                                      offset);
948         }
949         if (src != dst) {
950                 if (src_nents == 1 || !is_sec1)
951                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
952
953                 if (dst && (dst_nents == 1 || !is_sec1))
954                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
955         } else if (src_nents == 1 || !is_sec1) {
956                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
957         }
958 }
959
960 static void ipsec_esp_unmap(struct device *dev,
961                             struct talitos_edesc *edesc,
962                             struct aead_request *areq)
963 {
964         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
965         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
966         unsigned int ivsize = crypto_aead_ivsize(aead);
967         bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
968         struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
969
970         if (is_ipsec_esp)
971                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
972                                          DMA_FROM_DEVICE);
973         unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
974
975         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
976                          areq->assoclen);
977
978         if (edesc->dma_len)
979                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
980                                  DMA_BIDIRECTIONAL);
981
982         if (!is_ipsec_esp) {
983                 unsigned int dst_nents = edesc->dst_nents ? : 1;
984
985                 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
986                                    areq->assoclen + areq->cryptlen - ivsize);
987         }
988 }
989
990 /*
991  * ipsec_esp descriptor callbacks
992  */
993 static void ipsec_esp_encrypt_done(struct device *dev,
994                                    struct talitos_desc *desc, void *context,
995                                    int err)
996 {
997         struct talitos_private *priv = dev_get_drvdata(dev);
998         bool is_sec1 = has_ftr_sec1(priv);
999         struct aead_request *areq = context;
1000         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1001         unsigned int authsize = crypto_aead_authsize(authenc);
1002         unsigned int ivsize = crypto_aead_ivsize(authenc);
1003         struct talitos_edesc *edesc;
1004         void *icvdata;
1005
1006         edesc = container_of(desc, struct talitos_edesc, desc);
1007
1008         ipsec_esp_unmap(dev, edesc, areq);
1009
1010         /* copy the generated ICV to dst */
1011         if (edesc->icv_ool) {
1012                 if (is_sec1)
1013                         icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
1014                 else
1015                         icvdata = &edesc->link_tbl[edesc->src_nents +
1016                                                    edesc->dst_nents + 2];
1017                 sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
1018                                      authsize, areq->assoclen + areq->cryptlen);
1019         }
1020
1021         dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1022
1023         kfree(edesc);
1024
1025         aead_request_complete(areq, err);
1026 }
1027
1028 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1029                                           struct talitos_desc *desc,
1030                                           void *context, int err)
1031 {
1032         struct aead_request *req = context;
1033         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1034         unsigned int authsize = crypto_aead_authsize(authenc);
1035         struct talitos_edesc *edesc;
1036         char *oicv, *icv;
1037         struct talitos_private *priv = dev_get_drvdata(dev);
1038         bool is_sec1 = has_ftr_sec1(priv);
1039
1040         edesc = container_of(desc, struct talitos_edesc, desc);
1041
1042         ipsec_esp_unmap(dev, edesc, req);
1043
1044         if (!err) {
1045                 char icvdata[SHA512_DIGEST_SIZE];
1046                 int nents = edesc->dst_nents ? : 1;
1047                 unsigned int len = req->assoclen + req->cryptlen;
1048
1049                 /* auth check */
1050                 if (nents > 1) {
1051                         sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
1052                                            len - authsize);
1053                         icv = icvdata;
1054                 } else {
1055                         icv = (char *)sg_virt(req->dst) + len - authsize;
1056                 }
1057
1058                 if (edesc->dma_len) {
1059                         if (is_sec1)
1060                                 oicv = (char *)&edesc->dma_link_tbl +
1061                                                req->assoclen + req->cryptlen;
1062                         else
1063                                 oicv = (char *)
1064                                        &edesc->link_tbl[edesc->src_nents +
1065                                                         edesc->dst_nents + 2];
1066                         if (edesc->icv_ool)
1067                                 icv = oicv + authsize;
1068                 } else
1069                         oicv = (char *)&edesc->link_tbl[0];
1070
1071                 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1072         }
1073
1074         kfree(edesc);
1075
1076         aead_request_complete(req, err);
1077 }
1078
1079 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1080                                           struct talitos_desc *desc,
1081                                           void *context, int err)
1082 {
1083         struct aead_request *req = context;
1084         struct talitos_edesc *edesc;
1085
1086         edesc = container_of(desc, struct talitos_edesc, desc);
1087
1088         ipsec_esp_unmap(dev, edesc, req);
1089
1090         /* check ICV auth status */
1091         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1092                      DESC_HDR_LO_ICCR1_PASS))
1093                 err = -EBADMSG;
1094
1095         kfree(edesc);
1096
1097         aead_request_complete(req, err);
1098 }
1099
1100 /*
1101  * convert scatterlist to SEC h/w link table format
1102  * stop at cryptlen bytes
1103  */
1104 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1105                                  unsigned int offset, int cryptlen,
1106                                  struct talitos_ptr *link_tbl_ptr)
1107 {
1108         int n_sg = sg_count;
1109         int count = 0;
1110
1111         while (cryptlen && sg && n_sg--) {
1112                 unsigned int len = sg_dma_len(sg);
1113
1114                 if (offset >= len) {
1115                         offset -= len;
1116                         goto next;
1117                 }
1118
1119                 len -= offset;
1120
1121                 if (len > cryptlen)
1122                         len = cryptlen;
1123
1124                 to_talitos_ptr(link_tbl_ptr + count,
1125                                sg_dma_address(sg) + offset, len, 0);
1126                 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1127                 count++;
1128                 cryptlen -= len;
1129                 offset = 0;
1130
1131 next:
1132                 sg = sg_next(sg);
1133         }
1134
1135         /* tag end of link table */
1136         if (count > 0)
1137                 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1138                                        DESC_PTR_LNKTBL_RETURN, 0);
1139
1140         return count;
1141 }
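/*
 * Worked example (hypothetical values): converting a 3-entry
 * scatterlist starting 16 bytes into the first segment, with
 * cryptlen = 96 and segment lengths 64/32/64, yields:
 *
 *	link_tbl[0]: ptr = dma(sg0) + 16, len = 48
 *	link_tbl[1]: ptr = dma(sg1),      len = 32
 *	link_tbl[2]: ptr = dma(sg2),      len = 16, ext = DESC_PTR_LNKTBL_RETURN
 *
 * i.e. the offset is consumed by the first entry, each entry is clamped
 * to the remaining cryptlen, and the last entry is tagged with the
 * RETURN extent bit.
 */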
1142
1143 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1144                               unsigned int len, struct talitos_edesc *edesc,
1145                               struct talitos_ptr *ptr, int sg_count,
1146                               unsigned int offset, int tbl_off, int elen)
1147 {
1148         struct talitos_private *priv = dev_get_drvdata(dev);
1149         bool is_sec1 = has_ftr_sec1(priv);
1150
1151         if (!src) {
1152                 to_talitos_ptr(ptr, 0, 0, is_sec1);
1153                 return 1;
1154         }
1155         to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1156         if (sg_count == 1) {
1157                 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1158                 return sg_count;
1159         }
1160         if (is_sec1) {
1161                 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1162                 return sg_count;
1163         }
1164         sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1165                                          &edesc->link_tbl[tbl_off]);
1166         if (sg_count == 1) {
1167                 /* Only one segment now, so no link tbl needed */
1168                 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1169                 return sg_count;
1170         }
1171         to_talitos_ptr(ptr, edesc->dma_link_tbl +
1172                             tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1173         to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1174
1175         return sg_count;
1176 }
1177
1178 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1179                           unsigned int len, struct talitos_edesc *edesc,
1180                           struct talitos_ptr *ptr, int sg_count,
1181                           unsigned int offset, int tbl_off)
1182 {
1183         return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1184                                   tbl_off, 0);
1185 }
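/*
 * Return value convention (as relied on by the callers in this file):
 * a value greater than 1 means the data was routed through the link
 * table (or, on SEC1, the bounce buffer at edesc->buf) and the DMA
 * area must be synced to the device before submission; a value of 1
 * means the pointer references the scatterlist data directly and no
 * link table entry was consumed.
 */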
1186
1187 /*
1188  * fill in and submit ipsec_esp descriptor
1189  */
1190 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1191                      void (*callback)(struct device *dev,
1192                                       struct talitos_desc *desc,
1193                                       void *context, int error))
1194 {
1195         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1196         unsigned int authsize = crypto_aead_authsize(aead);
1197         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1198         struct device *dev = ctx->dev;
1199         struct talitos_desc *desc = &edesc->desc;
1200         unsigned int cryptlen = areq->cryptlen;
1201         unsigned int ivsize = crypto_aead_ivsize(aead);
1202         int tbl_off = 0;
1203         int sg_count, ret;
1204         int elen = 0;
1205         bool sync_needed = false;
1206         struct talitos_private *priv = dev_get_drvdata(dev);
1207         bool is_sec1 = has_ftr_sec1(priv);
1208         bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1209         struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1210         struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1211
1212         /* hmac key */
1213         to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1214
1215         sg_count = edesc->src_nents ?: 1;
1216         if (is_sec1 && sg_count > 1)
1217                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1218                                   areq->assoclen + cryptlen);
1219         else
1220                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1221                                       (areq->src == areq->dst) ?
1222                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1223
1224         /* hmac data */
1225         ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1226                              &desc->ptr[1], sg_count, 0, tbl_off);
1227
1228         if (ret > 1) {
1229                 tbl_off += ret;
1230                 sync_needed = true;
1231         }
1232
1233         /* cipher iv */
1234         to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1235
1236         /* cipher key */
1237         to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1238                        ctx->enckeylen, is_sec1);
1239
1240         /*
1241          * cipher in
1242          * map and adjust cipher len to aead request cryptlen.
1243          * extent is bytes of HMAC appended to ciphertext,
1244          * typically 12 for ipsec
1245          */
1246         if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1247                 elen = authsize;
1248
1249         ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1250                                  sg_count, areq->assoclen, tbl_off, elen);
1251
1252         if (ret > 1) {
1253                 tbl_off += ret;
1254                 sync_needed = true;
1255         }
1256
1257         /* cipher out */
1258         if (areq->src != areq->dst) {
1259                 sg_count = edesc->dst_nents ? : 1;
1260                 if (!is_sec1 || sg_count == 1)
1261                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1262         }
1263
1264         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1265                              sg_count, areq->assoclen, tbl_off);
1266
1267         if (is_ipsec_esp)
1268                 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1269
1270         /* ICV data */
1271         if (ret > 1) {
1272                 tbl_off += ret;
1273                 edesc->icv_ool = true;
1274                 sync_needed = true;
1275
1276                 if (is_ipsec_esp) {
1277                         struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1278                         int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1279                                      sizeof(struct talitos_ptr) + authsize;
1280
1281                         /* Add an entry to the link table for ICV data */
1282                         to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1283                         to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1284                                                is_sec1);
1285
1286                         /* icv data follows link tables */
1287                         to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1288                                        authsize, is_sec1);
1289                 } else {
1290                         dma_addr_t addr = edesc->dma_link_tbl;
1291
1292                         if (is_sec1)
1293                                 addr += areq->assoclen + cryptlen;
1294                         else
1295                                 addr += sizeof(struct talitos_ptr) * tbl_off;
1296
1297                         to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1298                 }
1299         } else if (!is_ipsec_esp) {
1300                 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1301                                      &desc->ptr[6], sg_count, areq->assoclen +
1302                                                               cryptlen,
1303                                      tbl_off);
1304                 if (ret > 1) {
1305                         tbl_off += ret;
1306                         edesc->icv_ool = true;
1307                         sync_needed = true;
1308                 } else {
1309                         edesc->icv_ool = false;
1310                 }
1311         } else {
1312                 edesc->icv_ool = false;
1313         }
1314
1315         /* iv out */
1316         if (is_ipsec_esp)
1317                 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1318                                        DMA_FROM_DEVICE);
1319
1320         if (sync_needed)
1321                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1322                                            edesc->dma_len,
1323                                            DMA_BIDIRECTIONAL);
1324
1325         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1326         if (ret != -EINPROGRESS) {
1327                 ipsec_esp_unmap(dev, edesc, areq);
1328                 kfree(edesc);
1329         }
1330         return ret;
1331 }
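/*
 * Illustrative sketch (not part of the driver): roughly how an in-kernel
 * user drives an "authenc(hmac(sha1),cbc(aes))" transform that reaches
 * aead_encrypt() below and is mapped onto the single-pass descriptor by
 * ipsec_esp() above.  Buffer layout and sizes are assumptions for the
 * example only; key setup is omitted because authenc expects an
 * RTA-encoded key blob.
 */
static int __maybe_unused talitos_aead_usage_sketch(struct scatterlist *src,
                                                    struct scatterlist *dst,
                                                    unsigned int assoclen,
                                                    unsigned int cryptlen,
                                                    u8 *iv)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct crypto_wait wait;
        int err;

        crypto_init_wait(&wait);

        tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* truncated 12-byte ICV, as commonly used by IPsec */
        err = crypto_aead_setauthsize(tfm, 12);
        if (err)
                goto out_free_tfm;

        /* crypto_aead_setkey() with an RTA-encoded authenc key would go here */

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &wait);
        /* src = assoc || plaintext; dst receives assoc || ciphertext || ICV */
        aead_request_set_ad(req, assoclen);
        aead_request_set_crypt(req, src, dst, cryptlen, iv);

        err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

        aead_request_free(req);
out_free_tfm:
        crypto_free_aead(tfm);
        return err;
}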
1332
1333 /*
1334  * allocate and map the extended descriptor
1335  */
1336 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1337                                                  struct scatterlist *src,
1338                                                  struct scatterlist *dst,
1339                                                  u8 *iv,
1340                                                  unsigned int assoclen,
1341                                                  unsigned int cryptlen,
1342                                                  unsigned int authsize,
1343                                                  unsigned int ivsize,
1344                                                  int icv_stashing,
1345                                                  u32 cryptoflags,
1346                                                  bool encrypt)
1347 {
1348         struct talitos_edesc *edesc;
1349         int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1350         dma_addr_t iv_dma = 0;
1351         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1352                       GFP_ATOMIC;
1353         struct talitos_private *priv = dev_get_drvdata(dev);
1354         bool is_sec1 = has_ftr_sec1(priv);
1355         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1356
1357         if (cryptlen + authsize > max_len) {
1358                 dev_err(dev, "length exceeds h/w max limit\n");
1359                 return ERR_PTR(-EINVAL);
1360         }
1361
1362         if (!dst || dst == src) {
1363                 src_len = assoclen + cryptlen + authsize;
1364                 src_nents = sg_nents_for_len(src, src_len);
1365                 if (src_nents < 0) {
1366                         dev_err(dev, "Invalid number of src SG.\n");
1367                         return ERR_PTR(-EINVAL);
1368                 }
1369                 src_nents = (src_nents == 1) ? 0 : src_nents;
1370                 dst_nents = dst ? src_nents : 0;
1371                 dst_len = 0;
1372         } else { /* dst && dst != src */
1373                 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1374                 src_nents = sg_nents_for_len(src, src_len);
1375                 if (src_nents < 0) {
1376                         dev_err(dev, "Invalid number of src SG.\n");
1377                         return ERR_PTR(-EINVAL);
1378                 }
1379                 src_nents = (src_nents == 1) ? 0 : src_nents;
1380                 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1381                 dst_nents = sg_nents_for_len(dst, dst_len);
1382                 if (dst_nents < 0) {
1383                         dev_err(dev, "Invalid number of dst SG.\n");
1384                         return ERR_PTR(-EINVAL);
1385                 }
1386                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1387         }
1388
1389         /*
1390          * allocate space for base edesc plus the link tables,
1391          * allowing for two separate entries for AD and generated ICV (+ 2),
1392          * and space for two sets of ICVs (stashed and generated)
1393          */
1394         alloc_len = sizeof(struct talitos_edesc);
1395         if (src_nents || dst_nents) {
1396                 if (is_sec1)
1397                         dma_len = (src_nents ? src_len : 0) +
1398                                   (dst_nents ? dst_len : 0);
1399                 else
1400                         dma_len = (src_nents + dst_nents + 2) *
1401                                   sizeof(struct talitos_ptr) + authsize * 2;
1402                 alloc_len += dma_len;
1403         } else {
1404                 dma_len = 0;
1405                 alloc_len += icv_stashing ? authsize : 0;
1406         }
1407
1408         /* if it's an ahash, add space for a second desc next to the first one */
1409         if (is_sec1 && !dst)
1410                 alloc_len += sizeof(struct talitos_desc);
1411         alloc_len += ivsize;
1412
1413         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1414         if (!edesc)
1415                 return ERR_PTR(-ENOMEM);
1416         if (ivsize) {
1417                 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1418                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1419         }
1420         memset(&edesc->desc, 0, sizeof(edesc->desc));
1421
1422         edesc->src_nents = src_nents;
1423         edesc->dst_nents = dst_nents;
1424         edesc->iv_dma = iv_dma;
1425         edesc->dma_len = dma_len;
1426         if (dma_len)
1427                 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1428                                                      edesc->dma_len,
1429                                                      DMA_BIDIRECTIONAL);
1430
1431         return edesc;
1432 }
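/*
 * Worked sizing example for talitos_edesc_alloc() above (illustrative
 * numbers, SEC2+ case): with src_nents = 3, dst_nents = 2 and
 * authsize = 20, the link table needs (3 + 2 + 2) entries, so
 * dma_len = 7 * sizeof(struct talitos_ptr) + 2 * 20 bytes, and
 * alloc_len = sizeof(struct talitos_edesc) + dma_len + ivsize.
 * On SEC1 the buffer instead holds flat copies of the data, so dma_len is
 * src_len + dst_len when both sides are scattered.
 */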
1433
1434 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1435                                               int icv_stashing, bool encrypt)
1436 {
1437         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1438         unsigned int authsize = crypto_aead_authsize(authenc);
1439         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1440         unsigned int ivsize = crypto_aead_ivsize(authenc);
1441
1442         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1443                                    iv, areq->assoclen, areq->cryptlen,
1444                                    authsize, ivsize, icv_stashing,
1445                                    areq->base.flags, encrypt);
1446 }
1447
1448 static int aead_encrypt(struct aead_request *req)
1449 {
1450         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1451         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1452         struct talitos_edesc *edesc;
1453
1454         /* allocate extended descriptor */
1455         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1456         if (IS_ERR(edesc))
1457                 return PTR_ERR(edesc);
1458
1459         /* set encrypt */
1460         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1461
1462         return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1463 }
1464
1465 static int aead_decrypt(struct aead_request *req)
1466 {
1467         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1468         unsigned int authsize = crypto_aead_authsize(authenc);
1469         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1470         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1471         struct talitos_edesc *edesc;
1472         void *icvdata;
1473
1474         req->cryptlen -= authsize;
1475
1476         /* allocate extended descriptor */
1477         edesc = aead_edesc_alloc(req, req->iv, 1, false);
1478         if (IS_ERR(edesc))
1479                 return PTR_ERR(edesc);
1480
1481         if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1482             ((!edesc->src_nents && !edesc->dst_nents) ||
1483              priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1484
1485                 /* decrypt and check the ICV */
1486                 edesc->desc.hdr = ctx->desc_hdr_template |
1487                                   DESC_HDR_DIR_INBOUND |
1488                                   DESC_HDR_MODE1_MDEU_CICV;
1489
1490                 /* reset integrity check result bits */
1491
1492                 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1493         }
1494
1495         /* Have to check the ICV with software */
1496         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1497
1498         /* stash incoming ICV for later cmp with ICV generated by the h/w */
1499         if (edesc->dma_len)
1500                 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1501                                                    edesc->dst_nents + 2];
1502         else
1503                 icvdata = &edesc->link_tbl[0];
1504
1505         sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1506                            req->assoclen + req->cryptlen - authsize);
1507
1508         return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1509 }
1510
1511 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1512                              const u8 *key, unsigned int keylen)
1513 {
1514         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1515         struct device *dev = ctx->dev;
1516         u32 tmp[DES_EXPKEY_WORDS];
1517
1518         if (keylen > TALITOS_MAX_KEY_SIZE) {
1519                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1520                 return -EINVAL;
1521         }
1522
1523         if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1524                      CRYPTO_TFM_REQ_WEAK_KEY) &&
1525             !des_ekey(tmp, key)) {
1526                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1527                 return -EINVAL;
1528         }
1529
1530         if (ctx->keylen)
1531                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1532
1533         memcpy(&ctx->key, key, keylen);
1534         ctx->keylen = keylen;
1535
1536         ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1537
1538         return 0;
1539 }
1540
1541 static void common_nonsnoop_unmap(struct device *dev,
1542                                   struct talitos_edesc *edesc,
1543                                   struct ablkcipher_request *areq)
1544 {
1545         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1546
1547         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1548         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1549
1550         if (edesc->dma_len)
1551                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1552                                  DMA_BIDIRECTIONAL);
1553 }
1554
1555 static void ablkcipher_done(struct device *dev,
1556                             struct talitos_desc *desc, void *context,
1557                             int err)
1558 {
1559         struct ablkcipher_request *areq = context;
1560         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1561         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1562         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1563         struct talitos_edesc *edesc;
1564
1565         edesc = container_of(desc, struct talitos_edesc, desc);
1566
1567         common_nonsnoop_unmap(dev, edesc, areq);
1568         memcpy(areq->info, ctx->iv, ivsize);
1569
1570         kfree(edesc);
1571
1572         areq->base.complete(&areq->base, err);
1573 }
1574
1575 static int common_nonsnoop(struct talitos_edesc *edesc,
1576                            struct ablkcipher_request *areq,
1577                            void (*callback) (struct device *dev,
1578                                              struct talitos_desc *desc,
1579                                              void *context, int error))
1580 {
1581         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1582         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1583         struct device *dev = ctx->dev;
1584         struct talitos_desc *desc = &edesc->desc;
1585         unsigned int cryptlen = areq->nbytes;
1586         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1587         int sg_count, ret;
1588         bool sync_needed = false;
1589         struct talitos_private *priv = dev_get_drvdata(dev);
1590         bool is_sec1 = has_ftr_sec1(priv);
1591
1592         /* first DWORD empty */
1593
1594         /* cipher iv */
1595         to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1596
1597         /* cipher key */
1598         to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1599
1600         sg_count = edesc->src_nents ?: 1;
1601         if (is_sec1 && sg_count > 1)
1602                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1603                                   cryptlen);
1604         else
1605                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1606                                       (areq->src == areq->dst) ?
1607                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1608         /*
1609          * cipher in
1610          */
1611         sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1612                                   &desc->ptr[3], sg_count, 0, 0);
1613         if (sg_count > 1)
1614                 sync_needed = true;
1615
1616         /* cipher out */
1617         if (areq->src != areq->dst) {
1618                 sg_count = edesc->dst_nents ? : 1;
1619                 if (!is_sec1 || sg_count == 1)
1620                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1621         }
1622
1623         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1624                              sg_count, 0, (edesc->src_nents + 1));
1625         if (ret > 1)
1626                 sync_needed = true;
1627
1628         /* iv out */
1629         map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1630                                DMA_FROM_DEVICE);
1631
1632         /* last DWORD empty */
1633
1634         if (sync_needed)
1635                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1636                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1637
1638         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1639         if (ret != -EINPROGRESS) {
1640                 common_nonsnoop_unmap(dev, edesc, areq);
1641                 kfree(edesc);
1642         }
1643         return ret;
1644 }
1645
1646 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1647                                                     areq, bool encrypt)
1648 {
1649         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1650         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1651         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1652
1653         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1654                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1655                                    areq->base.flags, encrypt);
1656 }
1657
1658 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1659 {
1660         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1661         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1662         struct talitos_edesc *edesc;
1663
1664         /* allocate extended descriptor */
1665         edesc = ablkcipher_edesc_alloc(areq, true);
1666         if (IS_ERR(edesc))
1667                 return PTR_ERR(edesc);
1668
1669         /* set encrypt */
1670         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1671
1672         return common_nonsnoop(edesc, areq, ablkcipher_done);
1673 }
1674
1675 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1676 {
1677         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1678         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1679         struct talitos_edesc *edesc;
1680
1681         /* allocate extended descriptor */
1682         edesc = ablkcipher_edesc_alloc(areq, false);
1683         if (IS_ERR(edesc))
1684                 return PTR_ERR(edesc);
1685
1686         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1687
1688         return common_nonsnoop(edesc, areq, ablkcipher_done);
1689 }
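/*
 * Illustrative sketch (not part of the driver): callers normally reach
 * ablkcipher_encrypt()/ablkcipher_decrypt() above through the skcipher
 * API, which the crypto core maps onto ablkcipher implementations.  The
 * "cbc(aes)" name, key and buffer parameters are assumptions for the
 * example only.
 */
static int __maybe_unused talitos_skcipher_usage_sketch(struct scatterlist *sg,
                                                        unsigned int nbytes,
                                                        const u8 *key,
                                                        unsigned int keylen,
                                                        u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct crypto_wait wait;
        int err;

        crypto_init_wait(&wait);

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      crypto_req_done, &wait);
        /* in-place CBC encryption: src == dst, iv is the per-request IV */
        skcipher_request_set_crypt(req, sg, sg, nbytes, iv);

        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}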
1690
1691 static void common_nonsnoop_hash_unmap(struct device *dev,
1692                                        struct talitos_edesc *edesc,
1693                                        struct ahash_request *areq)
1694 {
1695         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1696         struct talitos_private *priv = dev_get_drvdata(dev);
1697         bool is_sec1 = has_ftr_sec1(priv);
1698         struct talitos_desc *desc = &edesc->desc;
1699         struct talitos_desc *desc2 = (struct talitos_desc *)
1700                                      (edesc->buf + edesc->dma_len);
1701
1702         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1703         if (desc->next_desc &&
1704             desc->ptr[5].ptr != desc2->ptr[5].ptr)
1705                 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1706
1707         if (req_ctx->psrc)
1708                 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1709
1710         /* When using hashctx-in, must unmap it. */
1711         if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1712                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1713                                          DMA_TO_DEVICE);
1714         else if (desc->next_desc)
1715                 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1716                                          DMA_TO_DEVICE);
1717
1718         if (is_sec1 && req_ctx->nbuf)
1719                 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1720                                          DMA_TO_DEVICE);
1721
1722         if (edesc->dma_len)
1723                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1724                                  DMA_BIDIRECTIONAL);
1725
1726         if (edesc->desc.next_desc)
1727                 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1728                                  TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1729 }
1730
1731 static void ahash_done(struct device *dev,
1732                        struct talitos_desc *desc, void *context,
1733                        int err)
1734 {
1735         struct ahash_request *areq = context;
1736         struct talitos_edesc *edesc =
1737                  container_of(desc, struct talitos_edesc, desc);
1738         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1739
1740         if (!req_ctx->last && req_ctx->to_hash_later) {
1741                 /* Position any partial block for next update/final/finup */
1742                 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1743                 req_ctx->nbuf = req_ctx->to_hash_later;
1744         }
1745         common_nonsnoop_hash_unmap(dev, edesc, areq);
1746
1747         kfree(edesc);
1748
1749         areq->base.complete(&areq->base, err);
1750 }
1751
1752 /*
1753  * SEC1 doesn't like hashing a 0-sized message, so we pad it ourselves and
1754  * submit the standard pre-padded empty-message block (0x80 then zeros)
1755  */
1756 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1757                                struct talitos_edesc *edesc,
1758                                struct talitos_ptr *ptr)
1759 {
1760         static u8 padded_hash[64] = {
1761                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1762                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1763                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1764                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1765         };
1766
1767         pr_err_once("Bug in SEC1, padding ourself\n");
1768         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1769         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1770                                (char *)padded_hash, DMA_TO_DEVICE);
1771 }
1772
1773 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1774                                 struct ahash_request *areq, unsigned int length,
1775                                 void (*callback) (struct device *dev,
1776                                                   struct talitos_desc *desc,
1777                                                   void *context, int error))
1778 {
1779         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1780         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1781         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1782         struct device *dev = ctx->dev;
1783         struct talitos_desc *desc = &edesc->desc;
1784         int ret;
1785         bool sync_needed = false;
1786         struct talitos_private *priv = dev_get_drvdata(dev);
1787         bool is_sec1 = has_ftr_sec1(priv);
1788         int sg_count;
1789
1790         /* first DWORD empty */
1791
1792         /* hash context in */
1793         if (!req_ctx->first || req_ctx->swinit) {
1794                 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1795                                               req_ctx->hw_context_size,
1796                                               req_ctx->hw_context,
1797                                               DMA_TO_DEVICE);
1798                 req_ctx->swinit = 0;
1799         }
1800         /* Indicate next op is not the first. */
1801         req_ctx->first = 0;
1802
1803         /* HMAC key */
1804         if (ctx->keylen)
1805                 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1806                                is_sec1);
1807
1808         if (is_sec1 && req_ctx->nbuf)
1809                 length -= req_ctx->nbuf;
1810
1811         sg_count = edesc->src_nents ?: 1;
1812         if (is_sec1 && sg_count > 1)
1813                 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1814         else if (length)
1815                 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1816                                       DMA_TO_DEVICE);
1817         /*
1818          * data in
1819          */
1820         if (is_sec1 && req_ctx->nbuf) {
1821                 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1822                                        req_ctx->buf[req_ctx->buf_idx],
1823                                        DMA_TO_DEVICE);
1824         } else {
1825                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1826                                           &desc->ptr[3], sg_count, 0, 0);
1827                 if (sg_count > 1)
1828                         sync_needed = true;
1829         }
1830
1831         /* fifth DWORD empty */
1832
1833         /* hash/HMAC out -or- hash context out */
1834         if (req_ctx->last)
1835                 map_single_talitos_ptr(dev, &desc->ptr[5],
1836                                        crypto_ahash_digestsize(tfm),
1837                                        areq->result, DMA_FROM_DEVICE);
1838         else
1839                 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1840                                               req_ctx->hw_context_size,
1841                                               req_ctx->hw_context,
1842                                               DMA_FROM_DEVICE);
1843
1844         /* last DWORD empty */
1845
1846         if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1847                 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1848
1849         if (is_sec1 && req_ctx->nbuf && length) {
1850                 struct talitos_desc *desc2 = (struct talitos_desc *)
1851                                              (edesc->buf + edesc->dma_len);
1852                 dma_addr_t next_desc;
1853
1854                 memset(desc2, 0, sizeof(*desc2));
1855                 desc2->hdr = desc->hdr;
1856                 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1857                 desc2->hdr1 = desc2->hdr;
1858                 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1859                 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1860                 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1861
1862                 if (desc->ptr[1].ptr)
1863                         copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1864                                          is_sec1);
1865                 else
1866                         map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1867                                                       req_ctx->hw_context_size,
1868                                                       req_ctx->hw_context,
1869                                                       DMA_TO_DEVICE);
1870                 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1871                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1872                                           &desc2->ptr[3], sg_count, 0, 0);
1873                 if (sg_count > 1)
1874                         sync_needed = true;
1875                 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1876                 if (req_ctx->last)
1877                         map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1878                                                       req_ctx->hw_context_size,
1879                                                       req_ctx->hw_context,
1880                                                       DMA_FROM_DEVICE);
1881
1882                 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1883                                            DMA_BIDIRECTIONAL);
1884                 desc->next_desc = cpu_to_be32(next_desc);
1885         }
1886
1887         if (sync_needed)
1888                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1889                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1890
1891         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1892         if (ret != -EINPROGRESS) {
1893                 common_nonsnoop_hash_unmap(dev, edesc, areq);
1894                 kfree(edesc);
1895         }
1896         return ret;
1897 }
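/*
 * Note on the SEC1 path in common_nonsnoop_hash() above: when a request
 * carries both previously buffered bytes (req_ctx->nbuf) and fresh
 * scatterlist data, a second descriptor (desc2) is laid out in edesc->buf
 * behind the DMA area and chained to the first via desc->next_desc, so the
 * buffered block and the new data are hashed as one continued stream in a
 * single submission.
 */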
1898
1899 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1900                                                unsigned int nbytes)
1901 {
1902         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1903         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1904         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1905         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1906         bool is_sec1 = has_ftr_sec1(priv);
1907
1908         if (is_sec1)
1909                 nbytes -= req_ctx->nbuf;
1910
1911         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1912                                    nbytes, 0, 0, 0, areq->base.flags, false);
1913 }
1914
1915 static int ahash_init(struct ahash_request *areq)
1916 {
1917         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1918         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1919         struct device *dev = ctx->dev;
1920         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1921         unsigned int size;
1922         dma_addr_t dma;
1923
1924         /* Initialize the context */
1925         req_ctx->buf_idx = 0;
1926         req_ctx->nbuf = 0;
1927         req_ctx->first = 1; /* first indicates h/w must init its context */
1928         req_ctx->swinit = 0; /* assume h/w init of context */
1929         size =  (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1930                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1931                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1932         req_ctx->hw_context_size = size;
1933
1934         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1935                              DMA_TO_DEVICE);
1936         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1937
1938         return 0;
1939 }
1940
1941 /*
1942  * on h/w without explicit sha224 support, we initialize h/w context
1943  * manually with sha224 constants, and tell it to run sha256.
1944  */
1945 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1946 {
1947         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1948
1949         req_ctx->hw_context[0] = SHA224_H0;
1950         req_ctx->hw_context[1] = SHA224_H1;
1951         req_ctx->hw_context[2] = SHA224_H2;
1952         req_ctx->hw_context[3] = SHA224_H3;
1953         req_ctx->hw_context[4] = SHA224_H4;
1954         req_ctx->hw_context[5] = SHA224_H5;
1955         req_ctx->hw_context[6] = SHA224_H6;
1956         req_ctx->hw_context[7] = SHA224_H7;
1957
1958         /* init 64-bit count */
1959         req_ctx->hw_context[8] = 0;
1960         req_ctx->hw_context[9] = 0;
1961
1962         ahash_init(areq);
1963         req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1964
1965         return 0;
1966 }
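/*
 * Background for the swinit above: SHA-224 uses the same block processing
 * and compression function as SHA-256 but different initial hash values
 * (FIPS 180-4), with the output truncated to 224 bits.  Loading the
 * SHA-224 constants by hand and running the MDEU in SHA-256 mode therefore
 * yields the correct state; the shorter 28-byte result length comes from
 * crypto_ahash_digestsize() when the digest is mapped out in
 * common_nonsnoop_hash().
 */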
1967
1968 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1969 {
1970         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1971         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1972         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1973         struct talitos_edesc *edesc;
1974         unsigned int blocksize =
1975                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1976         unsigned int nbytes_to_hash;
1977         unsigned int to_hash_later;
1978         unsigned int nsg;
1979         int nents;
1980         struct device *dev = ctx->dev;
1981         struct talitos_private *priv = dev_get_drvdata(dev);
1982         bool is_sec1 = has_ftr_sec1(priv);
1983         u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1984
1985         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1986                 /* Buffer up to one whole block */
1987                 nents = sg_nents_for_len(areq->src, nbytes);
1988                 if (nents < 0) {
1989                         dev_err(ctx->dev, "Invalid number of src SG.\n");
1990                         return nents;
1991                 }
1992                 sg_copy_to_buffer(areq->src, nents,
1993                                   ctx_buf + req_ctx->nbuf, nbytes);
1994                 req_ctx->nbuf += nbytes;
1995                 return 0;
1996         }
1997
1998         /* At least (blocksize + 1) bytes are available to hash */
1999         nbytes_to_hash = nbytes + req_ctx->nbuf;
2000         to_hash_later = nbytes_to_hash & (blocksize - 1);
2001
2002         if (req_ctx->last)
2003                 to_hash_later = 0;
2004         else if (to_hash_later)
2005                 /* There is a partial block. Hash the full block(s) now */
2006                 nbytes_to_hash -= to_hash_later;
2007         else {
2008                 /* Keep one block buffered */
2009                 nbytes_to_hash -= blocksize;
2010                 to_hash_later = blocksize;
2011         }
2012
2013         /* Chain in any previously buffered data */
2014         if (!is_sec1 && req_ctx->nbuf) {
2015                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2016                 sg_init_table(req_ctx->bufsl, nsg);
2017                 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2018                 if (nsg > 1)
2019                         sg_chain(req_ctx->bufsl, 2, areq->src);
2020                 req_ctx->psrc = req_ctx->bufsl;
2021         } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2022                 int offset;
2023
2024                 if (nbytes_to_hash > blocksize)
2025                         offset = blocksize - req_ctx->nbuf;
2026                 else
2027                         offset = nbytes_to_hash - req_ctx->nbuf;
2028                 nents = sg_nents_for_len(areq->src, offset);
2029                 if (nents < 0) {
2030                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2031                         return nents;
2032                 }
2033                 sg_copy_to_buffer(areq->src, nents,
2034                                   ctx_buf + req_ctx->nbuf, offset);
2035                 req_ctx->nbuf += offset;
2036                 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2037                                                  offset);
2038         } else
2039                 req_ctx->psrc = areq->src;
2040
2041         if (to_hash_later) {
2042                 nents = sg_nents_for_len(areq->src, nbytes);
2043                 if (nents < 0) {
2044                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2045                         return nents;
2046                 }
2047                 sg_pcopy_to_buffer(areq->src, nents,
2048                                    req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2049                                    to_hash_later,
2050                                    nbytes - to_hash_later);
2051         }
2052         req_ctx->to_hash_later = to_hash_later;
2053
2054         /* Allocate extended descriptor */
2055         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2056         if (IS_ERR(edesc))
2057                 return PTR_ERR(edesc);
2058
2059         edesc->desc.hdr = ctx->desc_hdr_template;
2060
2061         /* On last one, request SEC to pad; otherwise continue */
2062         if (req_ctx->last)
2063                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2064         else
2065                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2066
2067         /* request SEC to INIT hash. */
2068         if (req_ctx->first && !req_ctx->swinit)
2069                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2070
2071         /* When the tfm context has a keylen, it's an HMAC.
2072          * A first or last (ie. not middle) descriptor must request HMAC.
2073          * A first or last (i.e. not middle) descriptor must request HMAC.
2074         if (ctx->keylen && (req_ctx->first || req_ctx->last))
2075                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2076
2077         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2078 }
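/*
 * Worked example for the buffering in ahash_process_req() above
 * (illustrative numbers): with a 64-byte block size, req_ctx->nbuf = 10
 * buffered bytes and an update of nbytes = 100, nbytes_to_hash is 110 and
 * to_hash_later is 110 % 64 = 46, so 64 bytes are hashed now and 46 are
 * stashed for the next call.  If the total were an exact multiple of the
 * block size, one full block would be held back instead, so that
 * final()/finup() always has data left to pad.
 */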
2079
2080 static int ahash_update(struct ahash_request *areq)
2081 {
2082         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2083
2084         req_ctx->last = 0;
2085
2086         return ahash_process_req(areq, areq->nbytes);
2087 }
2088
2089 static int ahash_final(struct ahash_request *areq)
2090 {
2091         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2092
2093         req_ctx->last = 1;
2094
2095         return ahash_process_req(areq, 0);
2096 }
2097
2098 static int ahash_finup(struct ahash_request *areq)
2099 {
2100         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2101
2102         req_ctx->last = 1;
2103
2104         return ahash_process_req(areq, areq->nbytes);
2105 }
2106
2107 static int ahash_digest(struct ahash_request *areq)
2108 {
2109         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2110         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2111
2112         ahash->init(areq);
2113         req_ctx->last = 1;
2114
2115         return ahash_process_req(areq, areq->nbytes);
2116 }
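/*
 * Illustrative sketch (not part of the driver): a one-shot digest through
 * the ahash API follows the same pattern as keyhash() below and exercises
 * ahash_digest() above when a talitos hash implementation is selected.
 * The "sha256" name and buffer parameters are assumptions for the example
 * only.
 */
static int __maybe_unused talitos_ahash_usage_sketch(const u8 *data,
                                                     unsigned int len,
                                                     u8 *digest)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        struct crypto_wait wait;
        int err;

        crypto_init_wait(&wait);

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        sg_init_one(&sg, data, len);
        ahash_request_set_crypt(req, &sg, digest, len);

        err = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
out_free_tfm:
        crypto_free_ahash(tfm);
        return err;
}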
2117
2118 static int ahash_export(struct ahash_request *areq, void *out)
2119 {
2120         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2121         struct talitos_export_state *export = out;
2122         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2123         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2124         struct device *dev = ctx->dev;
2125         dma_addr_t dma;
2126
2127         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2128                              DMA_FROM_DEVICE);
2129         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2130
2131         memcpy(export->hw_context, req_ctx->hw_context,
2132                req_ctx->hw_context_size);
2133         memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2134         export->swinit = req_ctx->swinit;
2135         export->first = req_ctx->first;
2136         export->last = req_ctx->last;
2137         export->to_hash_later = req_ctx->to_hash_later;
2138         export->nbuf = req_ctx->nbuf;
2139
2140         return 0;
2141 }
2142
2143 static int ahash_import(struct ahash_request *areq, const void *in)
2144 {
2145         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2146         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2147         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2148         struct device *dev = ctx->dev;
2149         const struct talitos_export_state *export = in;
2150         unsigned int size;
2151         dma_addr_t dma;
2152
2153         memset(req_ctx, 0, sizeof(*req_ctx));
2154         size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2155                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2156                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2157         req_ctx->hw_context_size = size;
2158         memcpy(req_ctx->hw_context, export->hw_context, size);
2159         memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2160         req_ctx->swinit = export->swinit;
2161         req_ctx->first = export->first;
2162         req_ctx->last = export->last;
2163         req_ctx->to_hash_later = export->to_hash_later;
2164         req_ctx->nbuf = export->nbuf;
2165
2166         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2167                              DMA_TO_DEVICE);
2168         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2169
2170         return 0;
2171 }
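/*
 * Illustrative sketch (not part of the driver): saving and restoring a
 * partial hash state through the generic API lands in ahash_export()/
 * ahash_import() above.  The state buffer must be crypto_ahash_statesize()
 * bytes, which for this driver corresponds to struct talitos_export_state.
 */
static int __maybe_unused talitos_ahash_export_sketch(struct ahash_request *req,
                                                      struct ahash_request *req2)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        void *state;
        int err;

        state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        /* snapshot the partial state of req ... */
        err = crypto_ahash_export(req, state);
        if (!err)
                /* ... and resume it on a second request of the same tfm */
                err = crypto_ahash_import(req2, state);

        kfree(state);
        return err;
}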
2172
2173 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2174                    u8 *hash)
2175 {
2176         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2177
2178         struct scatterlist sg[1];
2179         struct ahash_request *req;
2180         struct crypto_wait wait;
2181         int ret;
2182
2183         crypto_init_wait(&wait);
2184
2185         req = ahash_request_alloc(tfm, GFP_KERNEL);
2186         if (!req)
2187                 return -ENOMEM;
2188
2189         /* Keep tfm keylen == 0 during hash of the long key */
2190         ctx->keylen = 0;
2191         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2192                                    crypto_req_done, &wait);
2193
2194         sg_init_one(&sg[0], key, keylen);
2195
2196         ahash_request_set_crypt(req, sg, hash, keylen);
2197         ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2198
2199         ahash_request_free(req);
2200
2201         return ret;
2202 }
2203
2204 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2205                         unsigned int keylen)
2206 {
2207         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2208         struct device *dev = ctx->dev;
2209         unsigned int blocksize =
2210                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2211         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2212         unsigned int keysize = keylen;
2213         u8 hash[SHA512_DIGEST_SIZE];
2214         int ret;
2215
2216         if (keylen <= blocksize)
2217                 memcpy(ctx->key, key, keysize);
2218         else {
2219                 /* HMAC keys longer than the block size are first hashed (RFC 2104) */
2220                 ret = keyhash(tfm, key, keylen, hash);
2221
2222                 if (ret) {
2223                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2224                         return -EINVAL;
2225                 }
2226
2227                 keysize = digestsize;
2228                 memcpy(ctx->key, hash, digestsize);
2229         }
2230
2231         if (ctx->keylen)
2232                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2233
2234         ctx->keylen = keysize;
2235         ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2236
2237         return 0;
2238 }
2239
2240
2241 struct talitos_alg_template {
2242         u32 type;
2243         u32 priority;
2244         union {
2245                 struct crypto_alg crypto;
2246                 struct ahash_alg hash;
2247                 struct aead_alg aead;
2248         } alg;
2249         __be32 desc_hdr_template;
2250 };
2251
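/*
 * Each desc_hdr_template below pre-encodes the descriptor header for one
 * algorithm: the descriptor type (e.g. single-pass IPSEC_ESP vs.
 * HMAC_SNOOP_NO_AFEU) and, for the AEAD entries, the cipher unit and mode
 * in the SEL0/MODE0 fields plus the message-digest unit and mode in the
 * SEL1/MODE1 fields, i.e. the DESC_HDR_* flags or'ed together in each
 * entry.
 */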
2252 static struct talitos_alg_template driver_algs[] = {
2253         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2254         {       .type = CRYPTO_ALG_TYPE_AEAD,
2255                 .alg.aead = {
2256                         .base = {
2257                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2258                                 .cra_driver_name = "authenc-hmac-sha1-"
2259                                                    "cbc-aes-talitos",
2260                                 .cra_blocksize = AES_BLOCK_SIZE,
2261                                 .cra_flags = CRYPTO_ALG_ASYNC,
2262                         },
2263                         .ivsize = AES_BLOCK_SIZE,
2264                         .maxauthsize = SHA1_DIGEST_SIZE,
2265                 },
2266                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2267                                      DESC_HDR_SEL0_AESU |
2268                                      DESC_HDR_MODE0_AESU_CBC |
2269                                      DESC_HDR_SEL1_MDEUA |
2270                                      DESC_HDR_MODE1_MDEU_INIT |
2271                                      DESC_HDR_MODE1_MDEU_PAD |
2272                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2273         },
2274         {       .type = CRYPTO_ALG_TYPE_AEAD,
2275                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2276                 .alg.aead = {
2277                         .base = {
2278                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2279                                 .cra_driver_name = "authenc-hmac-sha1-"
2280                                                    "cbc-aes-talitos-hsna",
2281                                 .cra_blocksize = AES_BLOCK_SIZE,
2282                                 .cra_flags = CRYPTO_ALG_ASYNC,
2283                         },
2284                         .ivsize = AES_BLOCK_SIZE,
2285                         .maxauthsize = SHA1_DIGEST_SIZE,
2286                 },
2287                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2288                                      DESC_HDR_SEL0_AESU |
2289                                      DESC_HDR_MODE0_AESU_CBC |
2290                                      DESC_HDR_SEL1_MDEUA |
2291                                      DESC_HDR_MODE1_MDEU_INIT |
2292                                      DESC_HDR_MODE1_MDEU_PAD |
2293                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2294         },
2295         {       .type = CRYPTO_ALG_TYPE_AEAD,
2296                 .alg.aead = {
2297                         .base = {
2298                                 .cra_name = "authenc(hmac(sha1),"
2299                                             "cbc(des3_ede))",
2300                                 .cra_driver_name = "authenc-hmac-sha1-"
2301                                                    "cbc-3des-talitos",
2302                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2303                                 .cra_flags = CRYPTO_ALG_ASYNC,
2304                         },
2305                         .ivsize = DES3_EDE_BLOCK_SIZE,
2306                         .maxauthsize = SHA1_DIGEST_SIZE,
2307                 },
2308                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2309                                      DESC_HDR_SEL0_DEU |
2310                                      DESC_HDR_MODE0_DEU_CBC |
2311                                      DESC_HDR_MODE0_DEU_3DES |
2312                                      DESC_HDR_SEL1_MDEUA |
2313                                      DESC_HDR_MODE1_MDEU_INIT |
2314                                      DESC_HDR_MODE1_MDEU_PAD |
2315                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2316         },
2317         {       .type = CRYPTO_ALG_TYPE_AEAD,
2318                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2319                 .alg.aead = {
2320                         .base = {
2321                                 .cra_name = "authenc(hmac(sha1),"
2322                                             "cbc(des3_ede))",
2323                                 .cra_driver_name = "authenc-hmac-sha1-"
2324                                                    "cbc-3des-talitos-hsna",
2325                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2326                                 .cra_flags = CRYPTO_ALG_ASYNC,
2327                         },
2328                         .ivsize = DES3_EDE_BLOCK_SIZE,
2329                         .maxauthsize = SHA1_DIGEST_SIZE,
2330                 },
2331                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2332                                      DESC_HDR_SEL0_DEU |
2333                                      DESC_HDR_MODE0_DEU_CBC |
2334                                      DESC_HDR_MODE0_DEU_3DES |
2335                                      DESC_HDR_SEL1_MDEUA |
2336                                      DESC_HDR_MODE1_MDEU_INIT |
2337                                      DESC_HDR_MODE1_MDEU_PAD |
2338                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2339         },
2340         {       .type = CRYPTO_ALG_TYPE_AEAD,
2341                 .alg.aead = {
2342                         .base = {
2343                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2344                                 .cra_driver_name = "authenc-hmac-sha224-"
2345                                                    "cbc-aes-talitos",
2346                                 .cra_blocksize = AES_BLOCK_SIZE,
2347                                 .cra_flags = CRYPTO_ALG_ASYNC,
2348                         },
2349                         .ivsize = AES_BLOCK_SIZE,
2350                         .maxauthsize = SHA224_DIGEST_SIZE,
2351                 },
2352                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2353                                      DESC_HDR_SEL0_AESU |
2354                                      DESC_HDR_MODE0_AESU_CBC |
2355                                      DESC_HDR_SEL1_MDEUA |
2356                                      DESC_HDR_MODE1_MDEU_INIT |
2357                                      DESC_HDR_MODE1_MDEU_PAD |
2358                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2359         },
2360         {       .type = CRYPTO_ALG_TYPE_AEAD,
2361                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2362                 .alg.aead = {
2363                         .base = {
2364                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2365                                 .cra_driver_name = "authenc-hmac-sha224-"
2366                                                    "cbc-aes-talitos-hsna",
2367                                 .cra_blocksize = AES_BLOCK_SIZE,
2368                                 .cra_flags = CRYPTO_ALG_ASYNC,
2369                         },
2370                         .ivsize = AES_BLOCK_SIZE,
2371                         .maxauthsize = SHA224_DIGEST_SIZE,
2372                 },
2373                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2374                                      DESC_HDR_SEL0_AESU |
2375                                      DESC_HDR_MODE0_AESU_CBC |
2376                                      DESC_HDR_SEL1_MDEUA |
2377                                      DESC_HDR_MODE1_MDEU_INIT |
2378                                      DESC_HDR_MODE1_MDEU_PAD |
2379                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2380         },
2381         {       .type = CRYPTO_ALG_TYPE_AEAD,
2382                 .alg.aead = {
2383                         .base = {
2384                                 .cra_name = "authenc(hmac(sha224),"
2385                                             "cbc(des3_ede))",
2386                                 .cra_driver_name = "authenc-hmac-sha224-"
2387                                                    "cbc-3des-talitos",
2388                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2389                                 .cra_flags = CRYPTO_ALG_ASYNC,
2390                         },
2391                         .ivsize = DES3_EDE_BLOCK_SIZE,
2392                         .maxauthsize = SHA224_DIGEST_SIZE,
2393                 },
2394                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2395                                      DESC_HDR_SEL0_DEU |
2396                                      DESC_HDR_MODE0_DEU_CBC |
2397                                      DESC_HDR_MODE0_DEU_3DES |
2398                                      DESC_HDR_SEL1_MDEUA |
2399                                      DESC_HDR_MODE1_MDEU_INIT |
2400                                      DESC_HDR_MODE1_MDEU_PAD |
2401                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2402         },
2403         {       .type = CRYPTO_ALG_TYPE_AEAD,
2404                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2405                 .alg.aead = {
2406                         .base = {
2407                                 .cra_name = "authenc(hmac(sha224),"
2408                                             "cbc(des3_ede))",
2409                                 .cra_driver_name = "authenc-hmac-sha224-"
2410                                                    "cbc-3des-talitos-hsna",
2411                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2412                                 .cra_flags = CRYPTO_ALG_ASYNC,
2413                         },
2414                         .ivsize = DES3_EDE_BLOCK_SIZE,
2415                         .maxauthsize = SHA224_DIGEST_SIZE,
2416                 },
2417                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2418                                      DESC_HDR_SEL0_DEU |
2419                                      DESC_HDR_MODE0_DEU_CBC |
2420                                      DESC_HDR_MODE0_DEU_3DES |
2421                                      DESC_HDR_SEL1_MDEUA |
2422                                      DESC_HDR_MODE1_MDEU_INIT |
2423                                      DESC_HDR_MODE1_MDEU_PAD |
2424                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2425         },
2426         {       .type = CRYPTO_ALG_TYPE_AEAD,
2427                 .alg.aead = {
2428                         .base = {
2429                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2430                                 .cra_driver_name = "authenc-hmac-sha256-"
2431                                                    "cbc-aes-talitos",
2432                                 .cra_blocksize = AES_BLOCK_SIZE,
2433                                 .cra_flags = CRYPTO_ALG_ASYNC,
2434                         },
2435                         .ivsize = AES_BLOCK_SIZE,
2436                         .maxauthsize = SHA256_DIGEST_SIZE,
2437                 },
2438                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2439                                      DESC_HDR_SEL0_AESU |
2440                                      DESC_HDR_MODE0_AESU_CBC |
2441                                      DESC_HDR_SEL1_MDEUA |
2442                                      DESC_HDR_MODE1_MDEU_INIT |
2443                                      DESC_HDR_MODE1_MDEU_PAD |
2444                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2445         },
2446         {       .type = CRYPTO_ALG_TYPE_AEAD,
2447                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2448                 .alg.aead = {
2449                         .base = {
2450                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2451                                 .cra_driver_name = "authenc-hmac-sha256-"
2452                                                    "cbc-aes-talitos-hsna",
2453                                 .cra_blocksize = AES_BLOCK_SIZE,
2454                                 .cra_flags = CRYPTO_ALG_ASYNC,
2455                         },
2456                         .ivsize = AES_BLOCK_SIZE,
2457                         .maxauthsize = SHA256_DIGEST_SIZE,
2458                 },
2459                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2460                                      DESC_HDR_SEL0_AESU |
2461                                      DESC_HDR_MODE0_AESU_CBC |
2462                                      DESC_HDR_SEL1_MDEUA |
2463                                      DESC_HDR_MODE1_MDEU_INIT |
2464                                      DESC_HDR_MODE1_MDEU_PAD |
2465                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2466         },
2467         {       .type = CRYPTO_ALG_TYPE_AEAD,
2468                 .alg.aead = {
2469                         .base = {
2470                                 .cra_name = "authenc(hmac(sha256),"
2471                                             "cbc(des3_ede))",
2472                                 .cra_driver_name = "authenc-hmac-sha256-"
2473                                                    "cbc-3des-talitos",
2474                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2475                                 .cra_flags = CRYPTO_ALG_ASYNC,
2476                         },
2477                         .ivsize = DES3_EDE_BLOCK_SIZE,
2478                         .maxauthsize = SHA256_DIGEST_SIZE,
2479                 },
2480                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2481                                      DESC_HDR_SEL0_DEU |
2482                                      DESC_HDR_MODE0_DEU_CBC |
2483                                      DESC_HDR_MODE0_DEU_3DES |
2484                                      DESC_HDR_SEL1_MDEUA |
2485                                      DESC_HDR_MODE1_MDEU_INIT |
2486                                      DESC_HDR_MODE1_MDEU_PAD |
2487                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2488         },
2489         {       .type = CRYPTO_ALG_TYPE_AEAD,
2490                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2491                 .alg.aead = {
2492                         .base = {
2493                                 .cra_name = "authenc(hmac(sha256),"
2494                                             "cbc(des3_ede))",
2495                                 .cra_driver_name = "authenc-hmac-sha256-"
2496                                                    "cbc-3des-talitos-hsna",
2497                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2498                                 .cra_flags = CRYPTO_ALG_ASYNC,
2499                         },
2500                         .ivsize = DES3_EDE_BLOCK_SIZE,
2501                         .maxauthsize = SHA256_DIGEST_SIZE,
2502                 },
2503                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2504                                      DESC_HDR_SEL0_DEU |
2505                                      DESC_HDR_MODE0_DEU_CBC |
2506                                      DESC_HDR_MODE0_DEU_3DES |
2507                                      DESC_HDR_SEL1_MDEUA |
2508                                      DESC_HDR_MODE1_MDEU_INIT |
2509                                      DESC_HDR_MODE1_MDEU_PAD |
2510                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2511         },
2512         {       .type = CRYPTO_ALG_TYPE_AEAD,
2513                 .alg.aead = {
2514                         .base = {
2515                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2516                                 .cra_driver_name = "authenc-hmac-sha384-"
2517                                                    "cbc-aes-talitos",
2518                                 .cra_blocksize = AES_BLOCK_SIZE,
2519                                 .cra_flags = CRYPTO_ALG_ASYNC,
2520                         },
2521                         .ivsize = AES_BLOCK_SIZE,
2522                         .maxauthsize = SHA384_DIGEST_SIZE,
2523                 },
2524                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2525                                      DESC_HDR_SEL0_AESU |
2526                                      DESC_HDR_MODE0_AESU_CBC |
2527                                      DESC_HDR_SEL1_MDEUB |
2528                                      DESC_HDR_MODE1_MDEU_INIT |
2529                                      DESC_HDR_MODE1_MDEU_PAD |
2530                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2531         },
2532         {       .type = CRYPTO_ALG_TYPE_AEAD,
2533                 .alg.aead = {
2534                         .base = {
2535                                 .cra_name = "authenc(hmac(sha384),"
2536                                             "cbc(des3_ede))",
2537                                 .cra_driver_name = "authenc-hmac-sha384-"
2538                                                    "cbc-3des-talitos",
2539                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2540                                 .cra_flags = CRYPTO_ALG_ASYNC,
2541                         },
2542                         .ivsize = DES3_EDE_BLOCK_SIZE,
2543                         .maxauthsize = SHA384_DIGEST_SIZE,
2544                 },
2545                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2546                                      DESC_HDR_SEL0_DEU |
2547                                      DESC_HDR_MODE0_DEU_CBC |
2548                                      DESC_HDR_MODE0_DEU_3DES |
2549                                      DESC_HDR_SEL1_MDEUB |
2550                                      DESC_HDR_MODE1_MDEU_INIT |
2551                                      DESC_HDR_MODE1_MDEU_PAD |
2552                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2553         },
2554         {       .type = CRYPTO_ALG_TYPE_AEAD,
2555                 .alg.aead = {
2556                         .base = {
2557                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2558                                 .cra_driver_name = "authenc-hmac-sha512-"
2559                                                    "cbc-aes-talitos",
2560                                 .cra_blocksize = AES_BLOCK_SIZE,
2561                                 .cra_flags = CRYPTO_ALG_ASYNC,
2562                         },
2563                         .ivsize = AES_BLOCK_SIZE,
2564                         .maxauthsize = SHA512_DIGEST_SIZE,
2565                 },
2566                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2567                                      DESC_HDR_SEL0_AESU |
2568                                      DESC_HDR_MODE0_AESU_CBC |
2569                                      DESC_HDR_SEL1_MDEUB |
2570                                      DESC_HDR_MODE1_MDEU_INIT |
2571                                      DESC_HDR_MODE1_MDEU_PAD |
2572                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2573         },
2574         {       .type = CRYPTO_ALG_TYPE_AEAD,
2575                 .alg.aead = {
2576                         .base = {
2577                                 .cra_name = "authenc(hmac(sha512),"
2578                                             "cbc(des3_ede))",
2579                                 .cra_driver_name = "authenc-hmac-sha512-"
2580                                                    "cbc-3des-talitos",
2581                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2582                                 .cra_flags = CRYPTO_ALG_ASYNC,
2583                         },
2584                         .ivsize = DES3_EDE_BLOCK_SIZE,
2585                         .maxauthsize = SHA512_DIGEST_SIZE,
2586                 },
2587                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2588                                      DESC_HDR_SEL0_DEU |
2589                                      DESC_HDR_MODE0_DEU_CBC |
2590                                      DESC_HDR_MODE0_DEU_3DES |
2591                                      DESC_HDR_SEL1_MDEUB |
2592                                      DESC_HDR_MODE1_MDEU_INIT |
2593                                      DESC_HDR_MODE1_MDEU_PAD |
2594                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2595         },
2596         {       .type = CRYPTO_ALG_TYPE_AEAD,
2597                 .alg.aead = {
2598                         .base = {
2599                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2600                                 .cra_driver_name = "authenc-hmac-md5-"
2601                                                    "cbc-aes-talitos",
2602                                 .cra_blocksize = AES_BLOCK_SIZE,
2603                                 .cra_flags = CRYPTO_ALG_ASYNC,
2604                         },
2605                         .ivsize = AES_BLOCK_SIZE,
2606                         .maxauthsize = MD5_DIGEST_SIZE,
2607                 },
2608                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2609                                      DESC_HDR_SEL0_AESU |
2610                                      DESC_HDR_MODE0_AESU_CBC |
2611                                      DESC_HDR_SEL1_MDEUA |
2612                                      DESC_HDR_MODE1_MDEU_INIT |
2613                                      DESC_HDR_MODE1_MDEU_PAD |
2614                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2615         },
2616         {       .type = CRYPTO_ALG_TYPE_AEAD,
2617                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2618                 .alg.aead = {
2619                         .base = {
2620                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2621                                 .cra_driver_name = "authenc-hmac-md5-"
2622                                                    "cbc-aes-talitos-hsna",
2623                                 .cra_blocksize = AES_BLOCK_SIZE,
2624                                 .cra_flags = CRYPTO_ALG_ASYNC,
2625                         },
2626                         .ivsize = AES_BLOCK_SIZE,
2627                         .maxauthsize = MD5_DIGEST_SIZE,
2628                 },
2629                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2630                                      DESC_HDR_SEL0_AESU |
2631                                      DESC_HDR_MODE0_AESU_CBC |
2632                                      DESC_HDR_SEL1_MDEUA |
2633                                      DESC_HDR_MODE1_MDEU_INIT |
2634                                      DESC_HDR_MODE1_MDEU_PAD |
2635                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2636         },
2637         {       .type = CRYPTO_ALG_TYPE_AEAD,
2638                 .alg.aead = {
2639                         .base = {
2640                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2641                                 .cra_driver_name = "authenc-hmac-md5-"
2642                                                    "cbc-3des-talitos",
2643                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2644                                 .cra_flags = CRYPTO_ALG_ASYNC,
2645                         },
2646                         .ivsize = DES3_EDE_BLOCK_SIZE,
2647                         .maxauthsize = MD5_DIGEST_SIZE,
2648                 },
2649                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2650                                      DESC_HDR_SEL0_DEU |
2651                                      DESC_HDR_MODE0_DEU_CBC |
2652                                      DESC_HDR_MODE0_DEU_3DES |
2653                                      DESC_HDR_SEL1_MDEUA |
2654                                      DESC_HDR_MODE1_MDEU_INIT |
2655                                      DESC_HDR_MODE1_MDEU_PAD |
2656                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2657         },
2658         {       .type = CRYPTO_ALG_TYPE_AEAD,
2659                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2660                 .alg.aead = {
2661                         .base = {
2662                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2663                                 .cra_driver_name = "authenc-hmac-md5-"
2664                                                    "cbc-3des-talitos-hsna",
2665                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2666                                 .cra_flags = CRYPTO_ALG_ASYNC,
2667                         },
2668                         .ivsize = DES3_EDE_BLOCK_SIZE,
2669                         .maxauthsize = MD5_DIGEST_SIZE,
2670                 },
2671                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2672                                      DESC_HDR_SEL0_DEU |
2673                                      DESC_HDR_MODE0_DEU_CBC |
2674                                      DESC_HDR_MODE0_DEU_3DES |
2675                                      DESC_HDR_SEL1_MDEUA |
2676                                      DESC_HDR_MODE1_MDEU_INIT |
2677                                      DESC_HDR_MODE1_MDEU_PAD |
2678                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2679         },
2680         /* ABLKCIPHER algorithms. */
2681         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2682                 .alg.crypto = {
2683                         .cra_name = "ecb(aes)",
2684                         .cra_driver_name = "ecb-aes-talitos",
2685                         .cra_blocksize = AES_BLOCK_SIZE,
2686                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2687                                      CRYPTO_ALG_ASYNC,
2688                         .cra_ablkcipher = {
2689                                 .min_keysize = AES_MIN_KEY_SIZE,
2690                                 .max_keysize = AES_MAX_KEY_SIZE,
2691                                 .ivsize = AES_BLOCK_SIZE,
2692                         }
2693                 },
2694                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2695                                      DESC_HDR_SEL0_AESU,
2696         },
2697         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2698                 .alg.crypto = {
2699                         .cra_name = "cbc(aes)",
2700                         .cra_driver_name = "cbc-aes-talitos",
2701                         .cra_blocksize = AES_BLOCK_SIZE,
2702                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2703                                      CRYPTO_ALG_ASYNC,
2704                         .cra_ablkcipher = {
2705                                 .min_keysize = AES_MIN_KEY_SIZE,
2706                                 .max_keysize = AES_MAX_KEY_SIZE,
2707                                 .ivsize = AES_BLOCK_SIZE,
2708                         }
2709                 },
2710                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2711                                      DESC_HDR_SEL0_AESU |
2712                                      DESC_HDR_MODE0_AESU_CBC,
2713         },
2714         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2715                 .alg.crypto = {
2716                         .cra_name = "ctr(aes)",
2717                         .cra_driver_name = "ctr-aes-talitos",
2718                         .cra_blocksize = AES_BLOCK_SIZE,
2719                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2720                                      CRYPTO_ALG_ASYNC,
2721                         .cra_ablkcipher = {
2722                                 .min_keysize = AES_MIN_KEY_SIZE,
2723                                 .max_keysize = AES_MAX_KEY_SIZE,
2724                                 .ivsize = AES_BLOCK_SIZE,
2725                         }
2726                 },
2727                 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2728                                      DESC_HDR_SEL0_AESU |
2729                                      DESC_HDR_MODE0_AESU_CTR,
2730         },
2731         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2732                 .alg.crypto = {
2733                         .cra_name = "ecb(des)",
2734                         .cra_driver_name = "ecb-des-talitos",
2735                         .cra_blocksize = DES_BLOCK_SIZE,
2736                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2737                                      CRYPTO_ALG_ASYNC,
2738                         .cra_ablkcipher = {
2739                                 .min_keysize = DES_KEY_SIZE,
2740                                 .max_keysize = DES_KEY_SIZE,
2741                                 .ivsize = DES_BLOCK_SIZE,
2742                         }
2743                 },
2744                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2745                                      DESC_HDR_SEL0_DEU,
2746         },
2747         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2748                 .alg.crypto = {
2749                         .cra_name = "cbc(des)",
2750                         .cra_driver_name = "cbc-des-talitos",
2751                         .cra_blocksize = DES_BLOCK_SIZE,
2752                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2753                                      CRYPTO_ALG_ASYNC,
2754                         .cra_ablkcipher = {
2755                                 .min_keysize = DES_KEY_SIZE,
2756                                 .max_keysize = DES_KEY_SIZE,
2757                                 .ivsize = DES_BLOCK_SIZE,
2758                         }
2759                 },
2760                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2761                                      DESC_HDR_SEL0_DEU |
2762                                      DESC_HDR_MODE0_DEU_CBC,
2763         },
2764         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2765                 .alg.crypto = {
2766                         .cra_name = "ecb(des3_ede)",
2767                         .cra_driver_name = "ecb-3des-talitos",
2768                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2769                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2770                                      CRYPTO_ALG_ASYNC,
2771                         .cra_ablkcipher = {
2772                                 .min_keysize = DES3_EDE_KEY_SIZE,
2773                                 .max_keysize = DES3_EDE_KEY_SIZE,
2774                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2775                         }
2776                 },
2777                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2778                                      DESC_HDR_SEL0_DEU |
2779                                      DESC_HDR_MODE0_DEU_3DES,
2780         },
2781         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2782                 .alg.crypto = {
2783                         .cra_name = "cbc(des3_ede)",
2784                         .cra_driver_name = "cbc-3des-talitos",
2785                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2786                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2787                                      CRYPTO_ALG_ASYNC,
2788                         .cra_ablkcipher = {
2789                                 .min_keysize = DES3_EDE_KEY_SIZE,
2790                                 .max_keysize = DES3_EDE_KEY_SIZE,
2791                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2792                         }
2793                 },
2794                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2795                                      DESC_HDR_SEL0_DEU |
2796                                      DESC_HDR_MODE0_DEU_CBC |
2797                                      DESC_HDR_MODE0_DEU_3DES,
2798         },
2799         /* AHASH algorithms. */
2800         {       .type = CRYPTO_ALG_TYPE_AHASH,
2801                 .alg.hash = {
2802                         .halg.digestsize = MD5_DIGEST_SIZE,
2803                         .halg.statesize = sizeof(struct talitos_export_state),
2804                         .halg.base = {
2805                                 .cra_name = "md5",
2806                                 .cra_driver_name = "md5-talitos",
2807                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2808                                 .cra_flags = CRYPTO_ALG_ASYNC,
2809                         }
2810                 },
2811                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812                                      DESC_HDR_SEL0_MDEUA |
2813                                      DESC_HDR_MODE0_MDEU_MD5,
2814         },
2815         {       .type = CRYPTO_ALG_TYPE_AHASH,
2816                 .alg.hash = {
2817                         .halg.digestsize = SHA1_DIGEST_SIZE,
2818                         .halg.statesize = sizeof(struct talitos_export_state),
2819                         .halg.base = {
2820                                 .cra_name = "sha1",
2821                                 .cra_driver_name = "sha1-talitos",
2822                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2823                                 .cra_flags = CRYPTO_ALG_ASYNC,
2824                         }
2825                 },
2826                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2827                                      DESC_HDR_SEL0_MDEUA |
2828                                      DESC_HDR_MODE0_MDEU_SHA1,
2829         },
2830         {       .type = CRYPTO_ALG_TYPE_AHASH,
2831                 .alg.hash = {
2832                         .halg.digestsize = SHA224_DIGEST_SIZE,
2833                         .halg.statesize = sizeof(struct talitos_export_state),
2834                         .halg.base = {
2835                                 .cra_name = "sha224",
2836                                 .cra_driver_name = "sha224-talitos",
2837                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2838                                 .cra_flags = CRYPTO_ALG_ASYNC,
2839                         }
2840                 },
2841                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2842                                      DESC_HDR_SEL0_MDEUA |
2843                                      DESC_HDR_MODE0_MDEU_SHA224,
2844         },
2845         {       .type = CRYPTO_ALG_TYPE_AHASH,
2846                 .alg.hash = {
2847                         .halg.digestsize = SHA256_DIGEST_SIZE,
2848                         .halg.statesize = sizeof(struct talitos_export_state),
2849                         .halg.base = {
2850                                 .cra_name = "sha256",
2851                                 .cra_driver_name = "sha256-talitos",
2852                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2853                                 .cra_flags = CRYPTO_ALG_ASYNC,
2854                         }
2855                 },
2856                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2857                                      DESC_HDR_SEL0_MDEUA |
2858                                      DESC_HDR_MODE0_MDEU_SHA256,
2859         },
2860         {       .type = CRYPTO_ALG_TYPE_AHASH,
2861                 .alg.hash = {
2862                         .halg.digestsize = SHA384_DIGEST_SIZE,
2863                         .halg.statesize = sizeof(struct talitos_export_state),
2864                         .halg.base = {
2865                                 .cra_name = "sha384",
2866                                 .cra_driver_name = "sha384-talitos",
2867                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2868                                 .cra_flags = CRYPTO_ALG_ASYNC,
2869                         }
2870                 },
2871                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2872                                      DESC_HDR_SEL0_MDEUB |
2873                                      DESC_HDR_MODE0_MDEUB_SHA384,
2874         },
2875         {       .type = CRYPTO_ALG_TYPE_AHASH,
2876                 .alg.hash = {
2877                         .halg.digestsize = SHA512_DIGEST_SIZE,
2878                         .halg.statesize = sizeof(struct talitos_export_state),
2879                         .halg.base = {
2880                                 .cra_name = "sha512",
2881                                 .cra_driver_name = "sha512-talitos",
2882                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2883                                 .cra_flags = CRYPTO_ALG_ASYNC,
2884                         }
2885                 },
2886                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2887                                      DESC_HDR_SEL0_MDEUB |
2888                                      DESC_HDR_MODE0_MDEUB_SHA512,
2889         },
2890         {       .type = CRYPTO_ALG_TYPE_AHASH,
2891                 .alg.hash = {
2892                         .halg.digestsize = MD5_DIGEST_SIZE,
2893                         .halg.statesize = sizeof(struct talitos_export_state),
2894                         .halg.base = {
2895                                 .cra_name = "hmac(md5)",
2896                                 .cra_driver_name = "hmac-md5-talitos",
2897                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2898                                 .cra_flags = CRYPTO_ALG_ASYNC,
2899                         }
2900                 },
2901                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2902                                      DESC_HDR_SEL0_MDEUA |
2903                                      DESC_HDR_MODE0_MDEU_MD5,
2904         },
2905         {       .type = CRYPTO_ALG_TYPE_AHASH,
2906                 .alg.hash = {
2907                         .halg.digestsize = SHA1_DIGEST_SIZE,
2908                         .halg.statesize = sizeof(struct talitos_export_state),
2909                         .halg.base = {
2910                                 .cra_name = "hmac(sha1)",
2911                                 .cra_driver_name = "hmac-sha1-talitos",
2912                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2913                                 .cra_flags = CRYPTO_ALG_ASYNC,
2914                         }
2915                 },
2916                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2917                                      DESC_HDR_SEL0_MDEUA |
2918                                      DESC_HDR_MODE0_MDEU_SHA1,
2919         },
2920         {       .type = CRYPTO_ALG_TYPE_AHASH,
2921                 .alg.hash = {
2922                         .halg.digestsize = SHA224_DIGEST_SIZE,
2923                         .halg.statesize = sizeof(struct talitos_export_state),
2924                         .halg.base = {
2925                                 .cra_name = "hmac(sha224)",
2926                                 .cra_driver_name = "hmac-sha224-talitos",
2927                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2928                                 .cra_flags = CRYPTO_ALG_ASYNC,
2929                         }
2930                 },
2931                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2932                                      DESC_HDR_SEL0_MDEUA |
2933                                      DESC_HDR_MODE0_MDEU_SHA224,
2934         },
2935         {       .type = CRYPTO_ALG_TYPE_AHASH,
2936                 .alg.hash = {
2937                         .halg.digestsize = SHA256_DIGEST_SIZE,
2938                         .halg.statesize = sizeof(struct talitos_export_state),
2939                         .halg.base = {
2940                                 .cra_name = "hmac(sha256)",
2941                                 .cra_driver_name = "hmac-sha256-talitos",
2942                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2943                                 .cra_flags = CRYPTO_ALG_ASYNC,
2944                         }
2945                 },
2946                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2947                                      DESC_HDR_SEL0_MDEUA |
2948                                      DESC_HDR_MODE0_MDEU_SHA256,
2949         },
2950         {       .type = CRYPTO_ALG_TYPE_AHASH,
2951                 .alg.hash = {
2952                         .halg.digestsize = SHA384_DIGEST_SIZE,
2953                         .halg.statesize = sizeof(struct talitos_export_state),
2954                         .halg.base = {
2955                                 .cra_name = "hmac(sha384)",
2956                                 .cra_driver_name = "hmac-sha384-talitos",
2957                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2958                                 .cra_flags = CRYPTO_ALG_ASYNC,
2959                         }
2960                 },
2961                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2962                                      DESC_HDR_SEL0_MDEUB |
2963                                      DESC_HDR_MODE0_MDEUB_SHA384,
2964         },
2965         {       .type = CRYPTO_ALG_TYPE_AHASH,
2966                 .alg.hash = {
2967                         .halg.digestsize = SHA512_DIGEST_SIZE,
2968                         .halg.statesize = sizeof(struct talitos_export_state),
2969                         .halg.base = {
2970                                 .cra_name = "hmac(sha512)",
2971                                 .cra_driver_name = "hmac-sha512-talitos",
2972                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2973                                 .cra_flags = CRYPTO_ALG_ASYNC,
2974                         }
2975                 },
2976                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2977                                      DESC_HDR_SEL0_MDEUB |
2978                                      DESC_HDR_MODE0_MDEUB_SHA512,
2979         }
2980 };
2981
2982 struct talitos_crypto_alg {
2983         struct list_head entry;
2984         struct device *dev;
2985         struct talitos_alg_template algt;
2986 };
2987
2988 static int talitos_init_common(struct talitos_ctx *ctx,
2989                                struct talitos_crypto_alg *talitos_alg)
2990 {
2991         struct talitos_private *priv;
2992
2993         /* update context with ptr to dev */
2994         ctx->dev = talitos_alg->dev;
2995
2996         /* assign SEC channel to tfm in round-robin fashion */
2997         priv = dev_get_drvdata(ctx->dev);
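        /*
         * num_channels is validated as a power of two at probe time, so the
         * AND with (num_channels - 1) is a cheap modulo over the channels.
         */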
2998         ctx->ch = atomic_inc_return(&priv->last_chan) &
2999                   (priv->num_channels - 1);
3000
3001         /* copy descriptor header template value */
3002         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3003
3004         /* select done notification */
3005         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3006
3007         return 0;
3008 }
3009
3010 static int talitos_cra_init(struct crypto_tfm *tfm)
3011 {
3012         struct crypto_alg *alg = tfm->__crt_alg;
3013         struct talitos_crypto_alg *talitos_alg;
3014         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3015
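        /*
         * ahash algorithms embed their struct crypto_alg inside struct
         * ahash_alg (halg.base), so the template must be recovered through
         * __crypto_ahash_alg(); ablkcipher entries store the crypto_alg
         * directly in the union.
         */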
3016         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3017                 talitos_alg = container_of(__crypto_ahash_alg(alg),
3018                                            struct talitos_crypto_alg,
3019                                            algt.alg.hash);
3020         else
3021                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3022                                            algt.alg.crypto);
3023
3024         return talitos_init_common(ctx, talitos_alg);
3025 }
3026
3027 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3028 {
3029         struct aead_alg *alg = crypto_aead_alg(tfm);
3030         struct talitos_crypto_alg *talitos_alg;
3031         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3032
3033         talitos_alg = container_of(alg, struct talitos_crypto_alg,
3034                                    algt.alg.aead);
3035
3036         return talitos_init_common(ctx, talitos_alg);
3037 }
3038
3039 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3040 {
3041         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3042
3043         talitos_cra_init(tfm);
3044
3045         ctx->keylen = 0;
3046         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3047                                  sizeof(struct talitos_ahash_req_ctx));
3048
3049         return 0;
3050 }
3051
3052 static void talitos_cra_exit(struct crypto_tfm *tfm)
3053 {
3054         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3055         struct device *dev = ctx->dev;
3056
3057         if (ctx->keylen)
3058                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3059 }
3060
3061 /*
3062  * given the alg's descriptor header template, determine whether descriptor
3063  * type and primary/secondary execution units required match the hw
3064  * capabilities description provided in the device tree node.
3065  */
3066 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3067 {
3068         struct talitos_private *priv = dev_get_drvdata(dev);
3069         int ret;
3070
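        /*
         * desc_types and exec_units are bitmasks taken from the device tree;
         * DESC_TYPE(), PRIMARY_EU() and SECONDARY_EU() extract the matching
         * field from the header template so each requirement is one bit test.
         */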
3071         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3072               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3073
3074         if (SECONDARY_EU(desc_hdr_template))
3075                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3076                               & priv->exec_units);
3077
3078         return ret;
3079 }
3080
3081 static int talitos_remove(struct platform_device *ofdev)
3082 {
3083         struct device *dev = &ofdev->dev;
3084         struct talitos_private *priv = dev_get_drvdata(dev);
3085         struct talitos_crypto_alg *t_alg, *n;
3086         int i;
3087
3088         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3089                 switch (t_alg->algt.type) {
3090                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
                             crypto_unregister_alg(&t_alg->algt.alg.crypto);
3091                         break;
3092                 case CRYPTO_ALG_TYPE_AEAD:
3093                         crypto_unregister_aead(&t_alg->algt.alg.aead);
                             break;
3094                 case CRYPTO_ALG_TYPE_AHASH:
3095                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
3096                         break;
3097                 }
3098                 list_del(&t_alg->entry);
3099         }
3100
3101         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3102                 talitos_unregister_rng(dev);
3103
3104         for (i = 0; i < 2; i++)
3105                 if (priv->irq[i]) {
3106                         free_irq(priv->irq[i], dev);
3107                         irq_dispose_mapping(priv->irq[i]);
3108                 }
3109
3110         tasklet_kill(&priv->done_task[0]);
3111         if (priv->irq[1])
3112                 tasklet_kill(&priv->done_task[1]);
3113
3114         return 0;
3115 }
3116
3117 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3118                                                     struct talitos_alg_template
3119                                                            *template)
3120 {
3121         struct talitos_private *priv = dev_get_drvdata(dev);
3122         struct talitos_crypto_alg *t_alg;
3123         struct crypto_alg *alg;
3124
3125         t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3126                              GFP_KERNEL);
3127         if (!t_alg)
3128                 return ERR_PTR(-ENOMEM);
3129
3130         t_alg->algt = *template;
3131
3132         switch (t_alg->algt.type) {
3133         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3134                 alg = &t_alg->algt.alg.crypto;
3135                 alg->cra_init = talitos_cra_init;
3136                 alg->cra_exit = talitos_cra_exit;
3137                 alg->cra_type = &crypto_ablkcipher_type;
3138                 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3139                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3140                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3141                 alg->cra_ablkcipher.geniv = "eseqiv";
3142                 break;
3143         case CRYPTO_ALG_TYPE_AEAD:
3144                 alg = &t_alg->algt.alg.aead.base;
3145                 alg->cra_exit = talitos_cra_exit;
3146                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3147                 t_alg->algt.alg.aead.setkey = aead_setkey;
3148                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3149                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3150                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3151                     !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3152                         devm_kfree(dev, t_alg);
3153                         return ERR_PTR(-ENOTSUPP);
3154                 }
3155                 break;
3156         case CRYPTO_ALG_TYPE_AHASH:
3157                 alg = &t_alg->algt.alg.hash.halg.base;
3158                 alg->cra_init = talitos_cra_init_ahash;
3159                 alg->cra_exit = talitos_cra_exit;
3160                 t_alg->algt.alg.hash.init = ahash_init;
3161                 t_alg->algt.alg.hash.update = ahash_update;
3162                 t_alg->algt.alg.hash.final = ahash_final;
3163                 t_alg->algt.alg.hash.finup = ahash_finup;
3164                 t_alg->algt.alg.hash.digest = ahash_digest;
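                /* only keyed (hmac) hashes get a setkey handler */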
3165                 if (!strncmp(alg->cra_name, "hmac", 4))
3166                         t_alg->algt.alg.hash.setkey = ahash_setkey;
3167                 t_alg->algt.alg.hash.import = ahash_import;
3168                 t_alg->algt.alg.hash.export = ahash_export;
3169
3170                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3171                     !strncmp(alg->cra_name, "hmac", 4)) {
3172                         devm_kfree(dev, t_alg);
3173                         return ERR_PTR(-ENOTSUPP);
3174                 }
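                /*
                 * Parts without SHA-224 hardware init run the MDEU in SHA-256
                 * mode and have ahash_init_sha224_swinit seed the SHA-224
                 * initial state from software.
                 */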
3175                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3176                     (!strcmp(alg->cra_name, "sha224") ||
3177                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
3178                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3179                         t_alg->algt.desc_hdr_template =
3180                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3181                                         DESC_HDR_SEL0_MDEUA |
3182                                         DESC_HDR_MODE0_MDEU_SHA256;
3183                 }
3184                 break;
3185         default:
3186                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3187                 devm_kfree(dev, t_alg);
3188                 return ERR_PTR(-EINVAL);
3189         }
3190
3191         alg->cra_module = THIS_MODULE;
3192         if (t_alg->algt.priority)
3193                 alg->cra_priority = t_alg->algt.priority;
3194         else
3195                 alg->cra_priority = TALITOS_CRA_PRIORITY;
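        /* on SEC1, request 4-byte aligned buffers from the crypto API */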
3196         if (has_ftr_sec1(priv))
3197                 alg->cra_alignmask = 3;
3198         else
3199                 alg->cra_alignmask = 0;
3200         alg->cra_ctxsize = sizeof(struct talitos_ctx);
3201         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3202
3203         t_alg->dev = dev;
3204
3205         return t_alg;
3206 }
3207
3208 static int talitos_probe_irq(struct platform_device *ofdev)
3209 {
3210         struct device *dev = &ofdev->dev;
3211         struct device_node *np = ofdev->dev.of_node;
3212         struct talitos_private *priv = dev_get_drvdata(dev);
3213         int err;
3214         bool is_sec1 = has_ftr_sec1(priv);
3215
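        /*
         * SEC1 has a single interrupt for all channels.  SEC2+ may wire up a
         * second line: when present, irq[0] serves channels 0/2 and irq[1]
         * serves channels 1/3; otherwise one 4-channel handler is used.
         */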
3216         priv->irq[0] = irq_of_parse_and_map(np, 0);
3217         if (!priv->irq[0]) {
3218                 dev_err(dev, "failed to map irq\n");
3219                 return -EINVAL;
3220         }
3221         if (is_sec1) {
3222                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3223                                   dev_driver_string(dev), dev);
3224                 goto primary_out;
3225         }
3226
3227         priv->irq[1] = irq_of_parse_and_map(np, 1);
3228
3229         /* no second irq line: the primary line services all channels */
3230         if (!priv->irq[1]) {
3231                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3232                                   dev_driver_string(dev), dev);
3233                 goto primary_out;
3234         }
3235
3236         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3237                           dev_driver_string(dev), dev);
3238         if (err)
3239                 goto primary_out;
3240
3241         /* get the secondary irq line */
3242         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3243                           dev_driver_string(dev), dev);
3244         if (err) {
3245                 dev_err(dev, "failed to request secondary irq\n");
3246                 irq_dispose_mapping(priv->irq[1]);
3247                 priv->irq[1] = 0;
3248         }
3249
3250         return err;
3251
3252 primary_out:
3253         if (err) {
3254                 dev_err(dev, "failed to request primary irq\n");
3255                 irq_dispose_mapping(priv->irq[0]);
3256                 priv->irq[0] = 0;
3257         }
3258
3259         return err;
3260 }
3261
3262 static int talitos_probe(struct platform_device *ofdev)
3263 {
3264         struct device *dev = &ofdev->dev;
3265         struct device_node *np = ofdev->dev.of_node;
3266         struct talitos_private *priv;
3267         int i, err;
3268         int stride;
3269         struct resource *res;
3270
3271         priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3272         if (!priv)
3273                 return -ENOMEM;
3274
3275         INIT_LIST_HEAD(&priv->alg_list);
3276
3277         dev_set_drvdata(dev, priv);
3278
3279         priv->ofdev = ofdev;
3280
3281         spin_lock_init(&priv->reg_lock);
3282
3283         res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3284         if (!res)
3285                 return -ENXIO;
3286         priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3287         if (!priv->reg) {
3288                 dev_err(dev, "failed to ioremap registers\n");
3289                 err = -ENOMEM;
3290                 goto err_out;
3291         }
3292
3293         /* get SEC version capabilities from device tree */
3294         of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3295         of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3296         of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3297         of_property_read_u32(np, "fsl,descriptor-types-mask",
3298                              &priv->desc_types);
3299
3300         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3301             !priv->exec_units || !priv->desc_types) {
3302                 dev_err(dev, "invalid property data in device tree node\n");
3303                 err = -EINVAL;
3304                 goto err_out;
3305         }
3306
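        /*
         * Newer SEC revisions advertise extra features: SEC 3.0 includes the
         * extent in source link-table lengths, SEC 2.1+ adds hardware ICV
         * checking plus SHA-224 hardware init and HMAC support, and SEC 1.x
         * parts are flagged so their quirks can be applied elsewhere.
         */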
3307         if (of_device_is_compatible(np, "fsl,sec3.0"))
3308                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3309
3310         if (of_device_is_compatible(np, "fsl,sec2.1"))
3311                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3312                                   TALITOS_FTR_SHA224_HWINIT |
3313                                   TALITOS_FTR_HMAC_OK;
3314
3315         if (of_device_is_compatible(np, "fsl,sec1.0"))
3316                 priv->features |= TALITOS_FTR_SEC1;
3317
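        /*
         * Execution-unit register offsets and the per-channel stride differ
         * between SEC generations, so select the register map from the
         * compatible string.
         */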
3318         if (of_device_is_compatible(np, "fsl,sec1.2")) {
3319                 priv->reg_deu = priv->reg + TALITOS12_DEU;
3320                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3321                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3322                 stride = TALITOS1_CH_STRIDE;
3323         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3324                 priv->reg_deu = priv->reg + TALITOS10_DEU;
3325                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3326                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3327                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3328                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3329                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3330                 stride = TALITOS1_CH_STRIDE;
3331         } else {
3332                 priv->reg_deu = priv->reg + TALITOS2_DEU;
3333                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3334                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3335                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3336                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3337                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3338                 priv->reg_keu = priv->reg + TALITOS2_KEU;
3339                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3340                 stride = TALITOS2_CH_STRIDE;
3341         }
3342
3343         err = talitos_probe_irq(ofdev);
3344         if (err)
3345                 goto err_out;
3346
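        /*
         * Done-notification tasklets mirror the interrupt layout: one tasklet
         * when a single IRQ covers every channel, or a pair handling channels
         * 0/2 and 1/3 when two lines are in use.
         */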
3347         if (of_device_is_compatible(np, "fsl,sec1.0")) {
3348                 if (priv->num_channels == 1)
3349                         tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3350                                      (unsigned long)dev);
3351                 else
3352                         tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3353                                      (unsigned long)dev);
3354         } else {
3355                 if (priv->irq[1]) {
3356                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3357                                      (unsigned long)dev);
3358                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3359                                      (unsigned long)dev);
3360                 } else if (priv->num_channels == 1) {
3361                         tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3362                                      (unsigned long)dev);
3363                 } else {
3364                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3365                                      (unsigned long)dev);
3366                 }
3367         }
3368
3369         priv->chan = devm_kcalloc(dev,
3370                                   priv->num_channels,
3371                                   sizeof(struct talitos_channel),
3372                                   GFP_KERNEL);
3373         if (!priv->chan) {
3374                 dev_err(dev, "failed to allocate channel management space\n");
3375                 err = -ENOMEM;
3376                 goto err_out;
3377         }
3378
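        /*
         * The request ring is the next power of two above the advertised
         * channel fifo depth so ring indices can wrap with a simple mask.
         */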
3379         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3380
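        /*
         * Each channel's registers sit at stride * (i + 1) from the base;
         * even channels (or every channel when only one IRQ is wired) are
         * accessed at an additional TALITOS_CH_BASE_OFFSET.
         */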
3381         for (i = 0; i < priv->num_channels; i++) {
3382                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3383                 if (!priv->irq[1] || !(i & 1))
3384                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3385
3386                 spin_lock_init(&priv->chan[i].head_lock);
3387                 spin_lock_init(&priv->chan[i].tail_lock);
3388
3389                 priv->chan[i].fifo = devm_kcalloc(dev,
3390                                                 priv->fifo_len,
3391                                                 sizeof(struct talitos_request),
3392                                                 GFP_KERNEL);
3393                 if (!priv->chan[i].fifo) {
3394                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3395                         err = -ENOMEM;
3396                         goto err_out;
3397                 }
3398
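                /*
                 * Start the submit counter at -(chfifo_len - 1) so the channel
                 * accepts chfifo_len - 1 outstanding requests before it is
                 * reported as full.
                 */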
3399                 atomic_set(&priv->chan[i].submit_count,
3400                            -(priv->chfifo_len - 1));
3401         }
3402
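        /*
         * The SEC generates 36-bit DMA addresses; the extra address bits are
         * carried in each descriptor pointer's eptr field on SEC2+.
         */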
3403         dma_set_mask(dev, DMA_BIT_MASK(36));
3404
3405         /* reset and initialize the h/w */
3406         err = init_device(dev);
3407         if (err) {
3408                 dev_err(dev, "failed to initialize device\n");
3409                 goto err_out;
3410         }
3411
3412         /* register the RNG, if available */
3413         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3414                 err = talitos_register_rng(dev);
3415                 if (err) {
3416                         dev_err(dev, "failed to register hwrng: %d\n", err);
3417                         goto err_out;
3418                 } else
3419                         dev_info(dev, "hwrng\n");
3420         }
3421
3422         /* register crypto algorithms the device supports */
3423         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3424                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3425                         struct talitos_crypto_alg *t_alg;
3426                         struct crypto_alg *alg = NULL;
3427
3428                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3429                         if (IS_ERR(t_alg)) {
3430                                 err = PTR_ERR(t_alg);
3431                                 if (err == -ENOTSUPP)
3432                                         continue;
3433                                 goto err_out;
3434                         }
3435
3436                         switch (t_alg->algt.type) {
3437                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3438                                 err = crypto_register_alg(
3439                                                 &t_alg->algt.alg.crypto);
3440                                 alg = &t_alg->algt.alg.crypto;
3441                                 break;
3442
3443                         case CRYPTO_ALG_TYPE_AEAD:
3444                                 err = crypto_register_aead(
3445                                         &t_alg->algt.alg.aead);
3446                                 alg = &t_alg->algt.alg.aead.base;
3447                                 break;
3448
3449                         case CRYPTO_ALG_TYPE_AHASH:
3450                                 err = crypto_register_ahash(
3451                                                 &t_alg->algt.alg.hash);
3452                                 alg = &t_alg->algt.alg.hash.halg.base;
3453                                 break;
3454                         }
3455                         if (err) {
3456                                 dev_err(dev, "%s alg registration failed\n",
3457                                         alg->cra_driver_name);
3458                                 devm_kfree(dev, t_alg);
3459                         } else
3460                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3461                 }
3462         }
3463         if (!list_empty(&priv->alg_list))
3464                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3465                          (char *)of_get_property(np, "compatible", NULL));
3466
3467         return 0;
3468
3469 err_out:
3470         talitos_remove(ofdev);
3471
3472         return err;
3473 }
3474
3475 static const struct of_device_id talitos_match[] = {
3476 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3477         {
3478                 .compatible = "fsl,sec1.0",
3479         },
3480 #endif
3481 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3482         {
3483                 .compatible = "fsl,sec2.0",
3484         },
3485 #endif
3486         {},
3487 };
3488 MODULE_DEVICE_TABLE(of, talitos_match);
3489
3490 static struct platform_driver talitos_driver = {
3491         .driver = {
3492                 .name = "talitos",
3493                 .of_match_table = talitos_match,
3494         },
3495         .probe = talitos_probe,
3496         .remove = talitos_remove,
3497 };
3498
3499 module_platform_driver(talitos_driver);
3500
3501 MODULE_LICENSE("GPL");
3502 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3503 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");