drivers/crypto/bcm/cipher.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2016 Broadcom
4  */
5
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/errno.h>
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/platform_device.h>
13 #include <linux/scatterlist.h>
14 #include <linux/crypto.h>
15 #include <linux/kthread.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/sched.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
20 #include <linux/io.h>
21 #include <linux/bitops.h>
22
23 #include <crypto/algapi.h>
24 #include <crypto/aead.h>
25 #include <crypto/internal/aead.h>
26 #include <crypto/aes.h>
27 #include <crypto/des.h>
28 #include <crypto/hmac.h>
29 #include <crypto/sha.h>
30 #include <crypto/md5.h>
31 #include <crypto/authenc.h>
32 #include <crypto/skcipher.h>
33 #include <crypto/hash.h>
34 #include <crypto/sha3.h>
35
36 #include "util.h"
37 #include "cipher.h"
38 #include "spu.h"
39 #include "spum.h"
40 #include "spu2.h"
41
42 /* ================= Device Structure ================== */
43
44 struct device_private iproc_priv;
45
46 /* ==================== Parameters ===================== */
47
48 int flow_debug_logging;
49 module_param(flow_debug_logging, int, 0644);
50 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
51
52 int packet_debug_logging;
53 module_param(packet_debug_logging, int, 0644);
54 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
55
56 int debug_logging_sleep;
57 module_param(debug_logging_sleep, int, 0644);
58 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
59
60 /*
61  * The value of these module parameters is used to set the priority for each
62  * algo type when this driver registers algos with the kernel crypto API.
63  * To use a priority other than the default, set the priority on the insmod or
64  * modprobe command line. Changing a priority after init time has no effect.
65  *
66  * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
67  * algos, but more preferred than generic software algos.
68  */
69 static int cipher_pri = 150;
70 module_param(cipher_pri, int, 0644);
71 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
72
73 static int hash_pri = 100;
74 module_param(hash_pri, int, 0644);
75 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
76
77 static int aead_pri = 150;
78 module_param(aead_pri, int, 0644);
79 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
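
/*
 * Usage example (illustrative; assumes this driver is built as the
 * bcm_crypto_spu module): to prefer this driver's cipher algos over the
 * ARMv8 CE implementations, load it with a higher cipher priority, e.g.:
 *
 *   modprobe bcm_crypto_spu cipher_pri=400
 *
 * As noted above, priorities only take effect at algo registration time.
 */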
80
81 /* A type 3 BCM header, expected to precede the SPU header for SPU-M.
82  * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
83  * 0x60 - ring 0
84  * 0x68 - ring 1
85  * 0x70 - ring 2
86  * 0x78 - ring 3
87  */
88 static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
89 /*
90  * Some SPU hw does not use a BCM header on SPU messages, so BCM_HDR_LEN
91  * is set dynamically after reading the SPU type from the device tree.
92  */
93 #define BCM_HDR_LEN  iproc_priv.bcm_hdr_len
94
95 /* min and max time (in usec) to sleep before retrying when mbox queue is full */
96 #define MBOX_SLEEP_MIN  800
97 #define MBOX_SLEEP_MAX 1000
98
99 /**
100  * select_channel() - Select a SPU channel to handle a crypto request. Selects
101  * channel in round robin order.
102  *
103  * Return:  channel index
104  */
105 static u8 select_channel(void)
106 {
107         u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
108
109         return chan_idx % iproc_priv.spu.num_chan;
110 }
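
/*
 * Illustrative example: with iproc_priv.spu.num_chan == 4 and next_chan
 * starting at 0, successive calls to select_channel() return 1, 2, 3, 0,
 * 1, ... so requests are spread across the DMA ringsets in round-robin
 * order.
 */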
111
112 /**
113  * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
114  * receive a SPU response message for an ablkcipher request. Includes buffers to
115  * catch SPU message headers and the response data.
116  * @mssg:       mailbox message containing the receive sg
117  * @rctx:       crypto request context
118  * @rx_frag_num: number of scatterlist elements required to hold the
119  *              SPU response message
120  * @chunksize:  Number of bytes of response data expected
121  * @stat_pad_len: Number of bytes required to pad the STAT field to
122  *              a 4-byte boundary
123  *
124  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
125  * when the request completes, whether the request is handled successfully or
126  * there is an error.
127  *
128  * Returns:
129  *   0 if successful
130  *   < 0 if an error
131  */
132 static int
133 spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
134                             struct iproc_reqctx_s *rctx,
135                             u8 rx_frag_num,
136                             unsigned int chunksize, u32 stat_pad_len)
137 {
138         struct spu_hw *spu = &iproc_priv.spu;
139         struct scatterlist *sg; /* used to build sgs in mbox message */
140         struct iproc_ctx_s *ctx = rctx->ctx;
141         u32 datalen;            /* Number of bytes of response data expected */
142
143         mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
144                                 rctx->gfp);
145         if (!mssg->spu.dst)
146                 return -ENOMEM;
147
148         sg = mssg->spu.dst;
149         sg_init_table(sg, rx_frag_num);
150         /* Space for SPU message header */
151         sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
152
153         /* If XTS tweak in payload, add buffer to receive encrypted tweak */
154         if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
155             spu->spu_xts_tweak_in_payload())
156                 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
157                            SPU_XTS_TWEAK_SIZE);
158
159         /* Copy in each dst sg entry from request, up to chunksize */
160         datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
161                                  rctx->dst_nents, chunksize);
162         if (datalen < chunksize) {
163                 pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
164                        __func__, chunksize, datalen);
165                 return -EFAULT;
166         }
167
168         if (ctx->cipher.alg == CIPHER_ALG_RC4)
169                 /* Add buffer to catch 260-byte SUPDT field for RC4 */
170                 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);
171
172         if (stat_pad_len)
173                 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
174
175         memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
176         sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
177
178         return 0;
179 }
180
181 /**
182  * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
183  * send a SPU request message for an ablkcipher request. Includes SPU message
184  * headers and the request data.
185  * @mssg:       mailbox message containing the transmit sg
186  * @rctx:       crypto request context
187  * @tx_frag_num: number of scatterlist elements required to construct the
188  *              SPU request message
189  * @chunksize:  Number of bytes of request data
190  * @pad_len:    Number of pad bytes
191  *
192  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
193  * when the request completes, whether the request is handled successfully or
194  * there is an error.
195  *
196  * Returns:
197  *   0 if successful
198  *   < 0 if an error
199  */
200 static int
201 spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
202                             struct iproc_reqctx_s *rctx,
203                             u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
204 {
205         struct spu_hw *spu = &iproc_priv.spu;
206         struct scatterlist *sg; /* used to build sgs in mbox message */
207         struct iproc_ctx_s *ctx = rctx->ctx;
208         u32 datalen;            /* Number of bytes of request data written */
209         u32 stat_len;
210
211         mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
212                                 rctx->gfp);
213         if (unlikely(!mssg->spu.src))
214                 return -ENOMEM;
215
216         sg = mssg->spu.src;
217         sg_init_table(sg, tx_frag_num);
218
219         sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
220                    BCM_HDR_LEN + ctx->spu_req_hdr_len);
221
222         /* if XTS tweak in payload, copy from IV (where crypto API puts it) */
223         if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
224             spu->spu_xts_tweak_in_payload())
225                 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
226
227         /* Copy in each src sg entry from request, up to chunksize */
228         datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
229                                  rctx->src_nents, chunksize);
230         if (unlikely(datalen < chunksize)) {
231                 pr_err("%s(): failed to copy src sg to mbox msg",
232                        __func__);
233                 return -EFAULT;
234         }
235
236         if (pad_len)
237                 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
238
239         stat_len = spu->spu_tx_status_len();
240         if (stat_len) {
241                 memset(rctx->msg_buf.tx_stat, 0, stat_len);
242                 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
243         }
244         return 0;
245 }
246
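/*
 * mailbox_send_message() - Send a SPU request message on the given mailbox
 * channel. Summary of the logic below: when CRYPTO_TFM_REQ_MAY_SLEEP is set
 * and the mailbox queue is full (-ENOBUFS), the send is retried up to
 * SPU_MB_RETRY_MAX times with an 800-1000 usec sleep between attempts, so a
 * caller may block for roughly SPU_MB_RETRY_MAX milliseconds in the worst
 * case before the error is returned.
 */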
247 static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
248                                 u8 chan_idx)
249 {
250         int err;
251         int retry_cnt = 0;
252         struct device *dev = &(iproc_priv.pdev->dev);
253
254         err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
255         if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
256                 while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
257                         /*
258                          * Mailbox queue is full. Since MAY_SLEEP is set, assume
259                          * not in atomic context and we can wait and try again.
260                          */
261                         retry_cnt++;
262                         usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
263                         err = mbox_send_message(iproc_priv.mbox[chan_idx],
264                                                 mssg);
265                         atomic_inc(&iproc_priv.mb_no_spc);
266                 }
267         }
268         if (err < 0) {
269                 atomic_inc(&iproc_priv.mb_send_fail);
270                 return err;
271         }
272
273         /* Check error returned by mailbox controller */
274         err = mssg->error;
275         if (unlikely(err < 0)) {
276                 dev_err(dev, "message error %d", err);
277                 /* fall through; txdone is still signaled below */
278         }
279
280         /* Signal txdone for mailbox channel */
281         mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
282         return err;
283 }
284
285 /**
286  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
287  * a single SPU request message, starting at the current position in the request
288  * data.
289  * @rctx:       Crypto request context
290  *
291  * This may be called on the crypto API thread, or, when a request is so large
292  * it must be broken into multiple SPU messages, on the thread used to invoke
293  * the response callback. When requests are broken into multiple SPU
294  * messages, we assume subsequent messages depend on previous results, and
295  * thus always wait for previous results before submitting the next message.
296  * Because requests are submitted in lock step like this, there is no need
297  * to synchronize access to request data structures.
298  *
299  * Return: -EINPROGRESS: request has been accepted and result will be returned
300  *                       asynchronously
301  *         Any other value indicates an error
302  */
303 static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
304 {
305         struct spu_hw *spu = &iproc_priv.spu;
306         struct crypto_async_request *areq = rctx->parent;
307         struct ablkcipher_request *req =
308             container_of(areq, struct ablkcipher_request, base);
309         struct iproc_ctx_s *ctx = rctx->ctx;
310         struct spu_cipher_parms cipher_parms;
311         int err = 0;
312         unsigned int chunksize = 0;     /* Num bytes of request to submit */
313         int remaining = 0;      /* Bytes of request still to process */
314         int chunk_start;        /* Beginning of data for current SPU msg */
315
316         /* IV or ctr value to use in this SPU msg */
317         u8 local_iv_ctr[MAX_IV_SIZE];
318         u32 stat_pad_len;       /* num bytes to align status field */
319         u32 pad_len;            /* total length of all padding */
320         bool update_key = false;
321         struct brcm_message *mssg;      /* mailbox message */
322
323         /* number of entries in src and dst sg in mailbox message. */
324         u8 rx_frag_num = 2;     /* response header and STATUS */
325         u8 tx_frag_num = 1;     /* request header */
326
327         flow_log("%s\n", __func__);
328
329         cipher_parms.alg = ctx->cipher.alg;
330         cipher_parms.mode = ctx->cipher.mode;
331         cipher_parms.type = ctx->cipher_type;
332         cipher_parms.key_len = ctx->enckeylen;
333         cipher_parms.key_buf = ctx->enckey;
334         cipher_parms.iv_buf = local_iv_ctr;
335         cipher_parms.iv_len = rctx->iv_ctr_len;
336
337         mssg = &rctx->mb_mssg;
338         chunk_start = rctx->src_sent;
339         remaining = rctx->total_todo - chunk_start;
340
341         /* determine the chunk we are breaking off and update the indexes */
342         if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
343             (remaining > ctx->max_payload))
344                 chunksize = ctx->max_payload;
345         else
346                 chunksize = remaining;
347
348         rctx->src_sent += chunksize;
349         rctx->total_sent = rctx->src_sent;
350
351         /* Count number of sg entries to be included in this request */
352         rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
353         rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
354
355         if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
356             rctx->is_encrypt && chunk_start)
357                 /*
358                  * Encrypting a chunk other than the first. Copy the last
359                  * block of the previous result to the IV for this chunk.
360                  */
361                 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
362                                     rctx->iv_ctr_len,
363                                     chunk_start - rctx->iv_ctr_len);
364
365         if (rctx->iv_ctr_len) {
366                 /* get our local copy of the iv */
367                 __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
368                                  rctx->iv_ctr_len);
369
370                 /* generate the next IV if possible */
371                 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
372                     !rctx->is_encrypt) {
373                         /*
374                          * CBC Decrypt: next IV is the last ciphertext block in
375                          * this chunk
376                          */
377                         sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
378                                             rctx->iv_ctr_len,
379                                             rctx->src_sent - rctx->iv_ctr_len);
380                 } else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
381                         /*
382                          * The SPU hardware increments the counter once for
383                          * each AES block of 16 bytes. So update the counter
384                          * for the next chunk, if there is one. Note that for
385                          * this chunk, the counter has already been copied to
386                          * local_iv_ctr. We can assume a block size of 16,
387                          * because we only support CTR mode for AES, not for
388                          * any other cipher alg.
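                         * For example, a 4096-byte chunk advances the
                         * counter by 4096 >> 4 = 256.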
389                          */
390                         add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
391                 }
392         }
393
394         if (ctx->cipher.alg == CIPHER_ALG_RC4) {
395                 rx_frag_num++;
396                 if (chunk_start) {
397                         /*
398                          * for non-first RC4 chunks, use SUPDT from previous
399                          * response as key for this chunk.
400                          */
401                         cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
402                         update_key = true;
403                         cipher_parms.type = CIPHER_TYPE_UPDT;
404                 } else if (!rctx->is_encrypt) {
405                         /*
406                          * First RC4 chunk. For decrypt, key in pre-built msg
407                          * header may have been changed if encrypt required
408                          * multiple chunks. So revert the key to the
409                          * ctx->enckey value.
410                          */
411                         update_key = true;
412                         cipher_parms.type = CIPHER_TYPE_INIT;
413                 }
414         }
415
416         if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
417                 flow_log("max_payload infinite\n");
418         else
419                 flow_log("max_payload %u\n", ctx->max_payload);
420
421         flow_log("sent:%u start:%u remains:%u size:%u\n",
422                  rctx->src_sent, chunk_start, remaining, chunksize);
423
424         /* Copy SPU header template created at setkey time */
425         memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
426                sizeof(rctx->msg_buf.bcm_spu_req_hdr));
427
428         /*
429          * Pass SUPDT field as key. Key field in finish() call is only used
430          * when update_key has been set above for RC4. Will be ignored in
431          * all other cases.
432          */
433         spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
434                                    ctx->spu_req_hdr_len, !(rctx->is_encrypt),
435                                    &cipher_parms, update_key, chunksize);
436
437         atomic64_add(chunksize, &iproc_priv.bytes_out);
438
439         stat_pad_len = spu->spu_wordalign_padlen(chunksize);
440         if (stat_pad_len)
441                 rx_frag_num++;
442         pad_len = stat_pad_len;
443         if (pad_len) {
444                 tx_frag_num++;
445                 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
446                                      0, ctx->auth.alg, ctx->auth.mode,
447                                      rctx->total_sent, stat_pad_len);
448         }
449
450         spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
451                               ctx->spu_req_hdr_len);
452         packet_log("payload:\n");
453         dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
454         packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
455
456         /*
457          * Build mailbox message containing SPU request msg and rx buffers
458          * to catch response message
459          */
460         memset(mssg, 0, sizeof(*mssg));
461         mssg->type = BRCM_MESSAGE_SPU;
462         mssg->ctx = rctx;       /* Will be returned in response */
463
464         /* Create rx scatterlist to catch result */
465         rx_frag_num += rctx->dst_nents;
466
467         if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
468             spu->spu_xts_tweak_in_payload())
469                 rx_frag_num++;  /* extra sg to insert tweak */
470
471         err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
472                                           stat_pad_len);
473         if (err)
474                 return err;
475
476         /* Create tx scatterlist containing SPU request message */
477         tx_frag_num += rctx->src_nents;
478         if (spu->spu_tx_status_len())
479                 tx_frag_num++;
480
481         if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
482             spu->spu_xts_tweak_in_payload())
483                 tx_frag_num++;  /* extra sg to insert tweak */
484
485         err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
486                                           pad_len);
487         if (err)
488                 return err;
489
490         err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
491         if (unlikely(err < 0))
492                 return err;
493
494         return -EINPROGRESS;
495 }
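
/*
 * Illustrative example of the chunking above: a 4096-byte CBC request with
 * ctx->max_payload == 2048 is submitted as two SPU messages. The IV for the
 * second chunk is the last ciphertext block of the first chunk: read from
 * req->dst (the previous result) when encrypting, or from req->src when
 * decrypting.
 */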
496
497 /**
498  * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
499  * total received count for the request and updates global stats.
500  * @rctx:       Crypto request context
501  */
502 static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
503 {
504         struct spu_hw *spu = &iproc_priv.spu;
505 #ifdef DEBUG
506         struct crypto_async_request *areq = rctx->parent;
507         struct ablkcipher_request *req = ablkcipher_request_cast(areq);
508 #endif
509         struct iproc_ctx_s *ctx = rctx->ctx;
510         u32 payload_len;
511
512         /* See how much data was returned */
513         payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
514
515         /*
516          * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
517          * encrypted tweak ("i") value; we don't count those.
518          */
519         if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
520             spu->spu_xts_tweak_in_payload() &&
521             (payload_len >= SPU_XTS_TWEAK_SIZE))
522                 payload_len -= SPU_XTS_TWEAK_SIZE;
523
524         atomic64_add(payload_len, &iproc_priv.bytes_in);
525
526         flow_log("%s() offset: %u, bd_len: %u BD:\n",
527                  __func__, rctx->total_received, payload_len);
528
529         dump_sg(req->dst, rctx->total_received, payload_len);
530         if (ctx->cipher.alg == CIPHER_ALG_RC4)
531                 packet_dump("  supdt ", rctx->msg_buf.c.supdt_tweak,
532                             SPU_SUPDT_LEN);
533
534         rctx->total_received += payload_len;
535         if (rctx->total_received == rctx->total_todo) {
536                 atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
537                 atomic_inc(
538                    &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
539         }
540 }
541
542 /**
543  * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
544  * receive a SPU response message for an ahash request.
545  * @mssg:       mailbox message containing the receive sg
546  * @rctx:       crypto request context
547  * @rx_frag_num: number of scatterlist elements required to hold the
548  *              SPU response message
549  * @digestsize: length of hash digest, in bytes
550  * @stat_pad_len: Number of bytes required to pad the STAT field to
551  *              a 4-byte boundary
552  *
553  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
554  * when the request completes, whether the request is handled successfully or
555  * there is an error.
556  *
557  * Return:
558  *   0 if successful
559  *   < 0 if an error
560  */
561 static int
562 spu_ahash_rx_sg_create(struct brcm_message *mssg,
563                        struct iproc_reqctx_s *rctx,
564                        u8 rx_frag_num, unsigned int digestsize,
565                        u32 stat_pad_len)
566 {
567         struct spu_hw *spu = &iproc_priv.spu;
568         struct scatterlist *sg; /* used to build sgs in mbox message */
569         struct iproc_ctx_s *ctx = rctx->ctx;
570
571         mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
572                                 rctx->gfp);
573         if (!mssg->spu.dst)
574                 return -ENOMEM;
575
576         sg = mssg->spu.dst;
577         sg_init_table(sg, rx_frag_num);
578         /* Space for SPU message header */
579         sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
580
581         /* Space for digest */
582         sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
583
584         if (stat_pad_len)
585                 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
586
587         memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
588         sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
589         return 0;
590 }
591
592 /**
593  * spu_ahash_tx_sg_create() -  Build up the scatterlist of buffers used to send
594  * a SPU request message for an ahash request. Includes SPU message headers and
595  * the request data.
596  * @mssg:       mailbox message containing the transmit sg
597  * @rctx:       crypto request context
598  * @tx_frag_num: number of scatterlist elements required to construct the
599  *              SPU request message
600  * @spu_hdr_len: length in bytes of SPU message header
601  * @hash_carry_len: Number of bytes of data carried over from previous req
602  * @new_data_len: Number of bytes of new request data
603  * @pad_len:    Number of pad bytes
604  *
605  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
606  * when the request completes, whether the request is handled successfully or
607  * there is an error.
608  *
609  * Return:
610  *   0 if successful
611  *   < 0 if an error
612  */
613 static int
614 spu_ahash_tx_sg_create(struct brcm_message *mssg,
615                        struct iproc_reqctx_s *rctx,
616                        u8 tx_frag_num,
617                        u32 spu_hdr_len,
618                        unsigned int hash_carry_len,
619                        unsigned int new_data_len, u32 pad_len)
620 {
621         struct spu_hw *spu = &iproc_priv.spu;
622         struct scatterlist *sg; /* used to build sgs in mbox message */
623         u32 datalen;            /* Number of bytes of request data written */
624         u32 stat_len;
625
626         mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
627                                 rctx->gfp);
628         if (!mssg->spu.src)
629                 return -ENOMEM;
630
631         sg = mssg->spu.src;
632         sg_init_table(sg, tx_frag_num);
633
634         sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
635                    BCM_HDR_LEN + spu_hdr_len);
636
637         if (hash_carry_len)
638                 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
639
640         if (new_data_len) {
641                 /* Copy in each src sg entry from request, up to new_data_len */
642                 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
643                                          rctx->src_nents, new_data_len);
644                 if (datalen < new_data_len) {
645                         pr_err("%s(): failed to copy src sg to mbox msg",
646                                __func__);
647                         return -EFAULT;
648                 }
649         }
650
651         if (pad_len)
652                 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
653
654         stat_len = spu->spu_tx_status_len();
655         if (stat_len) {
656                 memset(rctx->msg_buf.tx_stat, 0, stat_len);
657                 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
658         }
659
660         return 0;
661 }
662
663 /**
664  * handle_ahash_req() - Process an asynchronous hash request from the crypto
665  * API.
666  * @rctx:  Crypto request context
667  *
668  * Builds a SPU request message embedded in a mailbox message and submits the
669  * mailbox message on a selected mailbox channel. The SPU request message is
670  * constructed as a scatterlist, including entries from the crypto API's
671  * src scatterlist to avoid copying the data to be hashed. This function is
672  * called either on the thread from the crypto API, or, in the case that the
673  * crypto API request is too large to fit in a single SPU request message,
674  * on the thread that invokes the receive callback with a response message.
675  * Because some operations require the response from one chunk before the next
676  * chunk can be submitted, we always wait for the response for the previous
677  * chunk before submitting the next chunk. Because requests are submitted in
678  * lock step like this, there is no need to synchronize access to request data
679  * structures.
680  *
681  * Return:
682  *   -EINPROGRESS: request has been submitted to SPU and response will be
683  *                 returned asynchronously
684  *   -EAGAIN:      non-final request included a small amount of data, which for
685  *                 efficiency we did not submit to the SPU, but instead stored
686  *                 to be submitted to the SPU with the next part of the request
687  *   other:        an error code
688  */
689 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
690 {
691         struct spu_hw *spu = &iproc_priv.spu;
692         struct crypto_async_request *areq = rctx->parent;
693         struct ahash_request *req = ahash_request_cast(areq);
694         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
695         struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
696         unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
697         struct iproc_ctx_s *ctx = rctx->ctx;
698
699         /* number of bytes still to be hashed in this req */
700         unsigned int nbytes_to_hash = 0;
701         int err = 0;
702         unsigned int chunksize = 0;     /* length of hash carry + new data */
703         /*
704          * length of new data, not from hash carry, to be submitted in
705          * this hw request
706          */
707         unsigned int new_data_len;
708
709         unsigned int __maybe_unused chunk_start = 0;
710         u32 db_size;     /* Length of data field, incl gcm and hash padding */
711         int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
712         u32 data_pad_len = 0;   /* length of GCM/CCM padding */
713         u32 stat_pad_len = 0;   /* length of padding to align STATUS word */
714         struct brcm_message *mssg;      /* mailbox message */
715         struct spu_request_opts req_opts;
716         struct spu_cipher_parms cipher_parms;
717         struct spu_hash_parms hash_parms;
718         struct spu_aead_parms aead_parms;
719         unsigned int local_nbuf;
720         u32 spu_hdr_len;
721         unsigned int digestsize;
722         u16 rem = 0;
723
724         /*
725          * number of entries in src and dst sg. Always includes SPU msg header.
726          * rx always includes a buffer to catch digest and STATUS.
727          */
728         u8 rx_frag_num = 3;
729         u8 tx_frag_num = 1;
730
731         flow_log("total_todo %u, total_sent %u\n",
732                  rctx->total_todo, rctx->total_sent);
733
734         memset(&req_opts, 0, sizeof(req_opts));
735         memset(&cipher_parms, 0, sizeof(cipher_parms));
736         memset(&hash_parms, 0, sizeof(hash_parms));
737         memset(&aead_parms, 0, sizeof(aead_parms));
738
739         req_opts.bd_suppress = true;
740         hash_parms.alg = ctx->auth.alg;
741         hash_parms.mode = ctx->auth.mode;
742         hash_parms.type = HASH_TYPE_NONE;
743         hash_parms.key_buf = (u8 *)ctx->authkey;
744         hash_parms.key_len = ctx->authkeylen;
745
746         /*
747          * For hash algorithms, the assignment below looks a bit odd, but
748          * it is needed for the AES-XCBC and AES-CMAC hash algorithms to
749          * differentiate between 128-, 192-, and 256-bit key values.
750          * The hash algorithm is selected based on the key size; for
751          * example, a 128-bit key selects AES-128.
752          */
753         cipher_parms.type = ctx->cipher_type;
754
755         mssg = &rctx->mb_mssg;
756         chunk_start = rctx->src_sent;
757
758         /*
759          * Compute the amount remaining to hash. This may include data
760          * carried over from previous requests.
761          */
762         nbytes_to_hash = rctx->total_todo - rctx->total_sent;
763         chunksize = nbytes_to_hash;
764         if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
765             (chunksize > ctx->max_payload))
766                 chunksize = ctx->max_payload;
767
768         /*
769          * If this is not a final request and the request data is not a multiple
770          * of a full block, then simply park the extra data and prefix it to the
771          * data for the next request.
772          */
773         if (!rctx->is_final) {
774                 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
775                 u16 new_len;  /* len of data to add to hash carry */
776
777                 rem = chunksize % blocksize;   /* remainder */
778                 if (rem) {
779                         /* chunksize not a multiple of blocksize */
780                         chunksize -= rem;
781                         if (chunksize == 0) {
782                                 /* Don't have a full block to submit to hw */
783                                 new_len = rem - rctx->hash_carry_len;
784                                 sg_copy_part_to_buf(req->src, dest, new_len,
785                                                     rctx->src_sent);
786                                 rctx->hash_carry_len = rem;
787                                 flow_log("Exiting with hash carry len: %u\n",
788                                          rctx->hash_carry_len);
789                                 packet_dump("  buf: ",
790                                             rctx->hash_carry,
791                                             rctx->hash_carry_len);
792                                 return -EAGAIN;
793                         }
794                 }
795         }
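
        /*
         * Illustrative example: for SHA-256 (64-byte blocks), a non-final
         * update of 100 bytes ends up submitting 64 bytes to the hw and
         * parking the remaining 36 bytes in hash_carry, to be prefixed to
         * the data of the next request.
         */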
796
797         /* if we have hash carry, then prefix it to the data in this request */
798         local_nbuf = rctx->hash_carry_len;
799         rctx->hash_carry_len = 0;
800         if (local_nbuf)
801                 tx_frag_num++;
802         new_data_len = chunksize - local_nbuf;
803
804         /* Count number of sg entries to be used in this request */
805         rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
806                                        new_data_len);
807
808         /* AES hashing keeps key size in type field, so need to copy it here */
809         if (hash_parms.alg == HASH_ALG_AES)
810                 hash_parms.type = (enum hash_type)cipher_parms.type;
811         else
812                 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
813
814         digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
815                                           hash_parms.type);
816         hash_parms.digestsize = digestsize;
817
818         /* update the indexes */
819         rctx->total_sent += chunksize;
820         /* if you sent a prebuf then that wasn't from this req->src */
821         rctx->src_sent += new_data_len;
822
823         if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
824                 hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
825                                                            hash_parms.mode,
826                                                            chunksize,
827                                                            blocksize);
828
829         /*
830          * If a non-first chunk, then include the digest returned from the
831          * previous chunk so that hw can add to it (except for AES types).
832          */
833         if ((hash_parms.type == HASH_TYPE_UPDT) &&
834             (hash_parms.alg != HASH_ALG_AES)) {
835                 hash_parms.key_buf = rctx->incr_hash;
836                 hash_parms.key_len = digestsize;
837         }
838
839         atomic64_add(chunksize, &iproc_priv.bytes_out);
840
841         flow_log("%s() final: %u nbuf: %u ",
842                  __func__, rctx->is_final, local_nbuf);
843
844         if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
845                 flow_log("max_payload infinite\n");
846         else
847                 flow_log("max_payload %u\n", ctx->max_payload);
848
849         flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
850
851         /* Prepend SPU header with type 3 BCM header */
852         memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
853
854         hash_parms.prebuf_len = local_nbuf;
855         spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
856                                               BCM_HDR_LEN,
857                                               &req_opts, &cipher_parms,
858                                               &hash_parms, &aead_parms,
859                                               new_data_len);
860
861         if (spu_hdr_len == 0) {
862                 pr_err("Failed to create SPU request header\n");
863                 return -EFAULT;
864         }
865
866         /*
867          * Determine total length of padding required. Put all padding in one
868          * buffer.
869          */
870         data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
871         db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
872                                    0, 0, hash_parms.pad_len);
873         if (spu->spu_tx_status_len())
874                 stat_pad_len = spu->spu_wordalign_padlen(db_size);
875         if (stat_pad_len)
876                 rx_frag_num++;
877         pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
878         if (pad_len) {
879                 tx_frag_num++;
880                 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
881                                      hash_parms.pad_len, ctx->auth.alg,
882                                      ctx->auth.mode, rctx->total_sent,
883                                      stat_pad_len);
884         }
885
886         spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
887                               spu_hdr_len);
888         packet_dump("    prebuf: ", rctx->hash_carry, local_nbuf);
889         flow_log("Data:\n");
890         dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
891         packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
892
893         /*
894          * Build mailbox message containing SPU request msg and rx buffers
895          * to catch response message
896          */
897         memset(mssg, 0, sizeof(*mssg));
898         mssg->type = BRCM_MESSAGE_SPU;
899         mssg->ctx = rctx;       /* Will be returned in response */
900
901         /* Create rx scatterlist to catch result */
902         err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
903                                      stat_pad_len);
904         if (err)
905                 return err;
906
907         /* Create tx scatterlist containing SPU request message */
908         tx_frag_num += rctx->src_nents;
909         if (spu->spu_tx_status_len())
910                 tx_frag_num++;
911         err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
912                                      local_nbuf, new_data_len, pad_len);
913         if (err)
914                 return err;
915
916         err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
917         if (unlikely(err < 0))
918                 return err;
919
920         return -EINPROGRESS;
921 }
922
923 /**
924  * spu_hmac_outer_hash() - Request synchronous software computation of the
925  * outer hash for an HMAC request.
926  * @req:  The HMAC request from the crypto API
927  * @ctx:  The session context
928  *
929  * Return: 0 if synchronous hash operation successful
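 * The outer hash follows the HMAC construction
 * HMAC(K, m) = H((K XOR opad) || H((K XOR ipad) || m)): the SPU has already
 * produced the inner hash in req->result, and this routine hashes ctx->opad
 * (the key XORed with the HMAC opad pattern) followed by that inner digest
 * in software.
 *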
930  *         -EINVAL if the hash algo is unrecognized
931  *         any other value indicates an error
932  */
933 static int spu_hmac_outer_hash(struct ahash_request *req,
934                                struct iproc_ctx_s *ctx)
935 {
936         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
937         unsigned int blocksize =
938                 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
939         int rc;
940
941         switch (ctx->auth.alg) {
942         case HASH_ALG_MD5:
943                 rc = do_shash("md5", req->result, ctx->opad, blocksize,
944                               req->result, ctx->digestsize, NULL, 0);
945                 break;
946         case HASH_ALG_SHA1:
947                 rc = do_shash("sha1", req->result, ctx->opad, blocksize,
948                               req->result, ctx->digestsize, NULL, 0);
949                 break;
950         case HASH_ALG_SHA224:
951                 rc = do_shash("sha224", req->result, ctx->opad, blocksize,
952                               req->result, ctx->digestsize, NULL, 0);
953                 break;
954         case HASH_ALG_SHA256:
955                 rc = do_shash("sha256", req->result, ctx->opad, blocksize,
956                               req->result, ctx->digestsize, NULL, 0);
957                 break;
958         case HASH_ALG_SHA384:
959                 rc = do_shash("sha384", req->result, ctx->opad, blocksize,
960                               req->result, ctx->digestsize, NULL, 0);
961                 break;
962         case HASH_ALG_SHA512:
963                 rc = do_shash("sha512", req->result, ctx->opad, blocksize,
964                               req->result, ctx->digestsize, NULL, 0);
965                 break;
966         default:
967                 pr_err("%s() Error : unknown hmac type\n", __func__);
968                 rc = -EINVAL;
969         }
970         return rc;
971 }
972
973 /**
974  * ahash_req_done() - Process a hash result from the SPU hardware.
975  * @rctx: Crypto request context
976  *
977  * Return: 0 if successful
978  *         < 0 if an error
979  */
980 static int ahash_req_done(struct iproc_reqctx_s *rctx)
981 {
982         struct spu_hw *spu = &iproc_priv.spu;
983         struct crypto_async_request *areq = rctx->parent;
984         struct ahash_request *req = ahash_request_cast(areq);
985         struct iproc_ctx_s *ctx = rctx->ctx;
986         int err;
987
988         memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
989
990         if (spu->spu_type == SPU_TYPE_SPUM) {
991                 /* byte swap the output from the UPDT function to network byte
992                  * order
993                  */
994                 if (ctx->auth.alg == HASH_ALG_MD5) {
995                         __swab32s((u32 *)req->result);
996                         __swab32s(((u32 *)req->result) + 1);
997                         __swab32s(((u32 *)req->result) + 2);
998                         __swab32s(((u32 *)req->result) + 3);
999                         __swab32s(((u32 *)req->result) + 4);
1000                 }
1001         }
1002
1003         flow_dump("  digest ", req->result, ctx->digestsize);
1004
1005         /* if this is an HMAC then do the outer hash */
1006         if (rctx->is_sw_hmac) {
1007                 err = spu_hmac_outer_hash(req, ctx);
1008                 if (err < 0)
1009                         return err;
1010                 flow_dump("  hmac: ", req->result, ctx->digestsize);
1011         }
1012
1013         if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
1014                 atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
1015                 atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
1016         } else {
1017                 atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
1018                 atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
1019         }
1020
1021         return 0;
1022 }
1023
1024 /**
1025  * handle_ahash_resp() - Process a SPU response message for a hash request.
1026  * Checks if the entire crypto API request has been processed, and if so,
1027  * invokes post processing on the result.
1028  * @rctx: Crypto request context
1029  */
1030 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
1031 {
1032         struct iproc_ctx_s *ctx = rctx->ctx;
1033 #ifdef DEBUG
1034         struct crypto_async_request *areq = rctx->parent;
1035         struct ahash_request *req = ahash_request_cast(areq);
1036         struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1037         unsigned int blocksize =
1038                 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1039 #endif
1040         /*
1041          * Save hash to use as input to next op if incremental. Might be copying
1042          * too much, but that's easier than figuring out actual digest size here
1043          */
1044         memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1045
1046         flow_log("%s() blocksize:%u digestsize:%u\n",
1047                  __func__, blocksize, ctx->digestsize);
1048
1049         atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1050
1051         if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1052                 ahash_req_done(rctx);
1053 }
1054
1055 /**
1056  * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1057  * a SPU response message for an AEAD request. Includes buffers to catch SPU
1058  * message headers and the response data.
1059  * @mssg:       mailbox message containing the receive sg
 * @req:        AEAD request from the crypto API
1060  * @rctx:       crypto request context
1061  * @rx_frag_num: number of scatterlist elements required to hold the
1062  *              SPU response message
1063  * @assoc_len:  Length of associated data included in the crypto request
1064  * @ret_iv_len: Length of IV returned in response
1065  * @resp_len:   Number of bytes of response data expected to be written to
1066  *              dst buffer from crypto API
1067  * @digestsize: Length of hash digest, in bytes
1068  * @stat_pad_len: Number of bytes required to pad the STAT field to
1069  *              a 4-byte boundary
1070  *
1071  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1072  * when the request completes, whether the request is handled successfully or
1073  * there is an error.
1074  *
1075  * Returns:
1076  *   0 if successful
1077  *   < 0 if an error
1078  */
1079 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1080                                  struct aead_request *req,
1081                                  struct iproc_reqctx_s *rctx,
1082                                  u8 rx_frag_num,
1083                                  unsigned int assoc_len,
1084                                  u32 ret_iv_len, unsigned int resp_len,
1085                                  unsigned int digestsize, u32 stat_pad_len)
1086 {
1087         struct spu_hw *spu = &iproc_priv.spu;
1088         struct scatterlist *sg; /* used to build sgs in mbox message */
1089         struct iproc_ctx_s *ctx = rctx->ctx;
1090         u32 datalen;            /* Number of bytes of response data expected */
1091         u32 assoc_buf_len;
1092         u8 data_padlen = 0;
1093
1094         if (ctx->is_rfc4543) {
1095                 /* RFC4543: only pad after data, not after AAD */
1096                 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1097                                                           assoc_len + resp_len);
1098                 assoc_buf_len = assoc_len;
1099         } else {
1100                 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1101                                                           resp_len);
1102                 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1103                                                 assoc_len, ret_iv_len,
1104                                                 rctx->is_encrypt);
1105         }
1106
1107         if (ctx->cipher.mode == CIPHER_MODE_CCM)
1108                 /* ICV (after data) must be in the next 32-bit word for CCM */
1109                 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1110                                                          resp_len +
1111                                                          data_padlen);
1112
1113         if (data_padlen)
1114                 /* have to catch gcm pad in separate buffer */
1115                 rx_frag_num++;
1116
1117         mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1118                                 rctx->gfp);
1119         if (!mssg->spu.dst)
1120                 return -ENOMEM;
1121
1122         sg = mssg->spu.dst;
1123         sg_init_table(sg, rx_frag_num);
1124
1125         /* Space for SPU message header */
1126         sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1127
1128         if (assoc_buf_len) {
1129                 /*
1130                  * Don't write directly to req->dst, because SPU may pad the
1131                  * assoc data in the response
1132                  */
1133                 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1134                 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1135         }
1136
1137         if (resp_len) {
1138                 /*
1139                  * Copy in each dst sg entry from request, up to chunksize.
1140                  * dst sg catches just the data. digest caught in separate buf.
1141                  */
1142                 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1143                                          rctx->dst_nents, resp_len);
1144                 if (datalen < (resp_len)) {
1145                         pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1146                                __func__, resp_len, datalen);
1147                         return -EFAULT;
1148                 }
1149         }
1150
1151         /* If GCM/CCM data is padded, catch padding in separate buffer */
1152         if (data_padlen) {
1153                 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1154                 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1155         }
1156
1157         /* Always catch ICV in separate buffer */
1158         sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1159
1160         flow_log("stat_pad_len %u\n", stat_pad_len);
1161         if (stat_pad_len) {
1162                 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1163                 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1164         }
1165
1166         memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1167         sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1168
1169         return 0;
1170 }
1171
1172 /**
1173  * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1174  * SPU request message for an AEAD request. Includes SPU message headers and the
1175  * request data.
1176  * @mssg:       mailbox message containing the transmit sg
1177  * @rctx:       crypto request context
1178  * @tx_frag_num: number of scatterlist elements required to construct the
1179  *              SPU request message
1180  * @spu_hdr_len: length of SPU message header in bytes
1181  * @assoc:      crypto API associated data scatterlist
1182  * @assoc_len:  length of associated data
1183  * @assoc_nents: number of scatterlist entries containing assoc data
1184  * @aead_iv_len: length of AEAD IV, if included
1185  * @chunksize:  Number of bytes of request data
1186  * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1187  * @pad_len:    Number of pad bytes
1188  * @incl_icv:   If true, write separate ICV buffer after data and
1189  *              any padding
1190  *
1191  * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1192  * when the request completes, whether the request is handled successfully or
1193  * there is an error.
1194  *
1195  * Return:
1196  *   0 if successful
1197  *   < 0 if an error
1198  */
1199 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1200                                  struct iproc_reqctx_s *rctx,
1201                                  u8 tx_frag_num,
1202                                  u32 spu_hdr_len,
1203                                  struct scatterlist *assoc,
1204                                  unsigned int assoc_len,
1205                                  int assoc_nents,
1206                                  unsigned int aead_iv_len,
1207                                  unsigned int chunksize,
1208                                  u32 aad_pad_len, u32 pad_len, bool incl_icv)
1209 {
1210         struct spu_hw *spu = &iproc_priv.spu;
1211         struct scatterlist *sg; /* used to build sgs in mbox message */
1212         struct scatterlist *assoc_sg = assoc;
1213         struct iproc_ctx_s *ctx = rctx->ctx;
1214         u32 datalen;            /* Number of bytes of data to write */
1215         u32 written;            /* Number of bytes of data written */
1216         u32 assoc_offset = 0;
1217         u32 stat_len;
1218
1219         mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1220                                 rctx->gfp);
1221         if (!mssg->spu.src)
1222                 return -ENOMEM;
1223
1224         sg = mssg->spu.src;
1225         sg_init_table(sg, tx_frag_num);
1226
1227         sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1228                    BCM_HDR_LEN + spu_hdr_len);
1229
1230         if (assoc_len) {
1231                 /* Copy in each associated data sg entry from request */
1232                 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1233                                          assoc_nents, assoc_len);
1234                 if (written < assoc_len) {
1235                         pr_err("%s(): failed to copy assoc sg to mbox msg",
1236                                __func__);
1237                         return -EFAULT;
1238                 }
1239         }
1240
1241         if (aead_iv_len)
1242                 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1243
1244         if (aad_pad_len) {
1245                 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1246                 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1247         }
1248
1249         datalen = chunksize;
1250         if ((chunksize > ctx->digestsize) && incl_icv)
1251                 datalen -= ctx->digestsize;
1252         if (datalen) {
1253                 /* For aead, a single msg should consume the entire src sg */
1254                 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1255                                          rctx->src_nents, datalen);
1256                 if (written < datalen) {
1257                         pr_err("%s(): failed to copy src sg to mbox msg",
1258                                __func__);
1259                         return -EFAULT;
1260                 }
1261         }
1262
1263         if (pad_len) {
1264                 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1265                 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1266         }
1267
1268         if (incl_icv)
1269                 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1270
1271         stat_len = spu->spu_tx_status_len();
1272         if (stat_len) {
1273                 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1274                 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1275         }
1276         return 0;
1277 }
1278
1279 /**
1280  * handle_aead_req() - Submit a SPU request message for the next chunk of the
1281  * current AEAD request.
1282  * @rctx:  Crypto request context
1283  *
1284  * Unlike other operation types, we assume the length of the request fits in
1285  * a single SPU request message. aead_enqueue() makes sure this is true.
1286  * Comments for other op types regarding threads apply here as well.
1287  *
1288  * Unlike incremental hash ops, where the spu returns the entire hash for
1289  * truncated algs like sha-224, the SPU returns just the truncated hash in
1290  * response to aead requests. So digestsize is always ctx->digestsize here.
1291  *
1292  * Return: -EINPROGRESS: crypto request has been accepted and result will be
1293  *                       returned asynchronously
1294  *         Any other value indicates an error
1295  */
1296 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1297 {
1298         struct spu_hw *spu = &iproc_priv.spu;
1299         struct crypto_async_request *areq = rctx->parent;
1300         struct aead_request *req = container_of(areq,
1301                                                 struct aead_request, base);
1302         struct iproc_ctx_s *ctx = rctx->ctx;
1303         int err;
1304         unsigned int chunksize;
1305         unsigned int resp_len;
1306         u32 spu_hdr_len;
1307         u32 db_size;
1308         u32 stat_pad_len;
1309         u32 pad_len;
1310         struct brcm_message *mssg;      /* mailbox message */
1311         struct spu_request_opts req_opts;
1312         struct spu_cipher_parms cipher_parms;
1313         struct spu_hash_parms hash_parms;
1314         struct spu_aead_parms aead_parms;
1315         int assoc_nents = 0;
1316         bool incl_icv = false;
1317         unsigned int digestsize = ctx->digestsize;
1318
1319         /* number of entries in src and dst sg. Always includes SPU msg header.
1320          */
1321         u8 rx_frag_num = 2;     /* and STATUS */
1322         u8 tx_frag_num = 1;
1323
1324         /* doing the whole thing at once */
1325         chunksize = rctx->total_todo;
1326
1327         flow_log("%s: chunksize %u\n", __func__, chunksize);
1328
1329         memset(&req_opts, 0, sizeof(req_opts));
1330         memset(&hash_parms, 0, sizeof(hash_parms));
1331         memset(&aead_parms, 0, sizeof(aead_parms));
1332
1333         req_opts.is_inbound = !(rctx->is_encrypt);
1334         req_opts.auth_first = ctx->auth_first;
1335         req_opts.is_aead = true;
1336         req_opts.is_esp = ctx->is_esp;
1337
1338         cipher_parms.alg = ctx->cipher.alg;
1339         cipher_parms.mode = ctx->cipher.mode;
1340         cipher_parms.type = ctx->cipher_type;
1341         cipher_parms.key_buf = ctx->enckey;
1342         cipher_parms.key_len = ctx->enckeylen;
1343         cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1344         cipher_parms.iv_len = rctx->iv_ctr_len;
1345
1346         hash_parms.alg = ctx->auth.alg;
1347         hash_parms.mode = ctx->auth.mode;
1348         hash_parms.type = HASH_TYPE_NONE;
1349         hash_parms.key_buf = (u8 *)ctx->authkey;
1350         hash_parms.key_len = ctx->authkeylen;
1351         hash_parms.digestsize = digestsize;
1352
1353         if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1354             (ctx->authkeylen < SHA224_DIGEST_SIZE))
1355                 hash_parms.key_len = SHA224_DIGEST_SIZE;
1356
1357         aead_parms.assoc_size = req->assoclen;
1358         if (ctx->is_esp && !ctx->is_rfc4543) {
1359                 /*
1360                  * The 8-byte IV is included in the assoc data in the
1361                  * request. SPU2 expects the AAD to include just the SPI
1362                  * and seqno, so subtract off the IV length.
1363                  */
1364                 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1365
1366                 if (rctx->is_encrypt) {
1367                         aead_parms.return_iv = true;
1368                         aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1369                         aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1370                 }
1371         } else {
1372                 aead_parms.ret_iv_len = 0;
1373         }
1374
1375         /*
1376          * Count number of sg entries from the crypto API request that are to
1377          * be included in this mailbox message. For dst sg, don't count space
1378          * for digest. Digest gets caught in a separate buffer and copied back
1379          * to dst sg when processing response.
1380          */
1381         rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1382         rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1383         if (aead_parms.assoc_size)
1384                 assoc_nents = spu_sg_count(rctx->assoc, 0,
1385                                            aead_parms.assoc_size);
1386
1387         mssg = &rctx->mb_mssg;
1388
1389         rctx->total_sent = chunksize;
1390         rctx->src_sent = chunksize;
1391         if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1392                                     aead_parms.assoc_size,
1393                                     aead_parms.ret_iv_len,
1394                                     rctx->is_encrypt))
1395                 rx_frag_num++;
1396
1397         aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1398                                                 rctx->iv_ctr_len);
1399
1400         if (ctx->auth.alg == HASH_ALG_AES)
1401                 hash_parms.type = (enum hash_type)ctx->cipher_type;
1402
1403         /* General case AAD padding (CCM and RFC4543 special cases below) */
1404         aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1405                                                  aead_parms.assoc_size);
1406
1407         /* General case data padding (CCM decrypt special case below) */
1408         aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1409                                                            chunksize);
1410
1411         if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1412                 /*
1413                  * for CCM, AAD len + 2 (rather than AAD len) needs to be
1414                  * 128-bit aligned
1415                  */
1416                 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1417                                          ctx->cipher.mode,
1418                                          aead_parms.assoc_size + 2);
1419
1420                 /*
1421                  * And when decrypting CCM, need to pad without including
1422                  * size of ICV which is tacked on to end of chunk
1423                  */
1424                 if (!rctx->is_encrypt)
1425                         aead_parms.data_pad_len =
1426                                 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1427                                                         chunksize - digestsize);
1428
1429                 /* CCM also requires software to rewrite portions of IV: */
1430                 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1431                                        chunksize, rctx->is_encrypt,
1432                                        ctx->is_esp);
1433         }
1434
1435         if (ctx->is_rfc4543) {
1436                 /*
1437                  * RFC4543: data is included in AAD, so don't pad after AAD
1438                  * and pad data based on both AAD + data size
1439                  */
1440                 aead_parms.aad_pad_len = 0;
1441                 if (!rctx->is_encrypt)
1442                         aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1443                                         ctx->cipher.mode,
1444                                         aead_parms.assoc_size + chunksize -
1445                                         digestsize);
1446                 else
1447                         aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1448                                         ctx->cipher.mode,
1449                                         aead_parms.assoc_size + chunksize);
1450
1451                 req_opts.is_rfc4543 = true;
1452         }
1453
1454         if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1455                 incl_icv = true;
1456                 tx_frag_num++;
1457                 /* Copy ICV from end of src scatterlist to digest buf */
1458                 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1459                                     req->assoclen + rctx->total_sent -
1460                                     digestsize);
1461         }
1462
1463         atomic64_add(chunksize, &iproc_priv.bytes_out);
1464
1465         flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1466
1467         /* Prepend SPU header with type 3 BCM header */
1468         memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1469
1470         spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1471                                               BCM_HDR_LEN, &req_opts,
1472                                               &cipher_parms, &hash_parms,
1473                                               &aead_parms, chunksize);
1474
1475         /* Determine total length of padding. Put all padding in one buffer. */
1476         db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1477                                    chunksize, aead_parms.aad_pad_len,
1478                                    aead_parms.data_pad_len, 0);
1479
1480         stat_pad_len = spu->spu_wordalign_padlen(db_size);
1481
1482         if (stat_pad_len)
1483                 rx_frag_num++;
1484         pad_len = aead_parms.data_pad_len + stat_pad_len;
1485         if (pad_len) {
1486                 tx_frag_num++;
1487                 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1488                                      aead_parms.data_pad_len, 0,
1489                                      ctx->auth.alg, ctx->auth.mode,
1490                                      rctx->total_sent, stat_pad_len);
1491         }
1492
1493         spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1494                               spu_hdr_len);
1495         dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1496         packet_dump("    aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1497         packet_log("BD:\n");
1498         dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1499         packet_dump("   pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1500
1501         /*
1502          * Build mailbox message containing SPU request msg and rx buffers
1503          * to catch response message
1504          */
1505         memset(mssg, 0, sizeof(*mssg));
1506         mssg->type = BRCM_MESSAGE_SPU;
1507         mssg->ctx = rctx;       /* Will be returned in response */
1508
1509         /* Create rx scatterlist to catch result */
1510         rx_frag_num += rctx->dst_nents;
1511         resp_len = chunksize;
1512
1513         /*
1514          * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1515          * padding. Have to for SHA-224 and other truncated SHAs because SPU
1516          * sends entire digest back.
1517          */
1518         rx_frag_num++;
1519
1520         if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1521              (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1522                 /*
1523                  * Input is ciphertext plus ICV, but the ICV is not
1524                  * included in the output.
1525                  */
1526                 resp_len -= ctx->digestsize;
1527                 if (resp_len == 0)
1528                         /* no rx frags to catch output data */
1529                         rx_frag_num -= rctx->dst_nents;
1530         }
1531
1532         err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1533                                     aead_parms.assoc_size,
1534                                     aead_parms.ret_iv_len, resp_len, digestsize,
1535                                     stat_pad_len);
1536         if (err)
1537                 return err;
1538
1539         /* Create tx scatterlist containing SPU request message */
1540         tx_frag_num += rctx->src_nents;
1541         tx_frag_num += assoc_nents;
1542         if (aead_parms.aad_pad_len)
1543                 tx_frag_num++;
1544         if (aead_parms.iv_len)
1545                 tx_frag_num++;
1546         if (spu->spu_tx_status_len())
1547                 tx_frag_num++;
1548         err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1549                                     rctx->assoc, aead_parms.assoc_size,
1550                                     assoc_nents, aead_parms.iv_len, chunksize,
1551                                     aead_parms.aad_pad_len, pad_len, incl_icv);
1552         if (err)
1553                 return err;
1554
1555         err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1556         if (unlikely(err < 0))
1557                 return err;
1558
1559         return -EINPROGRESS;
1560 }
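
/*
 * Sketch of the padding arithmetic behind the aad_pad_len/data_pad_len values
 * computed above, assuming the SPU pads GCM/CCM data out to the 16-byte AES
 * block boundary. This is an illustration of the math only; the hardware-
 * specific rules live in spu_gcm_ccm_pad_len().
 */
static u32 example_gcm_ccm_pad_len(u32 data_size)
{
	const u32 align = 16;	/* AES_BLOCK_SIZE */

	return (align - (data_size % align)) % align;
}
/*
 * e.g. example_gcm_ccm_pad_len(20) == 12. For CCM the AAD pad is computed on
 * assoc_size + 2 (see the special case above), presumably because CCM's B1
 * block encodes the AAD length in 2 bytes ahead of the AAD itself.
 */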
1561
1562 /**
1563  * handle_aead_resp() - Process a SPU response message for an AEAD request.
1564  * @rctx:  Crypto request context
1565  */
1566 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1567 {
1568         struct spu_hw *spu = &iproc_priv.spu;
1569         struct crypto_async_request *areq = rctx->parent;
1570         struct aead_request *req = container_of(areq,
1571                                                 struct aead_request, base);
1572         struct iproc_ctx_s *ctx = rctx->ctx;
1573         u32 payload_len;
1574         unsigned int icv_offset;
1575         u32 result_len;
1576
1577         /* See how much data was returned */
1578         payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1579         flow_log("payload_len %u\n", payload_len);
1580
1581         /* only count payload */
1582         atomic64_add(payload_len, &iproc_priv.bytes_in);
1583
1584         if (req->assoclen)
1585                 packet_dump("  assoc_data ", rctx->msg_buf.a.resp_aad,
1586                             req->assoclen);
1587
1588         /*
1589          * If encrypting, copy the ICV back to the destination buffer. In
1590          * the decrypt case, the SPU gives us back the digest, but the
1591          * crypto API doesn't expect the ICV in the dst buffer.
1592          */
1593         result_len = req->cryptlen;
1594         if (rctx->is_encrypt) {
1595                 icv_offset = req->assoclen + rctx->total_sent;
1596                 packet_dump("  ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1597                 flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1598                 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1599                                       ctx->digestsize, icv_offset);
1600                 result_len += ctx->digestsize;
1601         }
1602
1603         packet_log("response data:  ");
1604         dump_sg(req->dst, req->assoclen, result_len);
1605
1606         atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1607         if (ctx->cipher.alg == CIPHER_ALG_AES) {
1608                 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1609                         atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1610                 else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1611                         atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1612                 else
1613                         atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1614         } else {
1615                 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1616         }
1617 }
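
/*
 * Illustrative sketch: copying an ICV from a linear buffer into a dst
 * scatterlist at a byte offset, as done above via the driver-local helper
 * sg_copy_part_from_buf(). This version uses the generic scatterlist API
 * (sg_nents()/sg_pcopy_from_buffer()); the function name is hypothetical.
 */
static int example_copy_icv_to_sg(struct scatterlist *dst, const u8 *digest,
				  unsigned int digestsize, unsigned int offset)
{
	size_t copied;

	copied = sg_pcopy_from_buffer(dst, sg_nents(dst), digest, digestsize,
				      offset);
	return (copied == digestsize) ? 0 : -EINVAL;
}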
1618
1619 /**
1620  * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1621  * @rctx:  request context
1622  *
1623  * Mailbox scatterlists are allocated for each chunk. So free them after
1624  * processing each chunk.
1625  */
1626 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1627 {
1628         /* mailbox message used to tx request */
1629         struct brcm_message *mssg = &rctx->mb_mssg;
1630
1631         kfree(mssg->spu.src);
1632         kfree(mssg->spu.dst);
1633         memset(mssg, 0, sizeof(struct brcm_message));
1634 }
1635
1636 /**
1637  * finish_req() - Used to invoke the complete callback from the requester when
1638  * a request has been handled asynchronously.
1639  * @rctx:  Request context
1640  * @err:   Indicates whether the request was successful or not
1641  *
1642  * Ensures that cleanup has been done for request
1643  */
1644 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1645 {
1646         struct crypto_async_request *areq = rctx->parent;
1647
1648         flow_log("%s() err:%d\n\n", __func__, err);
1649
1650         /* No harm done if already called */
1651         spu_chunk_cleanup(rctx);
1652
1653         if (areq)
1654                 areq->complete(areq, err);
1655 }
1656
1657 /**
1658  * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1659  * @cl:         mailbox client structure for SPU driver
1660  * @msg:        mailbox message containing SPU response
1661  */
1662 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1663 {
1664         struct spu_hw *spu = &iproc_priv.spu;
1665         struct brcm_message *mssg = msg;
1666         struct iproc_reqctx_s *rctx;
1667         int err = 0;
1668
1669         rctx = mssg->ctx;
1670         if (unlikely(!rctx)) {
1671                 /* This is fatal */
1672                 pr_err("%s(): no request context\n", __func__);
1673                 err = -EFAULT;
1674                 goto cb_finish;
1675         }
1676
1677         /* process the SPU status */
1678         err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1679         if (err != 0) {
1680                 if (err == SPU_INVALID_ICV)
1681                         atomic_inc(&iproc_priv.bad_icv);
1682                 err = -EBADMSG;
1683                 goto cb_finish;
1684         }
1685
1686         /* Process the SPU response message */
1687         switch (rctx->ctx->alg->type) {
1688         case CRYPTO_ALG_TYPE_ABLKCIPHER:
1689                 handle_ablkcipher_resp(rctx);
1690                 break;
1691         case CRYPTO_ALG_TYPE_AHASH:
1692                 handle_ahash_resp(rctx);
1693                 break;
1694         case CRYPTO_ALG_TYPE_AEAD:
1695                 handle_aead_resp(rctx);
1696                 break;
1697         default:
1698                 err = -EINVAL;
1699                 goto cb_finish;
1700         }
1701
1702         /*
1703          * If this response does not complete the request, then send the next
1704          * request chunk.
1705          */
1706         if (rctx->total_sent < rctx->total_todo) {
1707                 /* Deallocate anything specific to previous chunk */
1708                 spu_chunk_cleanup(rctx);
1709
1710                 switch (rctx->ctx->alg->type) {
1711                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1712                         err = handle_ablkcipher_req(rctx);
1713                         break;
1714                 case CRYPTO_ALG_TYPE_AHASH:
1715                         err = handle_ahash_req(rctx);
1716                         if (err == -EAGAIN)
1717                                 /*
1718                                  * we saved data in hash carry, but tell crypto
1719                                  * API we successfully completed request.
1720                                  */
1721                                 err = 0;
1722                         break;
1723                 case CRYPTO_ALG_TYPE_AEAD:
1724                         err = handle_aead_req(rctx);
1725                         break;
1726                 default:
1727                         err = -EINVAL;
1728                 }
1729
1730                 if (err == -EINPROGRESS)
1731                         /* Successfully submitted request for next chunk */
1732                         return;
1733         }
1734
1735 cb_finish:
1736         finish_req(rctx, err);
1737 }
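
/*
 * Sketch of how a mailbox client with an rx callback such as
 * spu_rx_callback() is typically registered. The driver performs its real
 * channel setup elsewhere in this file; this is only an illustration of the
 * mailbox_client API (requires <linux/mailbox_client.h>), and the device
 * pointer and channel index are placeholders.
 */
static struct mbox_chan *example_request_spu_channel(struct device *dev,
						     int chan_idx)
{
	struct mbox_client *cl;

	cl = devm_kzalloc(dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return ERR_PTR(-ENOMEM);

	cl->dev = dev;
	cl->rx_callback = spu_rx_callback;	/* responses delivered here */
	cl->tx_block = false;			/* don't block in mbox_send_message() */

	return mbox_request_channel(cl, chan_idx);
}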
1738
1739 /* ==================== Kernel Cryptographic API ==================== */
1740
1741 /**
1742  * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request.
1743  * @req:        Crypto API request
1744  * @encrypt:    true if encrypting; false if decrypting
1745  *
1746  * Return: -EINPROGRESS if request accepted and result will be returned
1747  *                      asynchronously
1748  *         < 0 if an error
1749  */
1750 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt)
1751 {
1752         struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req);
1753         struct iproc_ctx_s *ctx =
1754             crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
1755         int err;
1756
1757         flow_log("%s() enc:%u\n", __func__, encrypt);
1758
1759         rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1760                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1761         rctx->parent = &req->base;
1762         rctx->is_encrypt = encrypt;
1763         rctx->bd_suppress = false;
1764         rctx->total_todo = req->nbytes;
1765         rctx->src_sent = 0;
1766         rctx->total_sent = 0;
1767         rctx->total_received = 0;
1768         rctx->ctx = ctx;
1769
1770         /* Initialize current position in src and dst scatterlists */
1771         rctx->src_sg = req->src;
1772         rctx->src_nents = 0;
1773         rctx->src_skip = 0;
1774         rctx->dst_sg = req->dst;
1775         rctx->dst_nents = 0;
1776         rctx->dst_skip = 0;
1777
1778         if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1779             ctx->cipher.mode == CIPHER_MODE_CTR ||
1780             ctx->cipher.mode == CIPHER_MODE_OFB ||
1781             ctx->cipher.mode == CIPHER_MODE_XTS ||
1782             ctx->cipher.mode == CIPHER_MODE_GCM ||
1783             ctx->cipher.mode == CIPHER_MODE_CCM) {
1784                 rctx->iv_ctr_len =
1785                     crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req));
1786                 memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len);
1787         } else {
1788                 rctx->iv_ctr_len = 0;
1789         }
1790
1791         /* Choose a SPU to process this request */
1792         rctx->chan_idx = select_channel();
1793         err = handle_ablkcipher_req(rctx);
1794         if (err != -EINPROGRESS)
1795                 /* synchronous result */
1796                 spu_chunk_cleanup(rctx);
1797
1798         return err;
1799 }
1800
1801 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1802                       unsigned int keylen)
1803 {
1804         struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1805         u32 tmp[DES_EXPKEY_WORDS];
1806
1807         if (keylen == DES_KEY_SIZE) {
1808                 if (des_ekey(tmp, key) == 0) {
1809                         if (crypto_ablkcipher_get_flags(cipher) &
1810                             CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
1811                                 u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
1812
1813                                 crypto_ablkcipher_set_flags(cipher, flags);
1814                                 return -EINVAL;
1815                         }
1816                 }
1817
1818                 ctx->cipher_type = CIPHER_TYPE_DES;
1819         } else {
1820                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1821                 return -EINVAL;
1822         }
1823         return 0;
1824 }
1825
1826 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1827                            unsigned int keylen)
1828 {
1829         struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1830
1831         if (keylen == (DES_KEY_SIZE * 3)) {
1832                 u32 flags;
1833                 int ret;
1834
1835                 flags = crypto_ablkcipher_get_flags(cipher);
1836                 ret = __des3_verify_key(&flags, key);
1837                 if (unlikely(ret)) {
1838                         crypto_ablkcipher_set_flags(cipher, flags);
1839                         return ret;
1840                 }
1841
1842                 ctx->cipher_type = CIPHER_TYPE_3DES;
1843         } else {
1844                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1845                 return -EINVAL;
1846         }
1847         return 0;
1848 }
1849
1850 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1851                       unsigned int keylen)
1852 {
1853         struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1854
1855         if (ctx->cipher.mode == CIPHER_MODE_XTS)
1856                 /* XTS includes two keys of equal length */
1857                 keylen = keylen / 2;
1858
1859         switch (keylen) {
1860         case AES_KEYSIZE_128:
1861                 ctx->cipher_type = CIPHER_TYPE_AES128;
1862                 break;
1863         case AES_KEYSIZE_192:
1864                 ctx->cipher_type = CIPHER_TYPE_AES192;
1865                 break;
1866         case AES_KEYSIZE_256:
1867                 ctx->cipher_type = CIPHER_TYPE_AES256;
1868                 break;
1869         default:
1870                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1871                 return -EINVAL;
1872         }
1873         WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1874                 ((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1875         return 0;
1876 }
1877
1878 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1879                       unsigned int keylen)
1880 {
1881         struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1882         int i;
1883
1884         ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE;
1885
1886         ctx->enckey[0] = 0x00;  /* 0x00 */
1887         ctx->enckey[1] = 0x00;  /* i    */
1888         ctx->enckey[2] = 0x00;  /* 0x00 */
1889         ctx->enckey[3] = 0x00;  /* j    */
1890         for (i = 0; i < ARC4_MAX_KEY_SIZE; i++)
1891                 ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen];
1892
1893         ctx->cipher_type = CIPHER_TYPE_INIT;
1894
1895         return 0;
1896 }
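
/*
 * Context for the key[i % keylen] loop above: the standard RC4 key-scheduling
 * algorithm (KSA) indexes the key cyclically, so repeating the key bytes to
 * fill a fixed-size buffer is equivalent to using the shorter key directly.
 * A plain-C sketch of the KSA for reference only; it is not used by the
 * driver, and the CIPHER_TYPE_INIT state built above is presumably expanded
 * by the SPU hardware itself.
 */
static void example_rc4_ksa(u8 state[256], const u8 *key, unsigned int keylen)
{
	unsigned int i, j = 0;
	u8 tmp;

	for (i = 0; i < 256; i++)
		state[i] = i;
	for (i = 0; i < 256; i++) {
		j = (j + state[i] + key[i % keylen]) & 0xff;
		tmp = state[i];
		state[i] = state[j];
		state[j] = tmp;
	}
}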
1897
1898 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1899                              unsigned int keylen)
1900 {
1901         struct spu_hw *spu = &iproc_priv.spu;
1902         struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher);
1903         struct spu_cipher_parms cipher_parms;
1904         u32 alloc_len = 0;
1905         int err;
1906
1907         flow_log("ablkcipher_setkey() keylen: %d\n", keylen);
1908         flow_dump("  key: ", key, keylen);
1909
1910         switch (ctx->cipher.alg) {
1911         case CIPHER_ALG_DES:
1912                 err = des_setkey(cipher, key, keylen);
1913                 break;
1914         case CIPHER_ALG_3DES:
1915                 err = threedes_setkey(cipher, key, keylen);
1916                 break;
1917         case CIPHER_ALG_AES:
1918                 err = aes_setkey(cipher, key, keylen);
1919                 break;
1920         case CIPHER_ALG_RC4:
1921                 err = rc4_setkey(cipher, key, keylen);
1922                 break;
1923         default:
1924                 pr_err("%s() Error: unknown cipher alg\n", __func__);
1925                 err = -EINVAL;
1926         }
1927         if (err)
1928                 return err;
1929
1930         /* RC4 already populated ctx->enckey */
1931         if (ctx->cipher.alg != CIPHER_ALG_RC4) {
1932                 memcpy(ctx->enckey, key, keylen);
1933                 ctx->enckeylen = keylen;
1934         }
1935         /* SPU needs XTS keys in the reverse order the crypto API presents */
1936         if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1937             (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1938                 unsigned int xts_keylen = keylen / 2;
1939
1940                 memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1941                 memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1942         }
1943
1944         if (spu->spu_type == SPU_TYPE_SPUM)
1945                 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1946         else if (spu->spu_type == SPU_TYPE_SPU2)
1947                 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1948         memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1949         cipher_parms.iv_buf = NULL;
1950         cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher);
1951         flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1952
1953         cipher_parms.alg = ctx->cipher.alg;
1954         cipher_parms.mode = ctx->cipher.mode;
1955         cipher_parms.type = ctx->cipher_type;
1956         cipher_parms.key_buf = ctx->enckey;
1957         cipher_parms.key_len = ctx->enckeylen;
1958
1959         /* Prepend SPU request message with BCM header */
1960         memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1961         ctx->spu_req_hdr_len =
1962             spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1963                                      &cipher_parms);
1964
1965         ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1966                                                           ctx->enckeylen,
1967                                                           false);
1968
1969         atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1970
1971         return 0;
1972 }
1973
1974 static int ablkcipher_encrypt(struct ablkcipher_request *req)
1975 {
1976         flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes);
1977
1978         return ablkcipher_enqueue(req, true);
1979 }
1980
1981 static int ablkcipher_decrypt(struct ablkcipher_request *req)
1982 {
1983         flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes);
1984         return ablkcipher_enqueue(req, false);
1985 }
1986
1987 static int ahash_enqueue(struct ahash_request *req)
1988 {
1989         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1990         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1991         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1992         int err = 0;
1993         const char *alg_name;
1994
1995         flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1996
1997         rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1998                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1999         rctx->parent = &req->base;
2000         rctx->ctx = ctx;
2001         rctx->bd_suppress = true;
2002         memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2003
2004         /* Initialize position in src scatterlist */
2005         rctx->src_sg = req->src;
2006         rctx->src_skip = 0;
2007         rctx->src_nents = 0;
2008         rctx->dst_sg = NULL;
2009         rctx->dst_skip = 0;
2010         rctx->dst_nents = 0;
2011
2012         /* SPU2 hardware does not compute hash of zero length data */
2013         if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
2014             (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
2015                 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2016                 flow_log("Doing %sfinal %s zero-len hash request in software\n",
2017                          rctx->is_final ? "" : "non-", alg_name);
2018                 err = do_shash((unsigned char *)alg_name, req->result,
2019                                NULL, 0, NULL, 0, ctx->authkey,
2020                                ctx->authkeylen);
2021                 if (err < 0)
2022                         flow_log("Hash request failed with error %d\n", err);
2023                 return err;
2024         }
2025         /* Choose a SPU to process this request */
2026         rctx->chan_idx = select_channel();
2027
2028         err = handle_ahash_req(rctx);
2029         if (err != -EINPROGRESS)
2030                 /* synchronous result */
2031                 spu_chunk_cleanup(rctx);
2032
2033         if (err == -EAGAIN)
2034                 /*
2035                  * we saved data in hash carry, but tell crypto API
2036                  * we successfully completed request.
2037                  */
2038                 err = 0;
2039
2040         return err;
2041 }
2042
2043 static int __ahash_init(struct ahash_request *req)
2044 {
2045         struct spu_hw *spu = &iproc_priv.spu;
2046         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2047         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2048         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2049
2050         flow_log("%s()\n", __func__);
2051
2052         /* Initialize the context */
2053         rctx->hash_carry_len = 0;
2054         rctx->is_final = 0;
2055
2056         rctx->total_todo = 0;
2057         rctx->src_sent = 0;
2058         rctx->total_sent = 0;
2059         rctx->total_received = 0;
2060
2061         ctx->digestsize = crypto_ahash_digestsize(tfm);
2062         /* If we add a hash whose digest is larger, catch it here. */
2063         WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
2064
2065         rctx->is_sw_hmac = false;
2066
2067         ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
2068                                                           true);
2069
2070         return 0;
2071 }
2072
2073 /**
2074  * spu_no_incr_hash() - Determine whether incremental hashing is supported.
2075  * @ctx:  Crypto session context
2076  *
2077  * SPU-2 does not support incremental hashing (we'll have to revisit and
2078  * condition based on chip revision or device tree entry if future versions do
2079  * support incremental hash)
2080  *
2081  * SPU-M also doesn't support incremental hashing of AES-XCBC
2082  *
2083  * Return: true if incremental hashing is not supported
2084  *         false otherwise
2085  */
2086 static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2087 {
2088         struct spu_hw *spu = &iproc_priv.spu;
2089
2090         if (spu->spu_type == SPU_TYPE_SPU2)
2091                 return true;
2092
2093         if ((ctx->auth.alg == HASH_ALG_AES) &&
2094             (ctx->auth.mode == HASH_MODE_XCBC))
2095                 return true;
2096
2097         /* Otherwise, incremental hashing is supported */
2098         return false;
2099 }
2100
2101 static int ahash_init(struct ahash_request *req)
2102 {
2103         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2104         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2105         const char *alg_name;
2106         struct crypto_shash *hash;
2107         int ret;
2108         gfp_t gfp;
2109
2110         if (spu_no_incr_hash(ctx)) {
2111                 /*
2112                  * If we get an incremental hashing request and it's not
2113                  * supported by the hardware, we need to handle it in software
2114                  * by calling synchronous hash functions.
2115                  */
2116                 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
2117                 hash = crypto_alloc_shash(alg_name, 0, 0);
2118                 if (IS_ERR(hash)) {
2119                         ret = PTR_ERR(hash);
2120                         goto err;
2121                 }
2122
2123                 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2124                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2125                 ctx->shash = kmalloc(sizeof(*ctx->shash) +
2126                                      crypto_shash_descsize(hash), gfp);
2127                 if (!ctx->shash) {
2128                         ret = -ENOMEM;
2129                         goto err_hash;
2130                 }
2131                 ctx->shash->tfm = hash;
2132
2133                 /* Set the key using data we already have from setkey */
2134                 if (ctx->authkeylen > 0) {
2135                         ret = crypto_shash_setkey(hash, ctx->authkey,
2136                                                   ctx->authkeylen);
2137                         if (ret)
2138                                 goto err_shash;
2139                 }
2140
2141                 /* Initialize hash w/ this key and other params */
2142                 ret = crypto_shash_init(ctx->shash);
2143                 if (ret)
2144                         goto err_shash;
2145         } else {
2146                 /* Otherwise call the internal function which uses SPU hw */
2147                 ret = __ahash_init(req);
2148         }
2149
2150         return ret;
2151
2152 err_shash:
2153         kfree(ctx->shash);
2154 err_hash:
2155         crypto_free_shash(hash);
2156 err:
2157         return ret;
2158 }
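
/*
 * Minimal sketch of the synchronous shash pattern used by the software
 * fallback above: allocate a shash tfm by name, attach it to a descriptor
 * sized with crypto_shash_descsize(), then compute the digest. Function and
 * buffer names are hypothetical; the driver spreads these steps across
 * ahash_init()/ahash_update()/ahash_final(). Requires <crypto/hash.h> and
 * <linux/slab.h>.
 */
static int example_sw_digest(const char *alg_name, const u8 *data,
			     unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash(alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	ret = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}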
2159
2160 static int __ahash_update(struct ahash_request *req)
2161 {
2162         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2163
2164         flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2165
2166         if (!req->nbytes)
2167                 return 0;
2168         rctx->total_todo += req->nbytes;
2169         rctx->src_sent = 0;
2170
2171         return ahash_enqueue(req);
2172 }
2173
2174 static int ahash_update(struct ahash_request *req)
2175 {
2176         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2177         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2178         u8 *tmpbuf;
2179         int ret;
2180         int nents;
2181         gfp_t gfp;
2182
2183         if (spu_no_incr_hash(ctx)) {
2184                 /*
2185                  * If we get an incremental hashing request and it's not
2186                  * supported by the hardware, we need to handle it in software
2187                  * by calling synchronous hash functions.
2188                  */
2189                 if (req->src)
2190                         nents = sg_nents(req->src);
2191                 else
2192                         return -EINVAL;
2193
2194                 /* Copy data from req scatterlist to tmp buffer */
2195                 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2196                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2197                 tmpbuf = kmalloc(req->nbytes, gfp);
2198                 if (!tmpbuf)
2199                         return -ENOMEM;
2200
2201                 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2202                                 req->nbytes) {
2203                         kfree(tmpbuf);
2204                         return -EINVAL;
2205                 }
2206
2207                 /* Call synchronous update */
2208                 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2209                 kfree(tmpbuf);
2210         } else {
2211                 /* Otherwise call the internal function which uses SPU hw */
2212                 ret = __ahash_update(req);
2213         }
2214
2215         return ret;
2216 }
2217
2218 static int __ahash_final(struct ahash_request *req)
2219 {
2220         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2221
2222         flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2223
2224         rctx->is_final = 1;
2225
2226         return ahash_enqueue(req);
2227 }
2228
2229 static int ahash_final(struct ahash_request *req)
2230 {
2231         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2232         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2233         int ret;
2234
2235         if (spu_no_incr_hash(ctx)) {
2236                 /*
2237                  * If we get an incremental hashing request and it's not
2238                  * supported by the hardware, we need to handle it in software
2239                  * by calling synchronous hash functions.
2240                  */
2241                 ret = crypto_shash_final(ctx->shash, req->result);
2242
2243                 /* Done with hash, can deallocate it now */
2244                 crypto_free_shash(ctx->shash->tfm);
2245                 kfree(ctx->shash);
2246
2247         } else {
2248                 /* Otherwise call the internal function which uses SPU hw */
2249                 ret = __ahash_final(req);
2250         }
2251
2252         return ret;
2253 }
2254
2255 static int __ahash_finup(struct ahash_request *req)
2256 {
2257         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2258
2259         flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2260
2261         rctx->total_todo += req->nbytes;
2262         rctx->src_sent = 0;
2263         rctx->is_final = 1;
2264
2265         return ahash_enqueue(req);
2266 }
2267
2268 static int ahash_finup(struct ahash_request *req)
2269 {
2270         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2271         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2272         u8 *tmpbuf;
2273         int ret;
2274         int nents;
2275         gfp_t gfp;
2276
2277         if (spu_no_incr_hash(ctx)) {
2278                 /*
2279                  * If we get an incremental hashing request and it's not
2280                  * supported by the hardware, we need to handle it in software
2281                  * by calling synchronous hash functions.
2282                  */
2283                 if (req->src) {
2284                         nents = sg_nents(req->src);
2285                 } else {
2286                         ret = -EINVAL;
2287                         goto ahash_finup_exit;
2288                 }
2289
2290                 /* Copy data from req scatterlist to tmp buffer */
2291                 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2292                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2293                 tmpbuf = kmalloc(req->nbytes, gfp);
2294                 if (!tmpbuf) {
2295                         ret = -ENOMEM;
2296                         goto ahash_finup_exit;
2297                 }
2298
2299                 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2300                                 req->nbytes) {
2301                         ret = -EINVAL;
2302                         goto ahash_finup_free;
2303                 }
2304
2305                 /* Call synchronous finup */
2306                 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2307                                          req->result);
2308         } else {
2309                 /* Otherwise call the internal function which uses SPU hw */
2310                 return __ahash_finup(req);
2311         }
2312 ahash_finup_free:
2313         kfree(tmpbuf);
2314
2315 ahash_finup_exit:
2316         /* Done with hash, can deallocate it now */
2317         crypto_free_shash(ctx->shash->tfm);
2318         kfree(ctx->shash);
2319         return ret;
2320 }
2321
2322 static int ahash_digest(struct ahash_request *req)
2323 {
2324         int err = 0;
2325
2326         flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2327
2328         /* whole thing at once */
2329         err = __ahash_init(req);
2330         if (!err)
2331                 err = __ahash_finup(req);
2332
2333         return err;
2334 }
2335
2336 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2337                         unsigned int keylen)
2338 {
2339         struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2340
2341         flow_log("%s() ahash:%p key:%p keylen:%u\n",
2342                  __func__, ahash, key, keylen);
2343         flow_dump("  key: ", key, keylen);
2344
2345         if (ctx->auth.alg == HASH_ALG_AES) {
2346                 switch (keylen) {
2347                 case AES_KEYSIZE_128:
2348                         ctx->cipher_type = CIPHER_TYPE_AES128;
2349                         break;
2350                 case AES_KEYSIZE_192:
2351                         ctx->cipher_type = CIPHER_TYPE_AES192;
2352                         break;
2353                 case AES_KEYSIZE_256:
2354                         ctx->cipher_type = CIPHER_TYPE_AES256;
2355                         break;
2356                 default:
2357                         pr_err("%s() Error: Invalid key length\n", __func__);
2358                         return -EINVAL;
2359                 }
2360         } else {
2361                 pr_err("%s() Error: unknown hash alg\n", __func__);
2362                 return -EINVAL;
2363         }
2364         memcpy(ctx->authkey, key, keylen);
2365         ctx->authkeylen = keylen;
2366
2367         return 0;
2368 }
2369
2370 static int ahash_export(struct ahash_request *req, void *out)
2371 {
2372         const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2373         struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2374
2375         spu_exp->total_todo = rctx->total_todo;
2376         spu_exp->total_sent = rctx->total_sent;
2377         spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2378         memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2379         spu_exp->hash_carry_len = rctx->hash_carry_len;
2380         memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2381
2382         return 0;
2383 }
2384
2385 static int ahash_import(struct ahash_request *req, const void *in)
2386 {
2387         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2388         struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2389
2390         rctx->total_todo = spu_exp->total_todo;
2391         rctx->total_sent = spu_exp->total_sent;
2392         rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2393         memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2394         rctx->hash_carry_len = spu_exp->hash_carry_len;
2395         memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
2396
2397         return 0;
2398 }
2399
2400 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2401                              unsigned int keylen)
2402 {
2403         struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2404         unsigned int blocksize =
2405                 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2406         unsigned int digestsize = crypto_ahash_digestsize(ahash);
2407         unsigned int index;
2408         int rc;
2409
2410         flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2411                  __func__, ahash, key, keylen, blocksize, digestsize);
2412         flow_dump("  key: ", key, keylen);
2413
2414         if (keylen > blocksize) {
2415                 switch (ctx->auth.alg) {
2416                 case HASH_ALG_MD5:
2417                         rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2418                                       0, NULL, 0);
2419                         break;
2420                 case HASH_ALG_SHA1:
2421                         rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2422                                       0, NULL, 0);
2423                         break;
2424                 case HASH_ALG_SHA224:
2425                         rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2426                                       0, NULL, 0);
2427                         break;
2428                 case HASH_ALG_SHA256:
2429                         rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2430                                       0, NULL, 0);
2431                         break;
2432                 case HASH_ALG_SHA384:
2433                         rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2434                                       0, NULL, 0);
2435                         break;
2436                 case HASH_ALG_SHA512:
2437                         rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2438                                       0, NULL, 0);
2439                         break;
2440                 case HASH_ALG_SHA3_224:
2441                         rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2442                                       NULL, 0, NULL, 0);
2443                         break;
2444                 case HASH_ALG_SHA3_256:
2445                         rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2446                                       NULL, 0, NULL, 0);
2447                         break;
2448                 case HASH_ALG_SHA3_384:
2449                         rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2450                                       NULL, 0, NULL, 0);
2451                         break;
2452                 case HASH_ALG_SHA3_512:
2453                         rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2454                                       NULL, 0, NULL, 0);
2455                         break;
2456                 default:
2457                         pr_err("%s() Error: unknown hash alg\n", __func__);
2458                         return -EINVAL;
2459                 }
2460                 if (rc < 0) {
2461                         pr_err("%s() Error %d computing shash for %s\n",
2462                                __func__, rc, hash_alg_name[ctx->auth.alg]);
2463                         return rc;
2464                 }
2465                 ctx->authkeylen = digestsize;
2466
2467                 flow_log("  keylen > blocksize... hashed\n");
2468                 flow_dump("  newkey: ", ctx->authkey, ctx->authkeylen);
2469         } else {
2470                 memcpy(ctx->authkey, key, keylen);
2471                 ctx->authkeylen = keylen;
2472         }
2473
2474         /*
2475          * The full HMAC operation in SPU-M is not verified, so keep the
2476          * generation of the IPAD and OPAD and the outer hashing in
2477          * software.
2478          */
2479         if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2480                 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2481                 memset(ctx->ipad + ctx->authkeylen, 0,
2482                        blocksize - ctx->authkeylen);
2483                 ctx->authkeylen = 0;
2484                 memcpy(ctx->opad, ctx->ipad, blocksize);
2485
2486                 for (index = 0; index < blocksize; index++) {
2487                         ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2488                         ctx->opad[index] ^= HMAC_OPAD_VALUE;
2489                 }
2490
2491                 flow_dump("  ipad: ", ctx->ipad, blocksize);
2492                 flow_dump("  opad: ", ctx->opad, blocksize);
2493         }
2494         ctx->digestsize = digestsize;
2495         atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
2496
2497         return 0;
2498 }
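
/*
 * The ipad/opad generation above follows RFC 2104: zero-pad the (possibly
 * pre-hashed) key to the hash block size, then XOR with 0x36 for the inner
 * pad and 0x5c for the outer pad (HMAC_IPAD_VALUE/HMAC_OPAD_VALUE). A plain
 * sketch for reference, assuming the key has already been shortened to at
 * most blocksize bytes; the function name is hypothetical.
 */
static void example_hmac_pads(const u8 *key, unsigned int keylen,
			      unsigned int blocksize, u8 *ipad, u8 *opad)
{
	unsigned int i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= 0x36;	/* HMAC_IPAD_VALUE */
		opad[i] ^= 0x5c;	/* HMAC_OPAD_VALUE */
	}
}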
2499
2500 static int ahash_hmac_init(struct ahash_request *req)
2501 {
2502         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2503         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2504         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2505         unsigned int blocksize =
2506                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2507
2508         flow_log("ahash_hmac_init()\n");
2509
2510         /* init the context as a hash */
2511         ahash_init(req);
2512
2513         if (!spu_no_incr_hash(ctx)) {
2514                 /* SPU-M can do incr hashing but needs sw for outer HMAC */
2515                 rctx->is_sw_hmac = true;
2516                 ctx->auth.mode = HASH_MODE_HASH;
2517                 /* start with a prepended ipad */
2518                 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2519                 rctx->hash_carry_len = blocksize;
2520                 rctx->total_todo += blocksize;
2521         }
2522
2523         return 0;
2524 }
2525
2526 static int ahash_hmac_update(struct ahash_request *req)
2527 {
2528         flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2529
2530         if (!req->nbytes)
2531                 return 0;
2532
2533         return ahash_update(req);
2534 }
2535
2536 static int ahash_hmac_final(struct ahash_request *req)
2537 {
2538         flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2539
2540         return ahash_final(req);
2541 }
2542
2543 static int ahash_hmac_finup(struct ahash_request *req)
2544 {
2545         flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2546
2547         return ahash_finup(req);
2548 }
2549
2550 static int ahash_hmac_digest(struct ahash_request *req)
2551 {
2552         struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2553         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2554         struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2555         unsigned int blocksize =
2556                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2557
2558         flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2559
2560         /* Perform initialization and then call finup */
2561         __ahash_init(req);
2562
2563         if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2564                 /*
2565                  * SPU2 supports a full HMAC implementation in hardware,
2566                  * so there is no need to generate the IPAD, OPAD and
2567                  * outer hash in software.
2568                  * Only when the hash key length exceeds the hash block
2569                  * size does SPU2 expect the key to be hashed, shortened
2570                  * to the digest size, and fed in as the hash key.
2571                  */
2572                 rctx->is_sw_hmac = false;
2573                 ctx->auth.mode = HASH_MODE_HMAC;
2574         } else {
2575                 rctx->is_sw_hmac = true;
2576                 ctx->auth.mode = HASH_MODE_HASH;
2577                 /* start with a prepended ipad */
2578                 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2579                 rctx->hash_carry_len = blocksize;
2580                 rctx->total_todo += blocksize;
2581         }
2582
2583         return __ahash_finup(req);
2584 }
2585
2586 /* aead helpers */
2587
2588 static int aead_need_fallback(struct aead_request *req)
2589 {
2590         struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2591         struct spu_hw *spu = &iproc_priv.spu;
2592         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2593         struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2594         u32 payload_len;
2595
2596         /*
2597          * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2598          * and AAD are both 0 bytes long. So use fallback in this case.
2599          */
2600         if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2601              (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2602             (req->assoclen == 0)) {
2603                 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2604                     (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2605                         flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2606                         return 1;
2607                 }
2608         }
2609
2610         /* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2611         if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2612             (spu->spu_type == SPU_TYPE_SPUM) &&
2613             (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2614             (ctx->digestsize != 16)) {
2615                 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2616                          __func__, ctx->digestsize);
2617                 return 1;
2618         }
2619
2620         /*
2621          * SPU-M on NSP has an issue where AES-CCM hash is not correct
2622          * when AAD size is 0
2623          */
2624         if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2625             (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2626             (req->assoclen == 0)) {
2627                 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2628                          __func__);
2629                 return 1;
2630         }
2631
2632         payload_len = req->cryptlen;
2633         if (spu->spu_type == SPU_TYPE_SPUM)
2634                 payload_len += req->assoclen;
2635
2636         flow_log("%s() payload len: %u\n", __func__, payload_len);
2637
2638         if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2639                 return 0;
2640         else
2641                 return payload_len > ctx->max_payload;
2642 }
2643
2644 static void aead_complete(struct crypto_async_request *areq, int err)
2645 {
2646         struct aead_request *req =
2647             container_of(areq, struct aead_request, base);
2648         struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2649         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2650
2651         flow_log("%s() err:%d\n", __func__, err);
2652
2653         areq->tfm = crypto_aead_tfm(aead);
2654
2655         areq->complete = rctx->old_complete;
2656         areq->data = rctx->old_data;
2657
2658         areq->complete(areq, err);
2659 }
2660
2661 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2662 {
2663         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2664         struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2665         struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2666         struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2667         int err;
2668         u32 req_flags;
2669
2670         flow_log("%s() enc:%u\n", __func__, is_encrypt);
2671
2672         if (ctx->fallback_cipher) {
2673                 /* Store the cipher tfm and then use the fallback tfm */
2674                 rctx->old_tfm = tfm;
2675                 aead_request_set_tfm(req, ctx->fallback_cipher);
2676                 /*
2677                  * Save the callback and chain ourselves in, so we can restore
2678                  * the tfm
2679                  */
2680                 rctx->old_complete = req->base.complete;
2681                 rctx->old_data = req->base.data;
2682                 req_flags = aead_request_flags(req);
2683                 aead_request_set_callback(req, req_flags, aead_complete, req);
2684                 err = is_encrypt ? crypto_aead_encrypt(req) :
2685                     crypto_aead_decrypt(req);
2686
2687                 if (err == 0) {
2688                         /*
2689                          * fallback was synchronous (did not return
2690                          * -EINPROGRESS). So restore request state here.
2691                          */
2692                         aead_request_set_callback(req, req_flags,
2693                                                   rctx->old_complete, req);
2694                         req->base.data = rctx->old_data;
2695                         aead_request_set_tfm(req, aead);
2696                         flow_log("%s() fallback completed successfully\n\n",
2697                                  __func__);
2698                 }
2699         } else {
2700                 err = -EINVAL;
2701         }
2702
2703         return err;
2704 }
2705
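     /**
      * aead_enqueue() - Common handler for AEAD encrypt and decrypt requests.
      * Validates the associated data length, initializes the request context
      * (scatterlist positions, IV and salt lengths), uses the software
      * fallback when the SPU cannot handle the request, and otherwise submits
      * the request to a SPU channel.
      * @req:        AEAD request from the crypto API
      * @is_encrypt: true for encryption, false for decryption
      *
      * Return: -EINPROGRESS if the request was submitted to the hardware,
      *         other value if the request completed or failed synchronously
      */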
2706 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2707 {
2708         struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2709         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2710         struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2711         int err;
2712
2713         flow_log("%s() enc:%u\n", __func__, is_encrypt);
2714
2715         if (req->assoclen > MAX_ASSOC_SIZE) {
2716                 pr_err("%s() Error: associated data too long (%u > %u bytes)\n",
2717                        __func__, req->assoclen, MAX_ASSOC_SIZE);
2719                 return -EINVAL;
2720         }
2721
2722         rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2723                        CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2724         rctx->parent = &req->base;
2725         rctx->is_encrypt = is_encrypt;
2726         rctx->bd_suppress = false;
2727         rctx->total_todo = req->cryptlen;
2728         rctx->src_sent = 0;
2729         rctx->total_sent = 0;
2730         rctx->total_received = 0;
2731         rctx->is_sw_hmac = false;
2732         rctx->ctx = ctx;
2733         memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2734
2735         /* assoc data is at start of src sg */
2736         rctx->assoc = req->src;
2737
2738         /*
2739          * Init current position in src scatterlist to be after assoc data.
2740          * src_skip set to buffer offset where data begins. (Assoc data could
2741          * end in the middle of a buffer.)
2742          */
2743         if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2744                              &rctx->src_skip) < 0) {
2745                 pr_err("%s() Error: Unable to find start of src data\n",
2746                        __func__);
2747                 return -EINVAL;
2748         }
2749
2750         rctx->src_nents = 0;
2751         rctx->dst_nents = 0;
2752         if (req->dst == req->src) {
2753                 rctx->dst_sg = rctx->src_sg;
2754                 rctx->dst_skip = rctx->src_skip;
2755         } else {
2756                 /*
2757                  * Expect req->dst to have room for assoc data followed by
2758                  * output data and ICV, if encrypt. So initialize dst_sg
2759                  * to point beyond assoc len offset.
2760                  */
2761                 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2762                                      &rctx->dst_skip) < 0) {
2763                         pr_err("%s() Error: Unable to find start of dst data\n",
2764                                __func__);
2765                         return -EINVAL;
2766                 }
2767         }
2768
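             /*
              * Determine the length of the IV/counter field for this request:
              * block and counter modes (including GCM) use any salt plus the
              * transform's IV size, CCM uses the fixed AES-CCM IV size, and
              * other modes carry no IV.
              */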
2769         if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2770             ctx->cipher.mode == CIPHER_MODE_CTR ||
2771             ctx->cipher.mode == CIPHER_MODE_OFB ||
2772             ctx->cipher.mode == CIPHER_MODE_XTS ||
2773             ctx->cipher.mode == CIPHER_MODE_GCM) {
2774                 rctx->iv_ctr_len =
2775                         ctx->salt_len +
2776                         crypto_aead_ivsize(crypto_aead_reqtfm(req));
2777         } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2778                 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2779         } else {
2780                 rctx->iv_ctr_len = 0;
2781         }
2782
2783         rctx->hash_carry_len = 0;
2784
2785         flow_log("  src sg: %p\n", req->src);
2786         flow_log("  rctx->src_sg: %p, src_skip %u\n",
2787                  rctx->src_sg, rctx->src_skip);
2788         flow_log("  assoc:  %p, assoclen %u\n", rctx->assoc, req->assoclen);
2789         flow_log("  dst sg: %p\n", req->dst);
2790         flow_log("  rctx->dst_sg: %p, dst_skip %u\n",
2791                  rctx->dst_sg, rctx->dst_skip);
2792         flow_log("  iv_ctr_len:%u\n", rctx->iv_ctr_len);
2793         flow_dump("  iv: ", req->iv, rctx->iv_ctr_len);
2794         flow_log("  authkeylen:%u\n", ctx->authkeylen);
2795         flow_log("  is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2796
2797         if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2798                 flow_log("  max_payload infinite\n");
2799         else
2800                 flow_log("  max_payload: %u\n", ctx->max_payload);
2801
2802         if (unlikely(aead_need_fallback(req)))
2803                 return aead_do_fallback(req, is_encrypt);
2804
2805         /*
2806          * Do memory allocations for request after fallback check, because if we
2807          * do fallback, we won't call finish_req() to dealloc.
2808          */
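             /*
              * Build the IV for the SPU message: any salt is placed at
              * salt_offset, followed by the IV supplied with the request.
              */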
2809         if (rctx->iv_ctr_len) {
2810                 if (ctx->salt_len)
2811                         memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2812                                ctx->salt, ctx->salt_len);
2813                 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2814                        req->iv,
2815                        rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2816         }
2817
2818         rctx->chan_idx = select_channel();
2819         err = handle_aead_req(rctx);
2820         if (err != -EINPROGRESS)
2821                 /* synchronous result */
2822                 spu_chunk_cleanup(rctx);
2823
2824         return err;
2825 }
2826
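     /**
      * aead_authenc_setkey() - setkey() operation for authenc() AEAD
      * transforms. Splits the key blob into encryption and authentication
      * keys with crypto_authenc_extractkeys(), validates the cipher key
      * length for the selected algorithm, and also keys the software
      * fallback tfm.
      * @cipher: AEAD structure
      * @key:    Key blob in authenc() format
      * @keylen: Length of the key blob, in bytes
      *
      * Return: 0 on success, error code otherwise
      */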
2827 static int aead_authenc_setkey(struct crypto_aead *cipher,
2828                                const u8 *key, unsigned int keylen)
2829 {
2830         struct spu_hw *spu = &iproc_priv.spu;
2831         struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2832         struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2833         struct crypto_authenc_keys keys;
2834         int ret;
2835
2836         flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2837                  keylen);
2838         flow_dump("  key: ", key, keylen);
2839
2840         ret = crypto_authenc_extractkeys(&keys, key, keylen);
2841         if (ret)
2842                 goto badkey;
2843
2844         if (keys.enckeylen > MAX_KEY_SIZE ||
2845             keys.authkeylen > MAX_KEY_SIZE)
2846                 goto badkey;
2847
2848         ctx->enckeylen = keys.enckeylen;
2849         ctx->authkeylen = keys.authkeylen;
2850
2851         memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2852         /* May end up padding auth key. So make sure it's zeroed. */
2853         memset(ctx->authkey, 0, sizeof(ctx->authkey));
2854         memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2855
2856         switch (ctx->alg->cipher_info.alg) {
2857         case CIPHER_ALG_DES:
2858                 if (ctx->enckeylen == DES_KEY_SIZE) {
2859                         u32 tmp[DES_EXPKEY_WORDS];
2860                         u32 flags = CRYPTO_TFM_RES_WEAK_KEY;
2861
2862                         if (des_ekey(tmp, keys.enckey) == 0) {
2863                                 if (crypto_aead_get_flags(cipher) &
2864                                     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
2865                                         crypto_aead_set_flags(cipher, flags);
2866                                         return -EINVAL;
2867                                 }
2868                         }
2869
2870                         ctx->cipher_type = CIPHER_TYPE_DES;
2871                 } else {
2872                         goto badkey;
2873                 }
2874                 break;
2875         case CIPHER_ALG_3DES:
2876                 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
2877                         u32 flags;
2878
2879                         flags = crypto_aead_get_flags(cipher);
2880                         ret = __des3_verify_key(&flags, keys.enckey);
2881                         if (unlikely(ret)) {
2882                                 crypto_aead_set_flags(cipher, flags);
2883                                 return ret;
2884                         }
2885
2886                         ctx->cipher_type = CIPHER_TYPE_3DES;
2887                 } else {
2888                         crypto_aead_set_flags(cipher,
2889                                               CRYPTO_TFM_RES_BAD_KEY_LEN);
2890                         return -EINVAL;
2891                 }
2892                 break;
2893         case CIPHER_ALG_AES:
2894                 switch (ctx->enckeylen) {
2895                 case AES_KEYSIZE_128:
2896                         ctx->cipher_type = CIPHER_TYPE_AES128;
2897                         break;
2898                 case AES_KEYSIZE_192:
2899                         ctx->cipher_type = CIPHER_TYPE_AES192;
2900                         break;
2901                 case AES_KEYSIZE_256:
2902                         ctx->cipher_type = CIPHER_TYPE_AES256;
2903                         break;
2904                 default:
2905                         goto badkey;
2906                 }
2907                 break;
2908         case CIPHER_ALG_RC4:
2909                 ctx->cipher_type = CIPHER_TYPE_INIT;
2910                 break;
2911         default:
2912                 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2913                 return -EINVAL;
2914         }
2915
2916         flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2917                  ctx->authkeylen);
2918         flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2919         flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2920
2921         /* setkey the fallback just in case we need to use it */
2922         if (ctx->fallback_cipher) {
2923                 flow_log("  running fallback setkey()\n");
2924
2925                 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2926                 ctx->fallback_cipher->base.crt_flags |=
2927                     tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2928                 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2929                 if (ret) {
2930                         flow_log("  fallback setkey() returned:%d\n", ret);
2931                         tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
2932                         tfm->crt_flags |=
2933                             (ctx->fallback_cipher->base.crt_flags &
2934                              CRYPTO_TFM_RES_MASK);
2935                 }
2936         }
2937
2938         ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2939                                                           ctx->enckeylen,
2940                                                           false);
2941
2942         atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2943
2944         return ret;
2945
2946 badkey:
2947         ctx->enckeylen = 0;
2948         ctx->authkeylen = 0;
2949         ctx->digestsize = 0;
2950
2951         crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2952         return -EINVAL;
2953 }
2954
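     /**
      * aead_gcm_ccm_setkey() - setkey() operation for AES-GCM and AES-CCM
      * AEAD transforms. Stores the AES key, selects the cipher type from the
      * key length, and also keys the software fallback tfm. For the ESP
      * variants, the salt has already been stripped from the key by the
      * caller.
      * @cipher: AEAD structure
      * @key:    AES key
      * @keylen: Length of key, in bytes
      *
      * Return: 0 on success, error code otherwise
      */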
2955 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2956                                const u8 *key, unsigned int keylen)
2957 {
2958         struct spu_hw *spu = &iproc_priv.spu;
2959         struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2960         struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2961
2962         int ret = 0;
2963
2964         flow_log("%s() keylen:%u\n", __func__, keylen);
2965         flow_dump("  key: ", key, keylen);
2966
2967         if (!ctx->is_esp)
2968                 ctx->digestsize = keylen;
2969
2970         ctx->enckeylen = keylen;
2971         ctx->authkeylen = 0;
2972         memcpy(ctx->enckey, key, ctx->enckeylen);
2973
2974         switch (ctx->enckeylen) {
2975         case AES_KEYSIZE_128:
2976                 ctx->cipher_type = CIPHER_TYPE_AES128;
2977                 break;
2978         case AES_KEYSIZE_192:
2979                 ctx->cipher_type = CIPHER_TYPE_AES192;
2980                 break;
2981         case AES_KEYSIZE_256:
2982                 ctx->cipher_type = CIPHER_TYPE_AES256;
2983                 break;
2984         default:
2985                 goto badkey;
2986         }
2987
2988         flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2989                  ctx->authkeylen);
2990         flow_dump("  enc: ", ctx->enckey, ctx->enckeylen);
2991         flow_dump("  auth: ", ctx->authkey, ctx->authkeylen);
2992
2993         /* setkey the fallback just in case we need to use it */
2994         if (ctx->fallback_cipher) {
2995                 flow_log("  running fallback setkey()\n");
2996
2997                 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2998                 ctx->fallback_cipher->base.crt_flags |=
2999                     tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
3000                 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
3001                                          keylen + ctx->salt_len);
3002                 if (ret) {
3003                         flow_log("  fallback setkey() returned:%d\n", ret);
3004                         tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
3005                         tfm->crt_flags |=
3006                             (ctx->fallback_cipher->base.crt_flags &
3007                              CRYPTO_TFM_RES_MASK);
3008                 }
3009         }
3010
3011         ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
3012                                                           ctx->enckeylen,
3013                                                           false);
3014
3015         atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
3016
3017         flow_log("  enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
3018                  ctx->authkeylen);
3019
3020         return ret;
3021
3022 badkey:
3023         ctx->enckeylen = 0;
3024         ctx->authkeylen = 0;
3025         ctx->digestsize = 0;
3026
3027         crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
3028         return -EINVAL;
3029 }
3030
3031 /**
3032  * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
3033  * @cipher: AEAD structure
3034  * @key:    Key followed by 4 bytes of salt
3035  * @keylen: Length of key plus salt, in bytes
3036  *
3037  * Extracts salt from key and stores it to be prepended to IV on each request.
3038  * Digest is always 16 bytes
3039  *
3040  * Return: Value from generic gcm setkey.
3041  */
3042 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
3043                                const u8 *key, unsigned int keylen)
3044 {
3045         struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3046
3047         flow_log("%s\n", __func__);
3048         ctx->salt_len = GCM_ESP_SALT_SIZE;
3049         ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3050         memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3051         keylen -= GCM_ESP_SALT_SIZE;
3052         ctx->digestsize = GCM_ESP_DIGESTSIZE;
3053         ctx->is_esp = true;
3054         flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3055
3056         return aead_gcm_ccm_setkey(cipher, key, keylen);
3057 }
3058
3059 /**
3060  * rfc4543_gcm_esp_setkey() - setkey() operation for RFC4543 variant of GCM/GMAC.
3061  * @cipher: AEAD structure
3062  * @key:    Key followed by 4 bytes of salt
3063  * @keylen: Length of key plus salt, in bytes
3064  *
3065  * Extracts salt from key and stores it to be prepended to IV on each request.
3066  * Digest is always 16 bytes
3067  *
3068  * Return: Value from generic gcm setkey.
3069  */
3070 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
3071                                   const u8 *key, unsigned int keylen)
3072 {
3073         struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3074
3075         flow_log("%s\n", __func__);
3076         ctx->salt_len = GCM_ESP_SALT_SIZE;
3077         ctx->salt_offset = GCM_ESP_SALT_OFFSET;
3078         memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
3079         keylen -= GCM_ESP_SALT_SIZE;
3080         ctx->digestsize = GCM_ESP_DIGESTSIZE;
3081         ctx->is_esp = true;
3082         ctx->is_rfc4543 = true;
3083         flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
3084
3085         return aead_gcm_ccm_setkey(cipher, key, keylen);
3086 }
3087
3088 /**
3089  * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
3090  * @cipher: AEAD structure
3091  * @key:    Key followed by 4 bytes of salt
3092  * @keylen: Length of key plus salt, in bytes
3093  *
3094  * Extracts salt from key and stores it to be prepended to IV on each request.
3095  * Digest is always 16 bytes
3096  *
3097  * Return: Value from generic ccm setkey.
3098  */
3099 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
3100                                const u8 *key, unsigned int keylen)
3101 {
3102         struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3103
3104         flow_log("%s\n", __func__);
3105         ctx->salt_len = CCM_ESP_SALT_SIZE;
3106         ctx->salt_offset = CCM_ESP_SALT_OFFSET;
3107         memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
3108         keylen -= CCM_ESP_SALT_SIZE;
3109         ctx->is_esp = true;
3110         flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
3111
3112         return aead_gcm_ccm_setkey(cipher, key, keylen);
3113 }
3114
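     /**
      * aead_setauthsize() - setauthsize() operation for AEAD transforms.
      * Records the requested digest size and propagates it to the software
      * fallback tfm.
      * @cipher:   AEAD structure
      * @authsize: Digest size, in bytes
      *
      * Return: 0 on success, error code from the fallback otherwise
      */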
3115 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
3116 {
3117         struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
3118         int ret = 0;
3119
3120         flow_log("%s() authkeylen:%u authsize:%u\n",
3121                  __func__, ctx->authkeylen, authsize);
3122
3123         ctx->digestsize = authsize;
3124
3125         /* setkey the fallback just in case we need to use it */
3126         if (ctx->fallback_cipher) {
3127                 flow_log("  running fallback setauth()\n");
3128
3129                 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
3130                 if (ret)
3131                         flow_log("  fallback setauth() returned:%d\n", ret);
3132         }
3133
3134         return ret;
3135 }
3136
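     /*
      * AEAD encrypt and decrypt entry points registered with the crypto API.
      * Both log the request and hand it to aead_enqueue().
      */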
3137 static int aead_encrypt(struct aead_request *req)
3138 {
3139         flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3140                  req->cryptlen);
3141         dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3142         flow_log("  assoc_len:%u\n", req->assoclen);
3143
3144         return aead_enqueue(req, true);
3145 }
3146
3147 static int aead_decrypt(struct aead_request *req)
3148 {
3149         flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3150         dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3151         flow_log("  assoc_len:%u\n", req->assoclen);
3152
3153         return aead_enqueue(req, false);
3154 }
3155
3156 /* ==================== Supported Cipher Algorithms ==================== */
3157
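     /*
      * Table of algorithms supported by this driver. Each entry pairs the
      * crypto API algorithm definition with the SPU cipher and auth settings
      * used when building SPU request headers. AEAD entries are listed first,
      * followed by ablkcipher and ahash entries.
      */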
3158 static struct iproc_alg_s driver_algs[] = {
3159         {
3160          .type = CRYPTO_ALG_TYPE_AEAD,
3161          .alg.aead = {
3162                  .base = {
3163                         .cra_name = "gcm(aes)",
3164                         .cra_driver_name = "gcm-aes-iproc",
3165                         .cra_blocksize = AES_BLOCK_SIZE,
3166                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3167                  },
3168                  .setkey = aead_gcm_ccm_setkey,
3169                  .ivsize = GCM_AES_IV_SIZE,
3170                  .maxauthsize = AES_BLOCK_SIZE,
3171          },
3172          .cipher_info = {
3173                          .alg = CIPHER_ALG_AES,
3174                          .mode = CIPHER_MODE_GCM,
3175                          },
3176          .auth_info = {
3177                        .alg = HASH_ALG_AES,
3178                        .mode = HASH_MODE_GCM,
3179                        },
3180          .auth_first = 0,
3181          },
3182         {
3183          .type = CRYPTO_ALG_TYPE_AEAD,
3184          .alg.aead = {
3185                  .base = {
3186                         .cra_name = "ccm(aes)",
3187                         .cra_driver_name = "ccm-aes-iproc",
3188                         .cra_blocksize = AES_BLOCK_SIZE,
3189                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3190                  },
3191                  .setkey = aead_gcm_ccm_setkey,
3192                  .ivsize = CCM_AES_IV_SIZE,
3193                  .maxauthsize = AES_BLOCK_SIZE,
3194          },
3195          .cipher_info = {
3196                          .alg = CIPHER_ALG_AES,
3197                          .mode = CIPHER_MODE_CCM,
3198                          },
3199          .auth_info = {
3200                        .alg = HASH_ALG_AES,
3201                        .mode = HASH_MODE_CCM,
3202                        },
3203          .auth_first = 0,
3204          },
3205         {
3206          .type = CRYPTO_ALG_TYPE_AEAD,
3207          .alg.aead = {
3208                  .base = {
3209                         .cra_name = "rfc4106(gcm(aes))",
3210                         .cra_driver_name = "gcm-aes-esp-iproc",
3211                         .cra_blocksize = AES_BLOCK_SIZE,
3212                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3213                  },
3214                  .setkey = aead_gcm_esp_setkey,
3215                  .ivsize = GCM_RFC4106_IV_SIZE,
3216                  .maxauthsize = AES_BLOCK_SIZE,
3217          },
3218          .cipher_info = {
3219                          .alg = CIPHER_ALG_AES,
3220                          .mode = CIPHER_MODE_GCM,
3221                          },
3222          .auth_info = {
3223                        .alg = HASH_ALG_AES,
3224                        .mode = HASH_MODE_GCM,
3225                        },
3226          .auth_first = 0,
3227          },
3228         {
3229          .type = CRYPTO_ALG_TYPE_AEAD,
3230          .alg.aead = {
3231                  .base = {
3232                         .cra_name = "rfc4309(ccm(aes))",
3233                         .cra_driver_name = "ccm-aes-esp-iproc",
3234                         .cra_blocksize = AES_BLOCK_SIZE,
3235                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3236                  },
3237                  .setkey = aead_ccm_esp_setkey,
3238                  .ivsize = CCM_AES_IV_SIZE,
3239                  .maxauthsize = AES_BLOCK_SIZE,
3240          },
3241          .cipher_info = {
3242                          .alg = CIPHER_ALG_AES,
3243                          .mode = CIPHER_MODE_CCM,
3244                          },
3245          .auth_info = {
3246                        .alg = HASH_ALG_AES,
3247                        .mode = HASH_MODE_CCM,
3248                        },
3249          .auth_first = 0,
3250          },
3251         {
3252          .type = CRYPTO_ALG_TYPE_AEAD,
3253          .alg.aead = {
3254                  .base = {
3255                         .cra_name = "rfc4543(gcm(aes))",
3256                         .cra_driver_name = "gmac-aes-esp-iproc",
3257                         .cra_blocksize = AES_BLOCK_SIZE,
3258                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3259                  },
3260                  .setkey = rfc4543_gcm_esp_setkey,
3261                  .ivsize = GCM_RFC4106_IV_SIZE,
3262                  .maxauthsize = AES_BLOCK_SIZE,
3263          },
3264          .cipher_info = {
3265                          .alg = CIPHER_ALG_AES,
3266                          .mode = CIPHER_MODE_GCM,
3267                          },
3268          .auth_info = {
3269                        .alg = HASH_ALG_AES,
3270                        .mode = HASH_MODE_GCM,
3271                        },
3272          .auth_first = 0,
3273          },
3274         {
3275          .type = CRYPTO_ALG_TYPE_AEAD,
3276          .alg.aead = {
3277                  .base = {
3278                         .cra_name = "authenc(hmac(md5),cbc(aes))",
3279                         .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3280                         .cra_blocksize = AES_BLOCK_SIZE,
3281                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3282                  },
3283                  .setkey = aead_authenc_setkey,
3284                  .ivsize = AES_BLOCK_SIZE,
3285                  .maxauthsize = MD5_DIGEST_SIZE,
3286          },
3287          .cipher_info = {
3288                          .alg = CIPHER_ALG_AES,
3289                          .mode = CIPHER_MODE_CBC,
3290                          },
3291          .auth_info = {
3292                        .alg = HASH_ALG_MD5,
3293                        .mode = HASH_MODE_HMAC,
3294                        },
3295          .auth_first = 0,
3296          },
3297         {
3298          .type = CRYPTO_ALG_TYPE_AEAD,
3299          .alg.aead = {
3300                  .base = {
3301                         .cra_name = "authenc(hmac(sha1),cbc(aes))",
3302                         .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3303                         .cra_blocksize = AES_BLOCK_SIZE,
3304                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3305                  },
3306                  .setkey = aead_authenc_setkey,
3307                  .ivsize = AES_BLOCK_SIZE,
3308                  .maxauthsize = SHA1_DIGEST_SIZE,
3309          },
3310          .cipher_info = {
3311                          .alg = CIPHER_ALG_AES,
3312                          .mode = CIPHER_MODE_CBC,
3313                          },
3314          .auth_info = {
3315                        .alg = HASH_ALG_SHA1,
3316                        .mode = HASH_MODE_HMAC,
3317                        },
3318          .auth_first = 0,
3319          },
3320         {
3321          .type = CRYPTO_ALG_TYPE_AEAD,
3322          .alg.aead = {
3323                  .base = {
3324                         .cra_name = "authenc(hmac(sha256),cbc(aes))",
3325                         .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3326                         .cra_blocksize = AES_BLOCK_SIZE,
3327                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3328                  },
3329                  .setkey = aead_authenc_setkey,
3330                  .ivsize = AES_BLOCK_SIZE,
3331                  .maxauthsize = SHA256_DIGEST_SIZE,
3332          },
3333          .cipher_info = {
3334                          .alg = CIPHER_ALG_AES,
3335                          .mode = CIPHER_MODE_CBC,
3336                          },
3337          .auth_info = {
3338                        .alg = HASH_ALG_SHA256,
3339                        .mode = HASH_MODE_HMAC,
3340                        },
3341          .auth_first = 0,
3342          },
3343         {
3344          .type = CRYPTO_ALG_TYPE_AEAD,
3345          .alg.aead = {
3346                  .base = {
3347                         .cra_name = "authenc(hmac(md5),cbc(des))",
3348                         .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3349                         .cra_blocksize = DES_BLOCK_SIZE,
3350                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3351                  },
3352                  .setkey = aead_authenc_setkey,
3353                  .ivsize = DES_BLOCK_SIZE,
3354                  .maxauthsize = MD5_DIGEST_SIZE,
3355          },
3356          .cipher_info = {
3357                          .alg = CIPHER_ALG_DES,
3358                          .mode = CIPHER_MODE_CBC,
3359                          },
3360          .auth_info = {
3361                        .alg = HASH_ALG_MD5,
3362                        .mode = HASH_MODE_HMAC,
3363                        },
3364          .auth_first = 0,
3365          },
3366         {
3367          .type = CRYPTO_ALG_TYPE_AEAD,
3368          .alg.aead = {
3369                  .base = {
3370                         .cra_name = "authenc(hmac(sha1),cbc(des))",
3371                         .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3372                         .cra_blocksize = DES_BLOCK_SIZE,
3373                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3374                  },
3375                  .setkey = aead_authenc_setkey,
3376                  .ivsize = DES_BLOCK_SIZE,
3377                  .maxauthsize = SHA1_DIGEST_SIZE,
3378          },
3379          .cipher_info = {
3380                          .alg = CIPHER_ALG_DES,
3381                          .mode = CIPHER_MODE_CBC,
3382                          },
3383          .auth_info = {
3384                        .alg = HASH_ALG_SHA1,
3385                        .mode = HASH_MODE_HMAC,
3386                        },
3387          .auth_first = 0,
3388          },
3389         {
3390          .type = CRYPTO_ALG_TYPE_AEAD,
3391          .alg.aead = {
3392                  .base = {
3393                         .cra_name = "authenc(hmac(sha224),cbc(des))",
3394                         .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3395                         .cra_blocksize = DES_BLOCK_SIZE,
3396                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3397                  },
3398                  .setkey = aead_authenc_setkey,
3399                  .ivsize = DES_BLOCK_SIZE,
3400                  .maxauthsize = SHA224_DIGEST_SIZE,
3401          },
3402          .cipher_info = {
3403                          .alg = CIPHER_ALG_DES,
3404                          .mode = CIPHER_MODE_CBC,
3405                          },
3406          .auth_info = {
3407                        .alg = HASH_ALG_SHA224,
3408                        .mode = HASH_MODE_HMAC,
3409                        },
3410          .auth_first = 0,
3411          },
3412         {
3413          .type = CRYPTO_ALG_TYPE_AEAD,
3414          .alg.aead = {
3415                  .base = {
3416                         .cra_name = "authenc(hmac(sha256),cbc(des))",
3417                         .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3418                         .cra_blocksize = DES_BLOCK_SIZE,
3419                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3420                  },
3421                  .setkey = aead_authenc_setkey,
3422                  .ivsize = DES_BLOCK_SIZE,
3423                  .maxauthsize = SHA256_DIGEST_SIZE,
3424          },
3425          .cipher_info = {
3426                          .alg = CIPHER_ALG_DES,
3427                          .mode = CIPHER_MODE_CBC,
3428                          },
3429          .auth_info = {
3430                        .alg = HASH_ALG_SHA256,
3431                        .mode = HASH_MODE_HMAC,
3432                        },
3433          .auth_first = 0,
3434          },
3435         {
3436          .type = CRYPTO_ALG_TYPE_AEAD,
3437          .alg.aead = {
3438                  .base = {
3439                         .cra_name = "authenc(hmac(sha384),cbc(des))",
3440                         .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3441                         .cra_blocksize = DES_BLOCK_SIZE,
3442                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3443                  },
3444                  .setkey = aead_authenc_setkey,
3445                  .ivsize = DES_BLOCK_SIZE,
3446                  .maxauthsize = SHA384_DIGEST_SIZE,
3447          },
3448          .cipher_info = {
3449                          .alg = CIPHER_ALG_DES,
3450                          .mode = CIPHER_MODE_CBC,
3451                          },
3452          .auth_info = {
3453                        .alg = HASH_ALG_SHA384,
3454                        .mode = HASH_MODE_HMAC,
3455                        },
3456          .auth_first = 0,
3457          },
3458         {
3459          .type = CRYPTO_ALG_TYPE_AEAD,
3460          .alg.aead = {
3461                  .base = {
3462                         .cra_name = "authenc(hmac(sha512),cbc(des))",
3463                         .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3464                         .cra_blocksize = DES_BLOCK_SIZE,
3465                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3466                  },
3467                  .setkey = aead_authenc_setkey,
3468                  .ivsize = DES_BLOCK_SIZE,
3469                  .maxauthsize = SHA512_DIGEST_SIZE,
3470          },
3471          .cipher_info = {
3472                          .alg = CIPHER_ALG_DES,
3473                          .mode = CIPHER_MODE_CBC,
3474                          },
3475          .auth_info = {
3476                        .alg = HASH_ALG_SHA512,
3477                        .mode = HASH_MODE_HMAC,
3478                        },
3479          .auth_first = 0,
3480          },
3481         {
3482          .type = CRYPTO_ALG_TYPE_AEAD,
3483          .alg.aead = {
3484                  .base = {
3485                         .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3486                         .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3487                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3488                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3489                  },
3490                  .setkey = aead_authenc_setkey,
3491                  .ivsize = DES3_EDE_BLOCK_SIZE,
3492                  .maxauthsize = MD5_DIGEST_SIZE,
3493          },
3494          .cipher_info = {
3495                          .alg = CIPHER_ALG_3DES,
3496                          .mode = CIPHER_MODE_CBC,
3497                          },
3498          .auth_info = {
3499                        .alg = HASH_ALG_MD5,
3500                        .mode = HASH_MODE_HMAC,
3501                        },
3502          .auth_first = 0,
3503          },
3504         {
3505          .type = CRYPTO_ALG_TYPE_AEAD,
3506          .alg.aead = {
3507                  .base = {
3508                         .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3509                         .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3510                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3511                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3512                  },
3513                  .setkey = aead_authenc_setkey,
3514                  .ivsize = DES3_EDE_BLOCK_SIZE,
3515                  .maxauthsize = SHA1_DIGEST_SIZE,
3516          },
3517          .cipher_info = {
3518                          .alg = CIPHER_ALG_3DES,
3519                          .mode = CIPHER_MODE_CBC,
3520                          },
3521          .auth_info = {
3522                        .alg = HASH_ALG_SHA1,
3523                        .mode = HASH_MODE_HMAC,
3524                        },
3525          .auth_first = 0,
3526          },
3527         {
3528          .type = CRYPTO_ALG_TYPE_AEAD,
3529          .alg.aead = {
3530                  .base = {
3531                         .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3532                         .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3533                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3534                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3535                  },
3536                  .setkey = aead_authenc_setkey,
3537                  .ivsize = DES3_EDE_BLOCK_SIZE,
3538                  .maxauthsize = SHA224_DIGEST_SIZE,
3539          },
3540          .cipher_info = {
3541                          .alg = CIPHER_ALG_3DES,
3542                          .mode = CIPHER_MODE_CBC,
3543                          },
3544          .auth_info = {
3545                        .alg = HASH_ALG_SHA224,
3546                        .mode = HASH_MODE_HMAC,
3547                        },
3548          .auth_first = 0,
3549          },
3550         {
3551          .type = CRYPTO_ALG_TYPE_AEAD,
3552          .alg.aead = {
3553                  .base = {
3554                         .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3555                         .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3556                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3557                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3558                  },
3559                  .setkey = aead_authenc_setkey,
3560                  .ivsize = DES3_EDE_BLOCK_SIZE,
3561                  .maxauthsize = SHA256_DIGEST_SIZE,
3562          },
3563          .cipher_info = {
3564                          .alg = CIPHER_ALG_3DES,
3565                          .mode = CIPHER_MODE_CBC,
3566                          },
3567          .auth_info = {
3568                        .alg = HASH_ALG_SHA256,
3569                        .mode = HASH_MODE_HMAC,
3570                        },
3571          .auth_first = 0,
3572          },
3573         {
3574          .type = CRYPTO_ALG_TYPE_AEAD,
3575          .alg.aead = {
3576                  .base = {
3577                         .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3578                         .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3579                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3580                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3581                  },
3582                  .setkey = aead_authenc_setkey,
3583                  .ivsize = DES3_EDE_BLOCK_SIZE,
3584                  .maxauthsize = SHA384_DIGEST_SIZE,
3585          },
3586          .cipher_info = {
3587                          .alg = CIPHER_ALG_3DES,
3588                          .mode = CIPHER_MODE_CBC,
3589                          },
3590          .auth_info = {
3591                        .alg = HASH_ALG_SHA384,
3592                        .mode = HASH_MODE_HMAC,
3593                        },
3594          .auth_first = 0,
3595          },
3596         {
3597          .type = CRYPTO_ALG_TYPE_AEAD,
3598          .alg.aead = {
3599                  .base = {
3600                         .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3601                         .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3602                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3603                         .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC
3604                  },
3605                  .setkey = aead_authenc_setkey,
3606                  .ivsize = DES3_EDE_BLOCK_SIZE,
3607                  .maxauthsize = SHA512_DIGEST_SIZE,
3608          },
3609          .cipher_info = {
3610                          .alg = CIPHER_ALG_3DES,
3611                          .mode = CIPHER_MODE_CBC,
3612                          },
3613          .auth_info = {
3614                        .alg = HASH_ALG_SHA512,
3615                        .mode = HASH_MODE_HMAC,
3616                        },
3617          .auth_first = 0,
3618          },
3619
3620 /* ABLKCIPHER algorithms. */
3621         {
3622          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3623          .alg.crypto = {
3624                         .cra_name = "ecb(arc4)",
3625                         .cra_driver_name = "ecb-arc4-iproc",
3626                         .cra_blocksize = ARC4_BLOCK_SIZE,
3627                         .cra_ablkcipher = {
3628                                            .min_keysize = ARC4_MIN_KEY_SIZE,
3629                                            .max_keysize = ARC4_MAX_KEY_SIZE,
3630                                            .ivsize = 0,
3631                                         }
3632                         },
3633          .cipher_info = {
3634                          .alg = CIPHER_ALG_RC4,
3635                          .mode = CIPHER_MODE_NONE,
3636                          },
3637          .auth_info = {
3638                        .alg = HASH_ALG_NONE,
3639                        .mode = HASH_MODE_NONE,
3640                        },
3641          },
3642         {
3643          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3644          .alg.crypto = {
3645                         .cra_name = "ofb(des)",
3646                         .cra_driver_name = "ofb-des-iproc",
3647                         .cra_blocksize = DES_BLOCK_SIZE,
3648                         .cra_ablkcipher = {
3649                                            .min_keysize = DES_KEY_SIZE,
3650                                            .max_keysize = DES_KEY_SIZE,
3651                                            .ivsize = DES_BLOCK_SIZE,
3652                                         }
3653                         },
3654          .cipher_info = {
3655                          .alg = CIPHER_ALG_DES,
3656                          .mode = CIPHER_MODE_OFB,
3657                          },
3658          .auth_info = {
3659                        .alg = HASH_ALG_NONE,
3660                        .mode = HASH_MODE_NONE,
3661                        },
3662          },
3663         {
3664          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3665          .alg.crypto = {
3666                         .cra_name = "cbc(des)",
3667                         .cra_driver_name = "cbc-des-iproc",
3668                         .cra_blocksize = DES_BLOCK_SIZE,
3669                         .cra_ablkcipher = {
3670                                            .min_keysize = DES_KEY_SIZE,
3671                                            .max_keysize = DES_KEY_SIZE,
3672                                            .ivsize = DES_BLOCK_SIZE,
3673                                         }
3674                         },
3675          .cipher_info = {
3676                          .alg = CIPHER_ALG_DES,
3677                          .mode = CIPHER_MODE_CBC,
3678                          },
3679          .auth_info = {
3680                        .alg = HASH_ALG_NONE,
3681                        .mode = HASH_MODE_NONE,
3682                        },
3683          },
3684         {
3685          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3686          .alg.crypto = {
3687                         .cra_name = "ecb(des)",
3688                         .cra_driver_name = "ecb-des-iproc",
3689                         .cra_blocksize = DES_BLOCK_SIZE,
3690                         .cra_ablkcipher = {
3691                                            .min_keysize = DES_KEY_SIZE,
3692                                            .max_keysize = DES_KEY_SIZE,
3693                                            .ivsize = 0,
3694                                         }
3695                         },
3696          .cipher_info = {
3697                          .alg = CIPHER_ALG_DES,
3698                          .mode = CIPHER_MODE_ECB,
3699                          },
3700          .auth_info = {
3701                        .alg = HASH_ALG_NONE,
3702                        .mode = HASH_MODE_NONE,
3703                        },
3704          },
3705         {
3706          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3707          .alg.crypto = {
3708                         .cra_name = "ofb(des3_ede)",
3709                         .cra_driver_name = "ofb-des3-iproc",
3710                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3711                         .cra_ablkcipher = {
3712                                            .min_keysize = DES3_EDE_KEY_SIZE,
3713                                            .max_keysize = DES3_EDE_KEY_SIZE,
3714                                            .ivsize = DES3_EDE_BLOCK_SIZE,
3715                                         }
3716                         },
3717          .cipher_info = {
3718                          .alg = CIPHER_ALG_3DES,
3719                          .mode = CIPHER_MODE_OFB,
3720                          },
3721          .auth_info = {
3722                        .alg = HASH_ALG_NONE,
3723                        .mode = HASH_MODE_NONE,
3724                        },
3725          },
3726         {
3727          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3728          .alg.crypto = {
3729                         .cra_name = "cbc(des3_ede)",
3730                         .cra_driver_name = "cbc-des3-iproc",
3731                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3732                         .cra_ablkcipher = {
3733                                            .min_keysize = DES3_EDE_KEY_SIZE,
3734                                            .max_keysize = DES3_EDE_KEY_SIZE,
3735                                            .ivsize = DES3_EDE_BLOCK_SIZE,
3736                                         }
3737                         },
3738          .cipher_info = {
3739                          .alg = CIPHER_ALG_3DES,
3740                          .mode = CIPHER_MODE_CBC,
3741                          },
3742          .auth_info = {
3743                        .alg = HASH_ALG_NONE,
3744                        .mode = HASH_MODE_NONE,
3745                        },
3746          },
3747         {
3748          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3749          .alg.crypto = {
3750                         .cra_name = "ecb(des3_ede)",
3751                         .cra_driver_name = "ecb-des3-iproc",
3752                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3753                         .cra_ablkcipher = {
3754                                            .min_keysize = DES3_EDE_KEY_SIZE,
3755                                            .max_keysize = DES3_EDE_KEY_SIZE,
3756                                            .ivsize = 0,
3757                                         }
3758                         },
3759          .cipher_info = {
3760                          .alg = CIPHER_ALG_3DES,
3761                          .mode = CIPHER_MODE_ECB,
3762                          },
3763          .auth_info = {
3764                        .alg = HASH_ALG_NONE,
3765                        .mode = HASH_MODE_NONE,
3766                        },
3767          },
3768         {
3769          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3770          .alg.crypto = {
3771                         .cra_name = "ofb(aes)",
3772                         .cra_driver_name = "ofb-aes-iproc",
3773                         .cra_blocksize = AES_BLOCK_SIZE,
3774                         .cra_ablkcipher = {
3775                                            .min_keysize = AES_MIN_KEY_SIZE,
3776                                            .max_keysize = AES_MAX_KEY_SIZE,
3777                                            .ivsize = AES_BLOCK_SIZE,
3778                                         }
3779                         },
3780          .cipher_info = {
3781                          .alg = CIPHER_ALG_AES,
3782                          .mode = CIPHER_MODE_OFB,
3783                          },
3784          .auth_info = {
3785                        .alg = HASH_ALG_NONE,
3786                        .mode = HASH_MODE_NONE,
3787                        },
3788          },
3789         {
3790          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3791          .alg.crypto = {
3792                         .cra_name = "cbc(aes)",
3793                         .cra_driver_name = "cbc-aes-iproc",
3794                         .cra_blocksize = AES_BLOCK_SIZE,
3795                         .cra_ablkcipher = {
3796                                            .min_keysize = AES_MIN_KEY_SIZE,
3797                                            .max_keysize = AES_MAX_KEY_SIZE,
3798                                            .ivsize = AES_BLOCK_SIZE,
3799                                         }
3800                         },
3801          .cipher_info = {
3802                          .alg = CIPHER_ALG_AES,
3803                          .mode = CIPHER_MODE_CBC,
3804                          },
3805          .auth_info = {
3806                        .alg = HASH_ALG_NONE,
3807                        .mode = HASH_MODE_NONE,
3808                        },
3809          },
3810         {
3811          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3812          .alg.crypto = {
3813                         .cra_name = "ecb(aes)",
3814                         .cra_driver_name = "ecb-aes-iproc",
3815                         .cra_blocksize = AES_BLOCK_SIZE,
3816                         .cra_ablkcipher = {
3817                                            .min_keysize = AES_MIN_KEY_SIZE,
3818                                            .max_keysize = AES_MAX_KEY_SIZE,
3819                                            .ivsize = 0,
3820                                         }
3821                         },
3822          .cipher_info = {
3823                          .alg = CIPHER_ALG_AES,
3824                          .mode = CIPHER_MODE_ECB,
3825                          },
3826          .auth_info = {
3827                        .alg = HASH_ALG_NONE,
3828                        .mode = HASH_MODE_NONE,
3829                        },
3830          },
3831         {
3832          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3833          .alg.crypto = {
3834                         .cra_name = "ctr(aes)",
3835                         .cra_driver_name = "ctr-aes-iproc",
3836                         .cra_blocksize = AES_BLOCK_SIZE,
3837                         .cra_ablkcipher = {
3838                                            .min_keysize = AES_MIN_KEY_SIZE,
3839                                            .max_keysize = AES_MAX_KEY_SIZE,
3840                                            .ivsize = AES_BLOCK_SIZE,
3841                                         }
3842                         },
3843          .cipher_info = {
3844                          .alg = CIPHER_ALG_AES,
3845                          .mode = CIPHER_MODE_CTR,
3846                          },
3847          .auth_info = {
3848                        .alg = HASH_ALG_NONE,
3849                        .mode = HASH_MODE_NONE,
3850                        },
3851          },
3852         {
3853          .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
3854          .alg.crypto = {
3855                         .cra_name = "xts(aes)",
3856                         .cra_driver_name = "xts-aes-iproc",
3857                         .cra_blocksize = AES_BLOCK_SIZE,
3858                         .cra_ablkcipher = {
3859                                 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3860                                 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3861                                 .ivsize = AES_BLOCK_SIZE,
3862                                 }
3863                         },
3864          .cipher_info = {
3865                          .alg = CIPHER_ALG_AES,
3866                          .mode = CIPHER_MODE_XTS,
3867                          },
3868          .auth_info = {
3869                        .alg = HASH_ALG_NONE,
3870                        .mode = HASH_MODE_NONE,
3871                        },
3872          },
3873
3874 /* AHASH algorithms. */
3875         {
3876          .type = CRYPTO_ALG_TYPE_AHASH,
3877          .alg.hash = {
3878                       .halg.digestsize = MD5_DIGEST_SIZE,
3879                       .halg.base = {
3880                                     .cra_name = "md5",
3881                                     .cra_driver_name = "md5-iproc",
3882                                     .cra_blocksize = MD5_BLOCK_WORDS * 4,
3883                                     .cra_flags = CRYPTO_ALG_ASYNC,
3884                                 }
3885                       },
3886          .cipher_info = {
3887                          .alg = CIPHER_ALG_NONE,
3888                          .mode = CIPHER_MODE_NONE,
3889                          },
3890          .auth_info = {
3891                        .alg = HASH_ALG_MD5,
3892                        .mode = HASH_MODE_HASH,
3893                        },
3894          },
3895         {
3896          .type = CRYPTO_ALG_TYPE_AHASH,
3897          .alg.hash = {
3898                       .halg.digestsize = MD5_DIGEST_SIZE,
3899                       .halg.base = {
3900                                     .cra_name = "hmac(md5)",
3901                                     .cra_driver_name = "hmac-md5-iproc",
3902                                     .cra_blocksize = MD5_BLOCK_WORDS * 4,
3903                                 }
3904                       },
3905          .cipher_info = {
3906                          .alg = CIPHER_ALG_NONE,
3907                          .mode = CIPHER_MODE_NONE,
3908                          },
3909          .auth_info = {
3910                        .alg = HASH_ALG_MD5,
3911                        .mode = HASH_MODE_HMAC,
3912                        },
3913          },
3914         {.type = CRYPTO_ALG_TYPE_AHASH,
3915          .alg.hash = {
3916                       .halg.digestsize = SHA1_DIGEST_SIZE,
3917                       .halg.base = {
3918                                     .cra_name = "sha1",
3919                                     .cra_driver_name = "sha1-iproc",
3920                                     .cra_blocksize = SHA1_BLOCK_SIZE,
3921                                 }
3922                       },
3923          .cipher_info = {
3924                          .alg = CIPHER_ALG_NONE,
3925                          .mode = CIPHER_MODE_NONE,
3926                          },
3927          .auth_info = {
3928                        .alg = HASH_ALG_SHA1,
3929                        .mode = HASH_MODE_HASH,
3930                        },
3931          },
3932         {.type = CRYPTO_ALG_TYPE_AHASH,
3933          .alg.hash = {
3934                       .halg.digestsize = SHA1_DIGEST_SIZE,
3935                       .halg.base = {
3936                                     .cra_name = "hmac(sha1)",
3937                                     .cra_driver_name = "hmac-sha1-iproc",
3938                                     .cra_blocksize = SHA1_BLOCK_SIZE,
3939                                 }
3940                       },
3941          .cipher_info = {
3942                          .alg = CIPHER_ALG_NONE,
3943                          .mode = CIPHER_MODE_NONE,
3944                          },
3945          .auth_info = {
3946                        .alg = HASH_ALG_SHA1,
3947                        .mode = HASH_MODE_HMAC,
3948                        },
3949          },
3950         {.type = CRYPTO_ALG_TYPE_AHASH,
3951          .alg.hash = {
3952                         .halg.digestsize = SHA224_DIGEST_SIZE,
3953                         .halg.base = {
3954                                     .cra_name = "sha224",
3955                                     .cra_driver_name = "sha224-iproc",
3956                                     .cra_blocksize = SHA224_BLOCK_SIZE,
3957                         }
3958                       },
3959          .cipher_info = {
3960                          .alg = CIPHER_ALG_NONE,
3961                          .mode = CIPHER_MODE_NONE,
3962                          },
3963          .auth_info = {
3964                        .alg = HASH_ALG_SHA224,
3965                        .mode = HASH_MODE_HASH,
3966                        },
3967          },
3968         {.type = CRYPTO_ALG_TYPE_AHASH,
3969          .alg.hash = {
3970                       .halg.digestsize = SHA224_DIGEST_SIZE,
3971                       .halg.base = {
3972                                     .cra_name = "hmac(sha224)",
3973                                     .cra_driver_name = "hmac-sha224-iproc",
3974                                     .cra_blocksize = SHA224_BLOCK_SIZE,
3975                                 }
3976                       },
3977          .cipher_info = {
3978                          .alg = CIPHER_ALG_NONE,
3979                          .mode = CIPHER_MODE_NONE,
3980                          },
3981          .auth_info = {
3982                        .alg = HASH_ALG_SHA224,
3983                        .mode = HASH_MODE_HMAC,
3984                        },
3985          },
3986         {.type = CRYPTO_ALG_TYPE_AHASH,
3987          .alg.hash = {
3988                       .halg.digestsize = SHA256_DIGEST_SIZE,
3989                       .halg.base = {
3990                                     .cra_name = "sha256",
3991                                     .cra_driver_name = "sha256-iproc",
3992                                     .cra_blocksize = SHA256_BLOCK_SIZE,
3993                                 }
3994                       },
3995          .cipher_info = {
3996                          .alg = CIPHER_ALG_NONE,
3997                          .mode = CIPHER_MODE_NONE,
3998                          },
3999          .auth_info = {
4000                        .alg = HASH_ALG_SHA256,
4001                        .mode = HASH_MODE_HASH,
4002                        },
4003          },
4004         {.type = CRYPTO_ALG_TYPE_AHASH,
4005          .alg.hash = {
4006                       .halg.digestsize = SHA256_DIGEST_SIZE,
4007                       .halg.base = {
4008                                     .cra_name = "hmac(sha256)",
4009                                     .cra_driver_name = "hmac-sha256-iproc",
4010                                     .cra_blocksize = SHA256_BLOCK_SIZE,
4011                                 }
4012                       },
4013          .cipher_info = {
4014                          .alg = CIPHER_ALG_NONE,
4015                          .mode = CIPHER_MODE_NONE,
4016                          },
4017          .auth_info = {
4018                        .alg = HASH_ALG_SHA256,
4019                        .mode = HASH_MODE_HMAC,
4020                        },
4021          },
4022         {
4023          .type = CRYPTO_ALG_TYPE_AHASH,
4024          .alg.hash = {
4025                       .halg.digestsize = SHA384_DIGEST_SIZE,
4026                       .halg.base = {
4027                                     .cra_name = "sha384",
4028                                     .cra_driver_name = "sha384-iproc",
4029                                     .cra_blocksize = SHA384_BLOCK_SIZE,
4030                                 }
4031                       },
4032          .cipher_info = {
4033                          .alg = CIPHER_ALG_NONE,
4034                          .mode = CIPHER_MODE_NONE,
4035                          },
4036          .auth_info = {
4037                        .alg = HASH_ALG_SHA384,
4038                        .mode = HASH_MODE_HASH,
4039                        },
4040          },
4041         {
4042          .type = CRYPTO_ALG_TYPE_AHASH,
4043          .alg.hash = {
4044                       .halg.digestsize = SHA384_DIGEST_SIZE,
4045                       .halg.base = {
4046                                     .cra_name = "hmac(sha384)",
4047                                     .cra_driver_name = "hmac-sha384-iproc",
4048                                     .cra_blocksize = SHA384_BLOCK_SIZE,
4049                                 }
4050                       },
4051          .cipher_info = {
4052                          .alg = CIPHER_ALG_NONE,
4053                          .mode = CIPHER_MODE_NONE,
4054                          },
4055          .auth_info = {
4056                        .alg = HASH_ALG_SHA384,
4057                        .mode = HASH_MODE_HMAC,
4058                        },
4059          },
4060         {
4061          .type = CRYPTO_ALG_TYPE_AHASH,
4062          .alg.hash = {
4063                       .halg.digestsize = SHA512_DIGEST_SIZE,
4064                       .halg.base = {
4065                                     .cra_name = "sha512",
4066                                     .cra_driver_name = "sha512-iproc",
4067                                     .cra_blocksize = SHA512_BLOCK_SIZE,
4068                                 }
4069                       },
4070          .cipher_info = {
4071                          .alg = CIPHER_ALG_NONE,
4072                          .mode = CIPHER_MODE_NONE,
4073                          },
4074          .auth_info = {
4075                        .alg = HASH_ALG_SHA512,
4076                        .mode = HASH_MODE_HASH,
4077                        },
4078          },
4079         {
4080          .type = CRYPTO_ALG_TYPE_AHASH,
4081          .alg.hash = {
4082                       .halg.digestsize = SHA512_DIGEST_SIZE,
4083                       .halg.base = {
4084                                     .cra_name = "hmac(sha512)",
4085                                     .cra_driver_name = "hmac-sha512-iproc",
4086                                     .cra_blocksize = SHA512_BLOCK_SIZE,
4087                                 }
4088                       },
4089          .cipher_info = {
4090                          .alg = CIPHER_ALG_NONE,
4091                          .mode = CIPHER_MODE_NONE,
4092                          },
4093          .auth_info = {
4094                        .alg = HASH_ALG_SHA512,
4095                        .mode = HASH_MODE_HMAC,
4096                        },
4097          },
4098         {
4099          .type = CRYPTO_ALG_TYPE_AHASH,
4100          .alg.hash = {
4101                       .halg.digestsize = SHA3_224_DIGEST_SIZE,
4102                       .halg.base = {
4103                                     .cra_name = "sha3-224",
4104                                     .cra_driver_name = "sha3-224-iproc",
4105                                     .cra_blocksize = SHA3_224_BLOCK_SIZE,
4106                                 }
4107                       },
4108          .cipher_info = {
4109                          .alg = CIPHER_ALG_NONE,
4110                          .mode = CIPHER_MODE_NONE,
4111                          },
4112          .auth_info = {
4113                        .alg = HASH_ALG_SHA3_224,
4114                        .mode = HASH_MODE_HASH,
4115                        },
4116          },
4117         {
4118          .type = CRYPTO_ALG_TYPE_AHASH,
4119          .alg.hash = {
4120                       .halg.digestsize = SHA3_224_DIGEST_SIZE,
4121                       .halg.base = {
4122                                     .cra_name = "hmac(sha3-224)",
4123                                     .cra_driver_name = "hmac-sha3-224-iproc",
4124                                     .cra_blocksize = SHA3_224_BLOCK_SIZE,
4125                                 }
4126                       },
4127          .cipher_info = {
4128                          .alg = CIPHER_ALG_NONE,
4129                          .mode = CIPHER_MODE_NONE,
4130                          },
4131          .auth_info = {
4132                        .alg = HASH_ALG_SHA3_224,
4133                        .mode = HASH_MODE_HMAC,
4134                        },
4135          },
4136         {
4137          .type = CRYPTO_ALG_TYPE_AHASH,
4138          .alg.hash = {
4139                       .halg.digestsize = SHA3_256_DIGEST_SIZE,
4140                       .halg.base = {
4141                                     .cra_name = "sha3-256",
4142                                     .cra_driver_name = "sha3-256-iproc",
4143                                     .cra_blocksize = SHA3_256_BLOCK_SIZE,
4144                                 }
4145                       },
4146          .cipher_info = {
4147                          .alg = CIPHER_ALG_NONE,
4148                          .mode = CIPHER_MODE_NONE,
4149                          },
4150          .auth_info = {
4151                        .alg = HASH_ALG_SHA3_256,
4152                        .mode = HASH_MODE_HASH,
4153                        },
4154          },
4155         {
4156          .type = CRYPTO_ALG_TYPE_AHASH,
4157          .alg.hash = {
4158                       .halg.digestsize = SHA3_256_DIGEST_SIZE,
4159                       .halg.base = {
4160                                     .cra_name = "hmac(sha3-256)",
4161                                     .cra_driver_name = "hmac-sha3-256-iproc",
4162                                     .cra_blocksize = SHA3_256_BLOCK_SIZE,
4163                                 }
4164                       },
4165          .cipher_info = {
4166                          .alg = CIPHER_ALG_NONE,
4167                          .mode = CIPHER_MODE_NONE,
4168                          },
4169          .auth_info = {
4170                        .alg = HASH_ALG_SHA3_256,
4171                        .mode = HASH_MODE_HMAC,
4172                        },
4173          },
4174         {
4175          .type = CRYPTO_ALG_TYPE_AHASH,
4176          .alg.hash = {
4177                       .halg.digestsize = SHA3_384_DIGEST_SIZE,
4178                       .halg.base = {
4179                                     .cra_name = "sha3-384",
4180                                     .cra_driver_name = "sha3-384-iproc",
4181                                     .cra_blocksize = SHA3_384_BLOCK_SIZE,
4182                                 }
4183                       },
4184          .cipher_info = {
4185                          .alg = CIPHER_ALG_NONE,
4186                          .mode = CIPHER_MODE_NONE,
4187                          },
4188          .auth_info = {
4189                        .alg = HASH_ALG_SHA3_384,
4190                        .mode = HASH_MODE_HASH,
4191                        },
4192          },
4193         {
4194          .type = CRYPTO_ALG_TYPE_AHASH,
4195          .alg.hash = {
4196                       .halg.digestsize = SHA3_384_DIGEST_SIZE,
4197                       .halg.base = {
4198                                     .cra_name = "hmac(sha3-384)",
4199                                     .cra_driver_name = "hmac-sha3-384-iproc",
4200                                     .cra_blocksize = SHA3_384_BLOCK_SIZE,
4201                                 }
4202                       },
4203          .cipher_info = {
4204                          .alg = CIPHER_ALG_NONE,
4205                          .mode = CIPHER_MODE_NONE,
4206                          },
4207          .auth_info = {
4208                        .alg = HASH_ALG_SHA3_384,
4209                        .mode = HASH_MODE_HMAC,
4210                        },
4211          },
4212         {
4213          .type = CRYPTO_ALG_TYPE_AHASH,
4214          .alg.hash = {
4215                       .halg.digestsize = SHA3_512_DIGEST_SIZE,
4216                       .halg.base = {
4217                                     .cra_name = "sha3-512",
4218                                     .cra_driver_name = "sha3-512-iproc",
4219                                     .cra_blocksize = SHA3_512_BLOCK_SIZE,
4220                                 }
4221                       },
4222          .cipher_info = {
4223                          .alg = CIPHER_ALG_NONE,
4224                          .mode = CIPHER_MODE_NONE,
4225                          },
4226          .auth_info = {
4227                        .alg = HASH_ALG_SHA3_512,
4228                        .mode = HASH_MODE_HASH,
4229                        },
4230          },
4231         {
4232          .type = CRYPTO_ALG_TYPE_AHASH,
4233          .alg.hash = {
4234                       .halg.digestsize = SHA3_512_DIGEST_SIZE,
4235                       .halg.base = {
4236                                     .cra_name = "hmac(sha3-512)",
4237                                     .cra_driver_name = "hmac-sha3-512-iproc",
4238                                     .cra_blocksize = SHA3_512_BLOCK_SIZE,
4239                                 }
4240                       },
4241          .cipher_info = {
4242                          .alg = CIPHER_ALG_NONE,
4243                          .mode = CIPHER_MODE_NONE,
4244                          },
4245          .auth_info = {
4246                        .alg = HASH_ALG_SHA3_512,
4247                        .mode = HASH_MODE_HMAC,
4248                        },
4249          },
4250         {
4251          .type = CRYPTO_ALG_TYPE_AHASH,
4252          .alg.hash = {
4253                       .halg.digestsize = AES_BLOCK_SIZE,
4254                       .halg.base = {
4255                                     .cra_name = "xcbc(aes)",
4256                                     .cra_driver_name = "xcbc-aes-iproc",
4257                                     .cra_blocksize = AES_BLOCK_SIZE,
4258                                 }
4259                       },
4260          .cipher_info = {
4261                          .alg = CIPHER_ALG_NONE,
4262                          .mode = CIPHER_MODE_NONE,
4263                          },
4264          .auth_info = {
4265                        .alg = HASH_ALG_AES,
4266                        .mode = HASH_MODE_XCBC,
4267                        },
4268          },
4269         {
4270          .type = CRYPTO_ALG_TYPE_AHASH,
4271          .alg.hash = {
4272                       .halg.digestsize = AES_BLOCK_SIZE,
4273                       .halg.base = {
4274                                     .cra_name = "cmac(aes)",
4275                                     .cra_driver_name = "cmac-aes-iproc",
4276                                     .cra_blocksize = AES_BLOCK_SIZE,
4277                                 }
4278                       },
4279          .cipher_info = {
4280                          .alg = CIPHER_ALG_NONE,
4281                          .mode = CIPHER_MODE_NONE,
4282                          },
4283          .auth_info = {
4284                        .alg = HASH_ALG_AES,
4285                        .mode = HASH_MODE_CMAC,
4286                        },
4287          },
4288 };
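
/*
 * Usage sketch: a minimal example of driving one of the ahash entries above
 * through the kernel crypto API once it has been registered. The buffer
 * names (data, len) are placeholders; the crypto core selects the highest
 * priority "sha256" provider, which may or may not be this driver.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	if (!req) {
 *		crypto_free_ahash(tfm);
 *		return -ENOMEM;
 *	}
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */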
4289
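/**
 * generic_cra_init() - Common transform initialization for all algo types.
 * Cache the algorithm's cipher and auth parameters in the transform context
 * and record the maximum payload the SPU can handle for this cipher, mode,
 * and block size.
 * @tfm:        transform being initialized
 * @cipher_alg: driver_algs[] entry describing the algorithm
 *
 * Return: 0
 */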
4290 static int generic_cra_init(struct crypto_tfm *tfm,
4291                             struct iproc_alg_s *cipher_alg)
4292 {
4293         struct spu_hw *spu = &iproc_priv.spu;
4294         struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4295         unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4296
4297         flow_log("%s()\n", __func__);
4298
4299         ctx->alg = cipher_alg;
4300         ctx->cipher = cipher_alg->cipher_info;
4301         ctx->auth = cipher_alg->auth_info;
4302         ctx->auth_first = cipher_alg->auth_first;
4303         ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4304                                                     ctx->cipher.mode,
4305                                                     blocksize);
4306         ctx->fallback_cipher = NULL;
4307
4308         ctx->enckeylen = 0;
4309         ctx->authkeylen = 0;
4310
4311         atomic_inc(&iproc_priv.stream_count);
4312         atomic_inc(&iproc_priv.session_count);
4313
4314         return 0;
4315 }
4316
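/**
 * ablkcipher_cra_init() - Initialize a symmetric cipher transform. Set the
 * request context size, then do the common initialization.
 * @tfm: transform being initialized
 *
 * Return: 0 if successful
 */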
4317 static int ablkcipher_cra_init(struct crypto_tfm *tfm)
4318 {
4319         struct crypto_alg *alg = tfm->__crt_alg;
4320         struct iproc_alg_s *cipher_alg;
4321
4322         flow_log("%s()\n", __func__);
4323
4324         tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s);
4325
4326         cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto);
4327         return generic_cra_init(tfm, cipher_alg);
4328 }
4329
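/**
 * ahash_cra_init() - Initialize a hash transform. Do the common
 * initialization, then set the request context size.
 * @tfm: transform being initialized
 *
 * Return: 0 if successful
 */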
4330 static int ahash_cra_init(struct crypto_tfm *tfm)
4331 {
4332         int err;
4333         struct crypto_alg *alg = tfm->__crt_alg;
4334         struct iproc_alg_s *cipher_alg;
4335
4336         cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4337                                   alg.hash);
4338
4339         err = generic_cra_init(tfm, cipher_alg);
4340         flow_log("%s()\n", __func__);
4341
4342         /*
4343          * export state size has to be < 512 bytes. So don't include msg bufs
4344          * in state size.
4345          */
4346         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4347                                  sizeof(struct iproc_reqctx_s));
4348
4349         return err;
4350 }
4351
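/**
 * aead_cra_init() - Initialize an AEAD transform. In addition to the common
 * initialization, generate a random first IV and, if the algorithm was
 * registered with CRYPTO_ALG_NEED_FALLBACK, allocate a software fallback
 * cipher for requests the SPU hardware cannot handle.
 * @aead: AEAD transform being initialized
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */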
4352 static int aead_cra_init(struct crypto_aead *aead)
4353 {
4354         struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4355         struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4356         struct crypto_alg *alg = tfm->__crt_alg;
4357         struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4358         struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4359                                                       alg.aead);
4360
4361         int err = generic_cra_init(tfm, cipher_alg);
4362
4363         flow_log("%s()\n", __func__);
4364
4365         crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
4366         ctx->is_esp = false;
4367         ctx->salt_len = 0;
4368         ctx->salt_offset = 0;
4369
4370         /* random first IV */
4371         get_random_bytes(ctx->iv, MAX_IV_SIZE);
4372         flow_dump("  iv: ", ctx->iv, MAX_IV_SIZE);
4373
4374         if (!err) {
4375                 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
4376                         flow_log("%s() creating fallback cipher\n", __func__);
4377
4378                         ctx->fallback_cipher =
4379                             crypto_alloc_aead(alg->cra_name, 0,
4380                                               CRYPTO_ALG_ASYNC |
4381                                               CRYPTO_ALG_NEED_FALLBACK);
4382                         if (IS_ERR(ctx->fallback_cipher)) {
4383                                 pr_err("%s() Error: failed to allocate fallback for %s\n",
4384                                        __func__, alg->cra_name);
4385                                 return PTR_ERR(ctx->fallback_cipher);
4386                         }
4387                 }
4388         }
4389
4390         return err;
4391 }
4392
4393 static void generic_cra_exit(struct crypto_tfm *tfm)
4394 {
4395         atomic_dec(&iproc_priv.session_count);
4396 }
4397
4398 static void aead_cra_exit(struct crypto_aead *aead)
4399 {
4400         struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4401         struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4402
4403         generic_cra_exit(tfm);
4404
4405         if (ctx->fallback_cipher) {
4406                 crypto_free_aead(ctx->fallback_cipher);
4407                 ctx->fallback_cipher = NULL;
4408         }
4409 }
4410
4411 /**
4412  * spu_functions_register() - Specify hardware-specific SPU functions based on
4413  * SPU type read from device tree.
4414  * @dev:        device structure
4415  * @spu_type:   SPU hardware generation
4416  * @spu_subtype: SPU hardware version
4417  */
4418 static void spu_functions_register(struct device *dev,
4419                                    enum spu_spu_type spu_type,
4420                                    enum spu_spu_subtype spu_subtype)
4421 {
4422         struct spu_hw *spu = &iproc_priv.spu;
4423
4424         if (spu_type == SPU_TYPE_SPUM) {
4425                 dev_dbg(dev, "Registering SPUM functions");
4426                 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4427                 spu->spu_payload_length = spum_payload_length;
4428                 spu->spu_response_hdr_len = spum_response_hdr_len;
4429                 spu->spu_hash_pad_len = spum_hash_pad_len;
4430                 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4431                 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4432                 spu->spu_aead_ivlen = spum_aead_ivlen;
4433                 spu->spu_hash_type = spum_hash_type;
4434                 spu->spu_digest_size = spum_digest_size;
4435                 spu->spu_create_request = spum_create_request;
4436                 spu->spu_cipher_req_init = spum_cipher_req_init;
4437                 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4438                 spu->spu_request_pad = spum_request_pad;
4439                 spu->spu_tx_status_len = spum_tx_status_len;
4440                 spu->spu_rx_status_len = spum_rx_status_len;
4441                 spu->spu_status_process = spum_status_process;
4442                 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4443                 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4444                 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4445                 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4446                         spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4447                 else
4448                         spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4449         } else {
4450                 dev_dbg(dev, "Registering SPU2 functions");
4451                 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4452                 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4453                 spu->spu_payload_length = spu2_payload_length;
4454                 spu->spu_response_hdr_len = spu2_response_hdr_len;
4455                 spu->spu_hash_pad_len = spu2_hash_pad_len;
4456                 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4457                 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4458                 spu->spu_aead_ivlen = spu2_aead_ivlen;
4459                 spu->spu_hash_type = spu2_hash_type;
4460                 spu->spu_digest_size = spu2_digest_size;
4461                 spu->spu_create_request = spu2_create_request;
4462                 spu->spu_cipher_req_init = spu2_cipher_req_init;
4463                 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4464                 spu->spu_request_pad = spu2_request_pad;
4465                 spu->spu_tx_status_len = spu2_tx_status_len;
4466                 spu->spu_rx_status_len = spu2_rx_status_len;
4467                 spu->spu_status_process = spu2_status_process;
4468                 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4469                 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4470                 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4471         }
4472 }
4473
4474 /**
4475  * spu_mb_init() - Initialize mailbox client. Request ownership of the
4476  * mailbox channels used by the SPU being probed.
4477  * @dev:  SPU driver device structure
4478  *
4479  * Return: 0 if successful
4480  *         < 0 otherwise
4481  */
4482 static int spu_mb_init(struct device *dev)
4483 {
4484         struct mbox_client *mcl = &iproc_priv.mcl;
4485         int err, i;
4486
4487         iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4488                                   sizeof(struct mbox_chan *), GFP_KERNEL);
4489         if (!iproc_priv.mbox)
4490                 return -ENOMEM;
4491
4492         mcl->dev = dev;
4493         mcl->tx_block = false;
4494         mcl->tx_tout = 0;
4495         mcl->knows_txdone = true;
4496         mcl->rx_callback = spu_rx_callback;
4497         mcl->tx_done = NULL;
4498
4499         for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4500                 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4501                 if (IS_ERR(iproc_priv.mbox[i])) {
4502                         err = (int)PTR_ERR(iproc_priv.mbox[i]);
4503                         dev_err(dev,
4504                                 "Mbox channel %d request failed with err %d",
4505                                 i, err);
4506                         iproc_priv.mbox[i] = NULL;
4507                         goto free_channels;
4508                 }
4509         }
4510
4511         return 0;
4512 free_channels:
4513         for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4514                 if (iproc_priv.mbox[i])
4515                         mbox_free_channel(iproc_priv.mbox[i]);
4516         }
4517
4518         return err;
4519 }
4520
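/**
 * spu_mb_release() - Free the mailbox channels requested by spu_mb_init().
 * @pdev: platform device for this SPU hardware
 */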
4521 static void spu_mb_release(struct platform_device *pdev)
4522 {
4523         int i;
4524
4525         for (i = 0; i < iproc_priv.spu.num_chan; i++)
4526                 mbox_free_channel(iproc_priv.mbox[i]);
4527 }
4528
4529 static void spu_counters_init(void)
4530 {
4531         int i;
4532         int j;
4533
4534         atomic_set(&iproc_priv.session_count, 0);
4535         atomic_set(&iproc_priv.stream_count, 0);
4536         atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4537         atomic64_set(&iproc_priv.bytes_in, 0);
4538         atomic64_set(&iproc_priv.bytes_out, 0);
4539         for (i = 0; i < SPU_OP_NUM; i++) {
4540                 atomic_set(&iproc_priv.op_counts[i], 0);
4541                 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4542         }
4543         for (i = 0; i < CIPHER_ALG_LAST; i++)
4544                 for (j = 0; j < CIPHER_MODE_LAST; j++)
4545                         atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4546
4547         for (i = 0; i < HASH_ALG_LAST; i++) {
4548                 atomic_set(&iproc_priv.hash_cnt[i], 0);
4549                 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4550         }
4551         for (i = 0; i < AEAD_TYPE_LAST; i++)
4552                 atomic_set(&iproc_priv.aead_cnt[i], 0);
4553
4554         atomic_set(&iproc_priv.mb_no_spc, 0);
4555         atomic_set(&iproc_priv.mb_send_fail, 0);
4556         atomic_set(&iproc_priv.bad_icv, 0);
4557 }
4558
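/**
 * spu_register_ablkcipher() - Fill in the crypto API callbacks and properties
 * for one symmetric cipher entry in driver_algs[] and register it with the
 * kernel crypto API. RC4 entries are skipped on SPU2, which does not support
 * that cipher.
 * @driver_alg: the driver_algs[] entry to register
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */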
4559 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
4560 {
4561         struct spu_hw *spu = &iproc_priv.spu;
4562         struct crypto_alg *crypto = &driver_alg->alg.crypto;
4563         int err;
4564
4565         /* SPU2 does not support RC4 */
4566         if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) &&
4567             (spu->spu_type == SPU_TYPE_SPU2))
4568                 return 0;
4569
4570         crypto->cra_module = THIS_MODULE;
4571         crypto->cra_priority = cipher_pri;
4572         crypto->cra_alignmask = 0;
4573         crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
4574
4575         crypto->cra_init = ablkcipher_cra_init;
4576         crypto->cra_exit = generic_cra_exit;
4577         crypto->cra_type = &crypto_ablkcipher_type;
4578         crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4579                                 CRYPTO_ALG_KERN_DRIVER_ONLY;
4580
4581         crypto->cra_ablkcipher.setkey = ablkcipher_setkey;
4582         crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt;
4583         crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt;
4584
4585         err = crypto_register_alg(crypto);
4586         /* Mark alg as having been registered, if successful */
4587         if (err == 0)
4588                 driver_alg->registered = true;
4589         pr_debug("  registered ablkcipher %s\n", crypto->cra_driver_name);
4590         return err;
4591 }
4592
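/**
 * spu_register_ahash() - Fill in the crypto API callbacks and properties for
 * one hash entry in driver_algs[] and register it with the kernel crypto API.
 * Entries not supported by the probed hardware (AES hashes other than XCBC on
 * SPU-M, SHA3 on anything but SPU2 version 2) are silently skipped.
 * @driver_alg: the driver_algs[] entry to register
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */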
4593 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4594 {
4595         struct spu_hw *spu = &iproc_priv.spu;
4596         struct ahash_alg *hash = &driver_alg->alg.hash;
4597         int err;
4598
4599         /* AES-XCBC is the only AES hash type currently supported on SPU-M */
4600         if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4601             (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4602             (spu->spu_type == SPU_TYPE_SPUM))
4603                 return 0;
4604
4605         /* SHA3 algorithm variants are only supported on SPU2 version 2 hw */
4606         if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4607             (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4608                 return 0;
4609
4610         hash->halg.base.cra_module = THIS_MODULE;
4611         hash->halg.base.cra_priority = hash_pri;
4612         hash->halg.base.cra_alignmask = 0;
4613         hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4614         hash->halg.base.cra_init = ahash_cra_init;
4615         hash->halg.base.cra_exit = generic_cra_exit;
4616         hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4617         hash->halg.statesize = sizeof(struct spu_hash_export_s);
4618
4619         if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4620                 hash->init = ahash_init;
4621                 hash->update = ahash_update;
4622                 hash->final = ahash_final;
4623                 hash->finup = ahash_finup;
4624                 hash->digest = ahash_digest;
4625                 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4626                     ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4627                     (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4628                         hash->setkey = ahash_setkey;
4629                 }
4630         } else {
4631                 hash->setkey = ahash_hmac_setkey;
4632                 hash->init = ahash_hmac_init;
4633                 hash->update = ahash_hmac_update;
4634                 hash->final = ahash_hmac_final;
4635                 hash->finup = ahash_hmac_finup;
4636                 hash->digest = ahash_hmac_digest;
4637         }
4638         hash->export = ahash_export;
4639         hash->import = ahash_import;
4640
4641         err = crypto_register_ahash(hash);
4642         /* Mark alg as having been registered, if successful */
4643         if (err == 0)
4644                 driver_alg->registered = true;
4645         pr_debug("  registered ahash %s\n",
4646                  hash->halg.base.cra_driver_name);
4647         return err;
4648 }
4649
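/**
 * spu_register_aead() - Fill in the crypto API callbacks and properties for
 * one AEAD entry in driver_algs[] and register it with the kernel crypto API.
 * @driver_alg: the driver_algs[] entry to register
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */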
4650 static int spu_register_aead(struct iproc_alg_s *driver_alg)
4651 {
4652         struct aead_alg *aead = &driver_alg->alg.aead;
4653         int err;
4654
4655         aead->base.cra_module = THIS_MODULE;
4656         aead->base.cra_priority = aead_pri;
4657         aead->base.cra_alignmask = 0;
4658         aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4659
4660         aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
4661         /* setkey set in alg initialization */
4662         aead->setauthsize = aead_setauthsize;
4663         aead->encrypt = aead_encrypt;
4664         aead->decrypt = aead_decrypt;
4665         aead->init = aead_cra_init;
4666         aead->exit = aead_cra_exit;
4667
4668         err = crypto_register_aead(aead);
4669         /* Mark alg as having been registered, if successful */
4670         if (err == 0)
4671                 driver_alg->registered = true;
4672         pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
4673         return err;
4674 }
4675
4676 /* register crypto algorithms the device supports */
4677 static int spu_algs_register(struct device *dev)
4678 {
4679         int i, j;
4680         int err;
4681
4682         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4683                 switch (driver_algs[i].type) {
4684                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4685                         err = spu_register_ablkcipher(&driver_algs[i]);
4686                         break;
4687                 case CRYPTO_ALG_TYPE_AHASH:
4688                         err = spu_register_ahash(&driver_algs[i]);
4689                         break;
4690                 case CRYPTO_ALG_TYPE_AEAD:
4691                         err = spu_register_aead(&driver_algs[i]);
4692                         break;
4693                 default:
4694                         dev_err(dev,
4695                                 "iproc-crypto: unknown alg type: %d",
4696                                 driver_algs[i].type);
4697                         err = -EINVAL;
4698                 }
4699
4700                 if (err) {
4701                         dev_err(dev, "alg registration failed with error %d\n",
4702                                 err);
4703                         goto err_algs;
4704                 }
4705         }
4706
4707         return 0;
4708
4709 err_algs:
4710         for (j = 0; j < i; j++) {
4711                 /* Skip any algorithm not registered */
4712                 if (!driver_algs[j].registered)
4713                         continue;
4714                 switch (driver_algs[j].type) {
4715                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4716                         crypto_unregister_alg(&driver_algs[j].alg.crypto);
4717                         driver_algs[j].registered = false;
4718                         break;
4719                 case CRYPTO_ALG_TYPE_AHASH:
4720                         crypto_unregister_ahash(&driver_algs[j].alg.hash);
4721                         driver_algs[j].registered = false;
4722                         break;
4723                 case CRYPTO_ALG_TYPE_AEAD:
4724                         crypto_unregister_aead(&driver_algs[j].alg.aead);
4725                         driver_algs[j].registered = false;
4726                         break;
4727                 }
4728         }
4729         return err;
4730 }
4731
4732 /* ==================== Kernel Platform API ==================== */
4733
4734 static struct spu_type_subtype spum_ns2_types = {
4735         SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
4736 };
4737
4738 static struct spu_type_subtype spum_nsp_types = {
4739         SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
4740 };
4741
4742 static struct spu_type_subtype spu2_types = {
4743         SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
4744 };
4745
4746 static struct spu_type_subtype spu2_v2_types = {
4747         SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
4748 };
4749
4750 static const struct of_device_id bcm_spu_dt_ids[] = {
4751         {
4752                 .compatible = "brcm,spum-crypto",
4753                 .data = &spum_ns2_types,
4754         },
4755         {
4756                 .compatible = "brcm,spum-nsp-crypto",
4757                 .data = &spum_nsp_types,
4758         },
4759         {
4760                 .compatible = "brcm,spu2-crypto",
4761                 .data = &spu2_types,
4762         },
4763         {
4764                 .compatible = "brcm,spu2-v2-crypto",
4765                 .data = &spu2_v2_types,
4766         },
4767         { /* sentinel */ }
4768 };
4769
4770 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
4771
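/**
 * spu_dt_read() - Read SPU configuration from the device tree: the number of
 * mailbox channels, the SPU type and subtype from the matched compatible
 * string, and the control register range for each SPU block.
 * @pdev: platform device for this SPU hardware
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */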
4772 static int spu_dt_read(struct platform_device *pdev)
4773 {
4774         struct device *dev = &pdev->dev;
4775         struct spu_hw *spu = &iproc_priv.spu;
4776         struct resource *spu_ctrl_regs;
4777         const struct spu_type_subtype *matched_spu_type;
4778         struct device_node *dn = pdev->dev.of_node;
4779         int err, i;
4780
4781         /* Count number of mailbox channels */
4782         spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
4783
4784         matched_spu_type = of_device_get_match_data(dev);
4785         if (!matched_spu_type) {
4786                 dev_err(&pdev->dev, "Failed to match device\n");
4787                 return -ENODEV;
4788         }
4789
4790         spu->spu_type = matched_spu_type->type;
4791         spu->spu_subtype = matched_spu_type->subtype;
4792
4794         for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
4795                 platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
4796
4797                 spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
4798                 if (IS_ERR(spu->reg_vbase[i])) {
4799                         err = PTR_ERR(spu->reg_vbase[i]);
4800                         dev_err(&pdev->dev, "Failed to map registers: %d\n",
4801                                 err);
4802                         spu->reg_vbase[i] = NULL;
4803                         return err;
4804                 }
4805         }
4806         spu->num_spu = i;
4807         dev_dbg(dev, "Device has %d SPUs", spu->num_spu);
4808
4809         return 0;
4810 }
4811
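/**
 * bcm_spu_probe() - Probe the SPU hardware: read the device tree, request
 * mailbox channels, select the SPU-M or SPU2 function table, initialize
 * counters and debugfs, and register the supported algorithms with the
 * kernel crypto API.
 * @pdev: platform device being probed
 *
 * Return: 0 if successful
 *         < 0 otherwise
 */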
4812 static int bcm_spu_probe(struct platform_device *pdev)
4813 {
4814         struct device *dev = &pdev->dev;
4815         struct spu_hw *spu = &iproc_priv.spu;
4816         int err = 0;
4817
4818         iproc_priv.pdev  = pdev;
4819         platform_set_drvdata(iproc_priv.pdev,
4820                              &iproc_priv);
4821
4822         err = spu_dt_read(pdev);
4823         if (err < 0)
4824                 goto failure;
4825
4826         err = spu_mb_init(&pdev->dev);
4827         if (err < 0)
4828                 goto failure;
4829
4830         if (spu->spu_type == SPU_TYPE_SPUM)
4831                 iproc_priv.bcm_hdr_len = 8;
4832         else if (spu->spu_type == SPU_TYPE_SPU2)
4833                 iproc_priv.bcm_hdr_len = 0;
4834
4835         spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);
4836
4837         spu_counters_init();
4838
4839         spu_setup_debugfs();
4840
4841         err = spu_algs_register(dev);
4842         if (err < 0)
4843                 goto fail_reg;
4844
4845         return 0;
4846
4847 fail_reg:
4848         spu_free_debugfs();
4849 failure:
4850         spu_mb_release(pdev);
4851         dev_err(dev, "%s failed with error %d.\n", __func__, err);
4852
4853         return err;
4854 }
4855
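/**
 * bcm_spu_remove() - Undo bcm_spu_probe(): unregister every algorithm that
 * was actually registered, remove debugfs entries, and release the mailbox
 * channels.
 * @pdev: platform device being removed
 *
 * Return: 0
 */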
4856 static int bcm_spu_remove(struct platform_device *pdev)
4857 {
4858         int i;
4859         struct device *dev = &pdev->dev;
4860         char *cdn;
4861
4862         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4863                 /*
4864                  * Not all algorithms were registered, depending on whether
4865                  * hardware is SPU or SPU2.  So here we make sure to skip
4866                  * those algorithms that were not previously registered.
4867                  */
4868                 if (!driver_algs[i].registered)
4869                         continue;
4870
4871                 switch (driver_algs[i].type) {
4872                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4873                         crypto_unregister_alg(&driver_algs[i].alg.crypto);
4874                         dev_dbg(dev, "  unregistered cipher %s\n",
4875                                 driver_algs[i].alg.crypto.cra_driver_name);
4876                         driver_algs[i].registered = false;
4877                         break;
4878                 case CRYPTO_ALG_TYPE_AHASH:
4879                         crypto_unregister_ahash(&driver_algs[i].alg.hash);
4880                         cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
4881                         dev_dbg(dev, "  unregistered hash %s\n", cdn);
4882                         driver_algs[i].registered = false;
4883                         break;
4884                 case CRYPTO_ALG_TYPE_AEAD:
4885                         crypto_unregister_aead(&driver_algs[i].alg.aead);
4886                         dev_dbg(dev, "  unregistered aead %s\n",
4887                                 driver_algs[i].alg.aead.base.cra_driver_name);
4888                         driver_algs[i].registered = false;
4889                         break;
4890                 }
4891         }
4892         spu_free_debugfs();
4893         spu_mb_release(pdev);
4894         return 0;
4895 }
4896
4897 /* ===== Kernel Module API ===== */
4898
4899 static struct platform_driver bcm_spu_pdriver = {
4900         .driver = {
4901                    .name = "brcm-spu-crypto",
4902                    .of_match_table = of_match_ptr(bcm_spu_dt_ids),
4903                    },
4904         .probe = bcm_spu_probe,
4905         .remove = bcm_spu_remove,
4906 };
4907 module_platform_driver(bcm_spu_pdriver);
4908
4909 MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
4910 MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
4911 MODULE_LICENSE("GPL v2");