1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright 2016 Broadcom
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/errno.h>
10 #include <linux/kernel.h>
11 #include <linux/interrupt.h>
12 #include <linux/platform_device.h>
13 #include <linux/scatterlist.h>
14 #include <linux/crypto.h>
15 #include <linux/kthread.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/sched.h>
18 #include <linux/of_address.h>
19 #include <linux/of_device.h>
21 #include <linux/bitops.h>
23 #include <crypto/algapi.h>
24 #include <crypto/aead.h>
25 #include <crypto/internal/aead.h>
26 #include <crypto/aes.h>
27 #include <crypto/internal/des.h>
28 #include <crypto/hmac.h>
29 #include <crypto/md5.h>
30 #include <crypto/authenc.h>
31 #include <crypto/skcipher.h>
32 #include <crypto/hash.h>
33 #include <crypto/sha1.h>
34 #include <crypto/sha2.h>
35 #include <crypto/sha3.h>
43 /* ================= Device Structure ================== */
45 struct bcm_device_private iproc_priv;
47 /* ==================== Parameters ===================== */
49 int flow_debug_logging;
50 module_param(flow_debug_logging, int, 0644);
51 MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");
53 int packet_debug_logging;
54 module_param(packet_debug_logging, int, 0644);
55 MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");
57 int debug_logging_sleep;
58 module_param(debug_logging_sleep, int, 0644);
59 MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");
62 * The value of these module parameters is used to set the priority for each
63 * algo type when this driver registers algos with the kernel crypto API.
64 * To use a priority other than the default, set the priority in the insmod or
65 * modprobe. Changing the module priority after init time has no effect.
67 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
68 * algos, but more preferred than generic software algos.
70 static int cipher_pri = 150;
71 module_param(cipher_pri, int, 0644);
72 MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");
74 static int hash_pri = 100;
75 module_param(hash_pri, int, 0644);
76 MODULE_PARM_DESC(hash_pri, "Priority for hash algos");
78 static int aead_pri = 150;
79 module_param(aead_pri, int, 0644);
80 MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
82 /* A type 3 BCM header, expected to precede the SPU header for SPU-M.
83 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
89 static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
91 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
92 * is set dynamically after reading SPU type from device tree.
94 #define BCM_HDR_LEN iproc_priv.bcm_hdr_len
96 /* min and max time to sleep before retrying when mbox queue is full. usec */
97 #define MBOX_SLEEP_MIN 800
98 #define MBOX_SLEEP_MAX 1000
101 * select_channel() - Select a SPU channel to handle a crypto request. Selects
102 * channel in round robin order.
104 * Return: channel index
106 static u8 select_channel(void)
108 u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);
110 return chan_idx % iproc_priv.spu.num_chan;
114 * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to
115 * receive a SPU response message for an skcipher request. Includes buffers to
116 * catch SPU message headers and the response data.
117 * @mssg: mailbox message containing the receive sg
118 * @rctx: crypto request context
119 * @rx_frag_num: number of scatterlist elements required to hold the
120 * SPU response message
121 * @chunksize: Number of bytes of response data expected
122 * @stat_pad_len: Number of bytes required to pad the STAT field to
125 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
126 * when the request completes, whether the request is handled successfully or
134 spu_skcipher_rx_sg_create(struct brcm_message *mssg,
135 struct iproc_reqctx_s *rctx,
137 unsigned int chunksize, u32 stat_pad_len)
139 struct spu_hw *spu = &iproc_priv.spu;
140 struct scatterlist *sg; /* used to build sgs in mbox message */
141 struct iproc_ctx_s *ctx = rctx->ctx;
142 u32 datalen; /* Number of bytes of response data expected */
144 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
150 sg_init_table(sg, rx_frag_num);
151 /* Space for SPU message header */
152 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
154 /* If XTS tweak in payload, add buffer to receive encrypted tweak */
155 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
156 spu->spu_xts_tweak_in_payload())
157 sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
160 /* Copy in each dst sg entry from request, up to chunksize */
161 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
162 rctx->dst_nents, chunksize);
163 if (datalen < chunksize) {
164 pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
165 __func__, chunksize, datalen);
170 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
172 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
173 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
179 * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to
180 * send a SPU request message for an skcipher request. Includes SPU message
181 * headers and the request data.
182 * @mssg: mailbox message containing the transmit sg
183 * @rctx: crypto request context
184 * @tx_frag_num: number of scatterlist elements required to construct the
185 * SPU request message
186 * @chunksize: Number of bytes of request data
187 * @pad_len: Number of pad bytes
189 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
190 * when the request completes, whether the request is handled successfully or
198 spu_skcipher_tx_sg_create(struct brcm_message *mssg,
199 struct iproc_reqctx_s *rctx,
200 u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
202 struct spu_hw *spu = &iproc_priv.spu;
203 struct scatterlist *sg; /* used to build sgs in mbox message */
204 struct iproc_ctx_s *ctx = rctx->ctx;
205 u32 datalen; /* Number of bytes of response data expected */
208 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
210 if (unlikely(!mssg->spu.src))
214 sg_init_table(sg, tx_frag_num);
216 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
217 BCM_HDR_LEN + ctx->spu_req_hdr_len);
219 /* if XTS tweak in payload, copy from IV (where crypto API puts it) */
220 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
221 spu->spu_xts_tweak_in_payload())
222 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);
224 /* Copy in each src sg entry from request, up to chunksize */
225 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
226 rctx->src_nents, chunksize);
227 if (unlikely(datalen < chunksize)) {
228 pr_err("%s(): failed to copy src sg to mbox msg",
234 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
236 stat_len = spu->spu_tx_status_len();
238 memset(rctx->msg_buf.tx_stat, 0, stat_len);
239 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
244 static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
249 struct device *dev = &(iproc_priv.pdev->dev);
251 err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
252 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
253 while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
255 * Mailbox queue is full. Since MAY_SLEEP is set, assume
256 * not in atomic context and we can wait and try again.
259 usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
260 err = mbox_send_message(iproc_priv.mbox[chan_idx],
262 atomic_inc(&iproc_priv.mb_no_spc);
266 atomic_inc(&iproc_priv.mb_send_fail);
270 /* Check error returned by mailbox controller */
272 if (unlikely(err < 0)) {
273 dev_err(dev, "message error %d", err);
274 /* Signal txdone for mailbox channel */
277 /* Signal txdone for mailbox channel */
278 mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
283 * handle_skcipher_req() - Submit as much of a block cipher request as fits in
284 * a single SPU request message, starting at the current position in the request
286 * @rctx: Crypto request context
288 * This may be called on the crypto API thread, or, when a request is so large
289 * it must be broken into multiple SPU messages, on the thread used to invoke
290 * the response callback. When requests are broken into multiple SPU
291 * messages, we assume subsequent messages depend on previous results, and
292 * thus always wait for previous results before submitting the next message.
293 * Because requests are submitted in lock step like this, there is no need
294 * to synchronize access to request data structures.
296 * Return: -EINPROGRESS: request has been accepted and result will be returned
298 * Any other value indicates an error
300 static int handle_skcipher_req(struct iproc_reqctx_s *rctx)
302 struct spu_hw *spu = &iproc_priv.spu;
303 struct crypto_async_request *areq = rctx->parent;
304 struct skcipher_request *req =
305 container_of(areq, struct skcipher_request, base);
306 struct iproc_ctx_s *ctx = rctx->ctx;
307 struct spu_cipher_parms cipher_parms;
309 unsigned int chunksize; /* Num bytes of request to submit */
310 int remaining; /* Bytes of request still to process */
311 int chunk_start; /* Beginning of data for current SPU msg */
313 /* IV or ctr value to use in this SPU msg */
314 u8 local_iv_ctr[MAX_IV_SIZE];
315 u32 stat_pad_len; /* num bytes to align status field */
316 u32 pad_len; /* total length of all padding */
317 struct brcm_message *mssg; /* mailbox message */
319 /* number of entries in src and dst sg in mailbox message. */
320 u8 rx_frag_num = 2; /* response header and STATUS */
321 u8 tx_frag_num = 1; /* request header */
323 flow_log("%s\n", __func__);
325 cipher_parms.alg = ctx->cipher.alg;
326 cipher_parms.mode = ctx->cipher.mode;
327 cipher_parms.type = ctx->cipher_type;
328 cipher_parms.key_len = ctx->enckeylen;
329 cipher_parms.key_buf = ctx->enckey;
330 cipher_parms.iv_buf = local_iv_ctr;
331 cipher_parms.iv_len = rctx->iv_ctr_len;
333 mssg = &rctx->mb_mssg;
334 chunk_start = rctx->src_sent;
335 remaining = rctx->total_todo - chunk_start;
337 /* determine the chunk we are breaking off and update the indexes */
338 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
339 (remaining > ctx->max_payload))
340 chunksize = ctx->max_payload;
342 chunksize = remaining;
344 rctx->src_sent += chunksize;
345 rctx->total_sent = rctx->src_sent;
347 /* Count number of sg entries to be included in this request */
348 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
349 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
351 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
352 rctx->is_encrypt && chunk_start)
354 * Encrypting non-first first chunk. Copy last block of
355 * previous result to IV for this chunk.
357 sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
359 chunk_start - rctx->iv_ctr_len);
361 if (rctx->iv_ctr_len) {
362 /* get our local copy of the iv */
363 __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
366 /* generate the next IV if possible */
367 if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
370 * CBC Decrypt: next IV is the last ciphertext block in
373 sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
375 rctx->src_sent - rctx->iv_ctr_len);
376 } else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
378 * The SPU hardware increments the counter once for
379 * each AES block of 16 bytes. So update the counter
380 * for the next chunk, if there is one. Note that for
381 * this chunk, the counter has already been copied to
382 * local_iv_ctr. We can assume a block size of 16,
383 * because we only support CTR mode for AES, not for
384 * any other cipher alg.
386 add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
390 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
391 flow_log("max_payload infinite\n");
393 flow_log("max_payload %u\n", ctx->max_payload);
395 flow_log("sent:%u start:%u remains:%u size:%u\n",
396 rctx->src_sent, chunk_start, remaining, chunksize);
398 /* Copy SPU header template created at setkey time */
399 memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
400 sizeof(rctx->msg_buf.bcm_spu_req_hdr));
402 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
403 ctx->spu_req_hdr_len, !(rctx->is_encrypt),
404 &cipher_parms, chunksize);
406 atomic64_add(chunksize, &iproc_priv.bytes_out);
408 stat_pad_len = spu->spu_wordalign_padlen(chunksize);
411 pad_len = stat_pad_len;
414 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
415 0, ctx->auth.alg, ctx->auth.mode,
416 rctx->total_sent, stat_pad_len);
419 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
420 ctx->spu_req_hdr_len);
421 packet_log("payload:\n");
422 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
423 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
426 * Build mailbox message containing SPU request msg and rx buffers
427 * to catch response message
429 memset(mssg, 0, sizeof(*mssg));
430 mssg->type = BRCM_MESSAGE_SPU;
431 mssg->ctx = rctx; /* Will be returned in response */
433 /* Create rx scatterlist to catch result */
434 rx_frag_num += rctx->dst_nents;
436 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
437 spu->spu_xts_tweak_in_payload())
438 rx_frag_num++; /* extra sg to insert tweak */
440 err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
445 /* Create tx scatterlist containing SPU request message */
446 tx_frag_num += rctx->src_nents;
447 if (spu->spu_tx_status_len())
450 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
451 spu->spu_xts_tweak_in_payload())
452 tx_frag_num++; /* extra sg to insert tweak */
454 err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
459 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
460 if (unlikely(err < 0))
467 * handle_skcipher_resp() - Process a block cipher SPU response. Updates the
468 * total received count for the request and updates global stats.
469 * @rctx: Crypto request context
471 static void handle_skcipher_resp(struct iproc_reqctx_s *rctx)
473 struct spu_hw *spu = &iproc_priv.spu;
474 struct crypto_async_request *areq = rctx->parent;
475 struct skcipher_request *req = skcipher_request_cast(areq);
476 struct iproc_ctx_s *ctx = rctx->ctx;
479 /* See how much data was returned */
480 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
483 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
484 * encrypted tweak ("i") value; we don't count those.
486 if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
487 spu->spu_xts_tweak_in_payload() &&
488 (payload_len >= SPU_XTS_TWEAK_SIZE))
489 payload_len -= SPU_XTS_TWEAK_SIZE;
491 atomic64_add(payload_len, &iproc_priv.bytes_in);
493 flow_log("%s() offset: %u, bd_len: %u BD:\n",
494 __func__, rctx->total_received, payload_len);
496 dump_sg(req->dst, rctx->total_received, payload_len);
498 rctx->total_received += payload_len;
499 if (rctx->total_received == rctx->total_todo) {
500 atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
502 &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
507 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
508 * receive a SPU response message for an ahash request.
509 * @mssg: mailbox message containing the receive sg
510 * @rctx: crypto request context
511 * @rx_frag_num: number of scatterlist elements required to hold the
512 * SPU response message
513 * @digestsize: length of hash digest, in bytes
514 * @stat_pad_len: Number of bytes required to pad the STAT field to
517 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
518 * when the request completes, whether the request is handled successfully or
526 spu_ahash_rx_sg_create(struct brcm_message *mssg,
527 struct iproc_reqctx_s *rctx,
528 u8 rx_frag_num, unsigned int digestsize,
531 struct spu_hw *spu = &iproc_priv.spu;
532 struct scatterlist *sg; /* used to build sgs in mbox message */
533 struct iproc_ctx_s *ctx = rctx->ctx;
535 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
541 sg_init_table(sg, rx_frag_num);
542 /* Space for SPU message header */
543 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
545 /* Space for digest */
546 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
549 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
551 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
552 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
557 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
558 * a SPU request message for an ahash request. Includes SPU message headers and
560 * @mssg: mailbox message containing the transmit sg
561 * @rctx: crypto request context
562 * @tx_frag_num: number of scatterlist elements required to construct the
563 * SPU request message
564 * @spu_hdr_len: length in bytes of SPU message header
565 * @hash_carry_len: Number of bytes of data carried over from previous req
566 * @new_data_len: Number of bytes of new request data
567 * @pad_len: Number of pad bytes
569 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
570 * when the request completes, whether the request is handled successfully or
578 spu_ahash_tx_sg_create(struct brcm_message *mssg,
579 struct iproc_reqctx_s *rctx,
582 unsigned int hash_carry_len,
583 unsigned int new_data_len, u32 pad_len)
585 struct spu_hw *spu = &iproc_priv.spu;
586 struct scatterlist *sg; /* used to build sgs in mbox message */
587 u32 datalen; /* Number of bytes of response data expected */
590 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
596 sg_init_table(sg, tx_frag_num);
598 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
599 BCM_HDR_LEN + spu_hdr_len);
602 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);
605 /* Copy in each src sg entry from request, up to chunksize */
606 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
607 rctx->src_nents, new_data_len);
608 if (datalen < new_data_len) {
609 pr_err("%s(): failed to copy src sg to mbox msg",
616 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
618 stat_len = spu->spu_tx_status_len();
620 memset(rctx->msg_buf.tx_stat, 0, stat_len);
621 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
628 * handle_ahash_req() - Process an asynchronous hash request from the crypto
630 * @rctx: Crypto request context
632 * Builds a SPU request message embedded in a mailbox message and submits the
633 * mailbox message on a selected mailbox channel. The SPU request message is
634 * constructed as a scatterlist, including entries from the crypto API's
635 * src scatterlist to avoid copying the data to be hashed. This function is
636 * called either on the thread from the crypto API, or, in the case that the
637 * crypto API request is too large to fit in a single SPU request message,
638 * on the thread that invokes the receive callback with a response message.
639 * Because some operations require the response from one chunk before the next
640 * chunk can be submitted, we always wait for the response for the previous
641 * chunk before submitting the next chunk. Because requests are submitted in
642 * lock step like this, there is no need to synchronize access to request data
646 * -EINPROGRESS: request has been submitted to SPU and response will be
647 * returned asynchronously
648 * -EAGAIN: non-final request included a small amount of data, which for
649 * efficiency we did not submit to the SPU, but instead stored
650 * to be submitted to the SPU with the next part of the request
651 * other: an error code
653 static int handle_ahash_req(struct iproc_reqctx_s *rctx)
655 struct spu_hw *spu = &iproc_priv.spu;
656 struct crypto_async_request *areq = rctx->parent;
657 struct ahash_request *req = ahash_request_cast(areq);
658 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
659 struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
660 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
661 struct iproc_ctx_s *ctx = rctx->ctx;
663 /* number of bytes still to be hashed in this req */
664 unsigned int nbytes_to_hash = 0;
666 unsigned int chunksize = 0; /* length of hash carry + new data */
668 * length of new data, not from hash carry, to be submitted in
671 unsigned int new_data_len;
673 unsigned int __maybe_unused chunk_start = 0;
674 u32 db_size; /* Length of data field, incl gcm and hash padding */
675 int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
676 u32 data_pad_len = 0; /* length of GCM/CCM padding */
677 u32 stat_pad_len = 0; /* length of padding to align STATUS word */
678 struct brcm_message *mssg; /* mailbox message */
679 struct spu_request_opts req_opts;
680 struct spu_cipher_parms cipher_parms;
681 struct spu_hash_parms hash_parms;
682 struct spu_aead_parms aead_parms;
683 unsigned int local_nbuf;
685 unsigned int digestsize;
689 * number of entries in src and dst sg. Always includes SPU msg header.
690 * rx always includes a buffer to catch digest and STATUS.
695 flow_log("total_todo %u, total_sent %u\n",
696 rctx->total_todo, rctx->total_sent);
698 memset(&req_opts, 0, sizeof(req_opts));
699 memset(&cipher_parms, 0, sizeof(cipher_parms));
700 memset(&hash_parms, 0, sizeof(hash_parms));
701 memset(&aead_parms, 0, sizeof(aead_parms));
703 req_opts.bd_suppress = true;
704 hash_parms.alg = ctx->auth.alg;
705 hash_parms.mode = ctx->auth.mode;
706 hash_parms.type = HASH_TYPE_NONE;
707 hash_parms.key_buf = (u8 *)ctx->authkey;
708 hash_parms.key_len = ctx->authkeylen;
711 * For hash algorithms below assignment looks bit odd but
712 * it's needed for AES-XCBC and AES-CMAC hash algorithms
713 * to differentiate between 128, 192, 256 bit key values.
714 * Based on the key values, hash algorithm is selected.
715 * For example for 128 bit key, hash algorithm is AES-128.
717 cipher_parms.type = ctx->cipher_type;
719 mssg = &rctx->mb_mssg;
720 chunk_start = rctx->src_sent;
723 * Compute the amount remaining to hash. This may include data
724 * carried over from previous requests.
726 nbytes_to_hash = rctx->total_todo - rctx->total_sent;
727 chunksize = nbytes_to_hash;
728 if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
729 (chunksize > ctx->max_payload))
730 chunksize = ctx->max_payload;
733 * If this is not a final request and the request data is not a multiple
734 * of a full block, then simply park the extra data and prefix it to the
735 * data for the next request.
737 if (!rctx->is_final) {
738 u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
739 u16 new_len; /* len of data to add to hash carry */
741 rem = chunksize % blocksize; /* remainder */
743 /* chunksize not a multiple of blocksize */
745 if (chunksize == 0) {
746 /* Don't have a full block to submit to hw */
747 new_len = rem - rctx->hash_carry_len;
748 sg_copy_part_to_buf(req->src, dest, new_len,
750 rctx->hash_carry_len = rem;
751 flow_log("Exiting with hash carry len: %u\n",
752 rctx->hash_carry_len);
753 packet_dump(" buf: ",
755 rctx->hash_carry_len);
761 /* if we have hash carry, then prefix it to the data in this request */
762 local_nbuf = rctx->hash_carry_len;
763 rctx->hash_carry_len = 0;
766 new_data_len = chunksize - local_nbuf;
768 /* Count number of sg entries to be used in this request */
769 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
772 /* AES hashing keeps key size in type field, so need to copy it here */
773 if (hash_parms.alg == HASH_ALG_AES)
774 hash_parms.type = (enum hash_type)cipher_parms.type;
776 hash_parms.type = spu->spu_hash_type(rctx->total_sent);
778 digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
780 hash_parms.digestsize = digestsize;
782 /* update the indexes */
783 rctx->total_sent += chunksize;
784 /* if you sent a prebuf then that wasn't from this req->src */
785 rctx->src_sent += new_data_len;
787 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
788 hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
794 * If a non-first chunk, then include the digest returned from the
795 * previous chunk so that hw can add to it (except for AES types).
797 if ((hash_parms.type == HASH_TYPE_UPDT) &&
798 (hash_parms.alg != HASH_ALG_AES)) {
799 hash_parms.key_buf = rctx->incr_hash;
800 hash_parms.key_len = digestsize;
803 atomic64_add(chunksize, &iproc_priv.bytes_out);
805 flow_log("%s() final: %u nbuf: %u ",
806 __func__, rctx->is_final, local_nbuf);
808 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
809 flow_log("max_payload infinite\n");
811 flow_log("max_payload %u\n", ctx->max_payload);
813 flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);
815 /* Prepend SPU header with type 3 BCM header */
816 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
818 hash_parms.prebuf_len = local_nbuf;
819 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
821 &req_opts, &cipher_parms,
822 &hash_parms, &aead_parms,
825 if (spu_hdr_len == 0) {
826 pr_err("Failed to create SPU request header\n");
831 * Determine total length of padding required. Put all padding in one
834 data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
835 db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
836 0, 0, hash_parms.pad_len);
837 if (spu->spu_tx_status_len())
838 stat_pad_len = spu->spu_wordalign_padlen(db_size);
841 pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
844 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
845 hash_parms.pad_len, ctx->auth.alg,
846 ctx->auth.mode, rctx->total_sent,
850 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
852 packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
854 dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
855 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
858 * Build mailbox message containing SPU request msg and rx buffers
859 * to catch response message
861 memset(mssg, 0, sizeof(*mssg));
862 mssg->type = BRCM_MESSAGE_SPU;
863 mssg->ctx = rctx; /* Will be returned in response */
865 /* Create rx scatterlist to catch result */
866 err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
871 /* Create tx scatterlist containing SPU request message */
872 tx_frag_num += rctx->src_nents;
873 if (spu->spu_tx_status_len())
875 err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
876 local_nbuf, new_data_len, pad_len);
880 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
881 if (unlikely(err < 0))
888 * spu_hmac_outer_hash() - Request synchonous software compute of the outer hash
889 * for an HMAC request.
890 * @req: The HMAC request from the crypto API
891 * @ctx: The session context
893 * Return: 0 if synchronous hash operation successful
894 * -EINVAL if the hash algo is unrecognized
895 * any other value indicates an error
897 static int spu_hmac_outer_hash(struct ahash_request *req,
898 struct iproc_ctx_s *ctx)
900 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
901 unsigned int blocksize =
902 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
905 switch (ctx->auth.alg) {
907 rc = do_shash("md5", req->result, ctx->opad, blocksize,
908 req->result, ctx->digestsize, NULL, 0);
911 rc = do_shash("sha1", req->result, ctx->opad, blocksize,
912 req->result, ctx->digestsize, NULL, 0);
914 case HASH_ALG_SHA224:
915 rc = do_shash("sha224", req->result, ctx->opad, blocksize,
916 req->result, ctx->digestsize, NULL, 0);
918 case HASH_ALG_SHA256:
919 rc = do_shash("sha256", req->result, ctx->opad, blocksize,
920 req->result, ctx->digestsize, NULL, 0);
922 case HASH_ALG_SHA384:
923 rc = do_shash("sha384", req->result, ctx->opad, blocksize,
924 req->result, ctx->digestsize, NULL, 0);
926 case HASH_ALG_SHA512:
927 rc = do_shash("sha512", req->result, ctx->opad, blocksize,
928 req->result, ctx->digestsize, NULL, 0);
931 pr_err("%s() Error : unknown hmac type\n", __func__);
938 * ahash_req_done() - Process a hash result from the SPU hardware.
939 * @rctx: Crypto request context
941 * Return: 0 if successful
944 static int ahash_req_done(struct iproc_reqctx_s *rctx)
946 struct spu_hw *spu = &iproc_priv.spu;
947 struct crypto_async_request *areq = rctx->parent;
948 struct ahash_request *req = ahash_request_cast(areq);
949 struct iproc_ctx_s *ctx = rctx->ctx;
952 memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);
954 if (spu->spu_type == SPU_TYPE_SPUM) {
955 /* byte swap the output from the UPDT function to network byte
958 if (ctx->auth.alg == HASH_ALG_MD5) {
959 __swab32s((u32 *)req->result);
960 __swab32s(((u32 *)req->result) + 1);
961 __swab32s(((u32 *)req->result) + 2);
962 __swab32s(((u32 *)req->result) + 3);
963 __swab32s(((u32 *)req->result) + 4);
967 flow_dump(" digest ", req->result, ctx->digestsize);
969 /* if this an HMAC then do the outer hash */
970 if (rctx->is_sw_hmac) {
971 err = spu_hmac_outer_hash(req, ctx);
974 flow_dump(" hmac: ", req->result, ctx->digestsize);
977 if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
978 atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
979 atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
981 atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
982 atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
989 * handle_ahash_resp() - Process a SPU response message for a hash request.
990 * Checks if the entire crypto API request has been processed, and if so,
991 * invokes post processing on the result.
992 * @rctx: Crypto request context
994 static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
996 struct iproc_ctx_s *ctx = rctx->ctx;
997 struct crypto_async_request *areq = rctx->parent;
998 struct ahash_request *req = ahash_request_cast(areq);
999 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1000 unsigned int blocksize =
1001 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
1003 * Save hash to use as input to next op if incremental. Might be copying
1004 * too much, but that's easier than figuring out actual digest size here
1006 memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);
1008 flow_log("%s() blocksize:%u digestsize:%u\n",
1009 __func__, blocksize, ctx->digestsize);
1011 atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);
1013 if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
1014 ahash_req_done(rctx);
1018 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
1019 * a SPU response message for an AEAD request. Includes buffers to catch SPU
1020 * message headers and the response data.
1021 * @mssg: mailbox message containing the receive sg
1022 * @req: Crypto API request
1023 * @rctx: crypto request context
1024 * @rx_frag_num: number of scatterlist elements required to hold the
1025 * SPU response message
1026 * @assoc_len: Length of associated data included in the crypto request
1027 * @ret_iv_len: Length of IV returned in response
1028 * @resp_len: Number of bytes of response data expected to be written to
1029 * dst buffer from crypto API
1030 * @digestsize: Length of hash digest, in bytes
1031 * @stat_pad_len: Number of bytes required to pad the STAT field to
1034 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1035 * when the request completes, whether the request is handled successfully or
1036 * there is an error.
1042 static int spu_aead_rx_sg_create(struct brcm_message *mssg,
1043 struct aead_request *req,
1044 struct iproc_reqctx_s *rctx,
1046 unsigned int assoc_len,
1047 u32 ret_iv_len, unsigned int resp_len,
1048 unsigned int digestsize, u32 stat_pad_len)
1050 struct spu_hw *spu = &iproc_priv.spu;
1051 struct scatterlist *sg; /* used to build sgs in mbox message */
1052 struct iproc_ctx_s *ctx = rctx->ctx;
1053 u32 datalen; /* Number of bytes of response data expected */
1057 if (ctx->is_rfc4543) {
1058 /* RFC4543: only pad after data, not after AAD */
1059 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1060 assoc_len + resp_len);
1061 assoc_buf_len = assoc_len;
1063 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1065 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
1066 assoc_len, ret_iv_len,
1070 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1071 /* ICV (after data) must be in the next 32-bit word for CCM */
1072 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
1077 /* have to catch gcm pad in separate buffer */
1080 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
1086 sg_init_table(sg, rx_frag_num);
1088 /* Space for SPU message header */
1089 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);
1091 if (assoc_buf_len) {
1093 * Don't write directly to req->dst, because SPU may pad the
1094 * assoc data in the response
1096 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
1097 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
1102 * Copy in each dst sg entry from request, up to chunksize.
1103 * dst sg catches just the data. digest caught in separate buf.
1105 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
1106 rctx->dst_nents, resp_len);
1107 if (datalen < (resp_len)) {
1108 pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
1109 __func__, resp_len, datalen);
1114 /* If GCM/CCM data is padded, catch padding in separate buffer */
1116 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
1117 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
1120 /* Always catch ICV in separate buffer */
1121 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);
1123 flow_log("stat_pad_len %u\n", stat_pad_len);
1125 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
1126 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
1129 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
1130 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
1136 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
1137 * SPU request message for an AEAD request. Includes SPU message headers and the
1139 * @mssg: mailbox message containing the transmit sg
1140 * @rctx: crypto request context
1141 * @tx_frag_num: number of scatterlist elements required to construct the
1142 * SPU request message
1143 * @spu_hdr_len: length of SPU message header in bytes
1144 * @assoc: crypto API associated data scatterlist
1145 * @assoc_len: length of associated data
1146 * @assoc_nents: number of scatterlist entries containing assoc data
1147 * @aead_iv_len: length of AEAD IV, if included
1148 * @chunksize: Number of bytes of request data
1149 * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
1150 * @pad_len: Number of pad bytes
1151 * @incl_icv: If true, write separate ICV buffer after data and
1154 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
1155 * when the request completes, whether the request is handled successfully or
1156 * there is an error.
1162 static int spu_aead_tx_sg_create(struct brcm_message *mssg,
1163 struct iproc_reqctx_s *rctx,
1166 struct scatterlist *assoc,
1167 unsigned int assoc_len,
1169 unsigned int aead_iv_len,
1170 unsigned int chunksize,
1171 u32 aad_pad_len, u32 pad_len, bool incl_icv)
1173 struct spu_hw *spu = &iproc_priv.spu;
1174 struct scatterlist *sg; /* used to build sgs in mbox message */
1175 struct scatterlist *assoc_sg = assoc;
1176 struct iproc_ctx_s *ctx = rctx->ctx;
1177 u32 datalen; /* Number of bytes of data to write */
1178 u32 written; /* Number of bytes of data written */
1179 u32 assoc_offset = 0;
1182 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
1188 sg_init_table(sg, tx_frag_num);
1190 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
1191 BCM_HDR_LEN + spu_hdr_len);
1194 /* Copy in each associated data sg entry from request */
1195 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
1196 assoc_nents, assoc_len);
1197 if (written < assoc_len) {
1198 pr_err("%s(): failed to copy assoc sg to mbox msg",
1205 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);
1208 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
1209 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
1212 datalen = chunksize;
1213 if ((chunksize > ctx->digestsize) && incl_icv)
1214 datalen -= ctx->digestsize;
1216 /* For aead, a single msg should consume the entire src sg */
1217 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
1218 rctx->src_nents, datalen);
1219 if (written < datalen) {
1220 pr_err("%s(): failed to copy src sg to mbox msg",
1227 memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
1228 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
1232 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);
1234 stat_len = spu->spu_tx_status_len();
1236 memset(rctx->msg_buf.tx_stat, 0, stat_len);
1237 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
1243 * handle_aead_req() - Submit a SPU request message for the next chunk of the
1244 * current AEAD request.
1245 * @rctx: Crypto request context
1247 * Unlike other operation types, we assume the length of the request fits in
1248 * a single SPU request message. aead_enqueue() makes sure this is true.
1249 * Comments for other op types regarding threads applies here as well.
1251 * Unlike incremental hash ops, where the spu returns the entire hash for
1252 * truncated algs like sha-224, the SPU returns just the truncated hash in
1253 * response to aead requests. So digestsize is always ctx->digestsize here.
1255 * Return: -EINPROGRESS: crypto request has been accepted and result will be
1256 * returned asynchronously
1257 * Any other value indicates an error
1259 static int handle_aead_req(struct iproc_reqctx_s *rctx)
1261 struct spu_hw *spu = &iproc_priv.spu;
1262 struct crypto_async_request *areq = rctx->parent;
1263 struct aead_request *req = container_of(areq,
1264 struct aead_request, base);
1265 struct iproc_ctx_s *ctx = rctx->ctx;
1267 unsigned int chunksize;
1268 unsigned int resp_len;
1273 struct brcm_message *mssg; /* mailbox message */
1274 struct spu_request_opts req_opts;
1275 struct spu_cipher_parms cipher_parms;
1276 struct spu_hash_parms hash_parms;
1277 struct spu_aead_parms aead_parms;
1278 int assoc_nents = 0;
1279 bool incl_icv = false;
1280 unsigned int digestsize = ctx->digestsize;
1282 /* number of entries in src and dst sg. Always includes SPU msg header.
1284 u8 rx_frag_num = 2; /* and STATUS */
1287 /* doing the whole thing at once */
1288 chunksize = rctx->total_todo;
1290 flow_log("%s: chunksize %u\n", __func__, chunksize);
1292 memset(&req_opts, 0, sizeof(req_opts));
1293 memset(&hash_parms, 0, sizeof(hash_parms));
1294 memset(&aead_parms, 0, sizeof(aead_parms));
1296 req_opts.is_inbound = !(rctx->is_encrypt);
1297 req_opts.auth_first = ctx->auth_first;
1298 req_opts.is_aead = true;
1299 req_opts.is_esp = ctx->is_esp;
1301 cipher_parms.alg = ctx->cipher.alg;
1302 cipher_parms.mode = ctx->cipher.mode;
1303 cipher_parms.type = ctx->cipher_type;
1304 cipher_parms.key_buf = ctx->enckey;
1305 cipher_parms.key_len = ctx->enckeylen;
1306 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
1307 cipher_parms.iv_len = rctx->iv_ctr_len;
1309 hash_parms.alg = ctx->auth.alg;
1310 hash_parms.mode = ctx->auth.mode;
1311 hash_parms.type = HASH_TYPE_NONE;
1312 hash_parms.key_buf = (u8 *)ctx->authkey;
1313 hash_parms.key_len = ctx->authkeylen;
1314 hash_parms.digestsize = digestsize;
1316 if ((ctx->auth.alg == HASH_ALG_SHA224) &&
1317 (ctx->authkeylen < SHA224_DIGEST_SIZE))
1318 hash_parms.key_len = SHA224_DIGEST_SIZE;
1320 aead_parms.assoc_size = req->assoclen;
1321 if (ctx->is_esp && !ctx->is_rfc4543) {
1323 * 8-byte IV is included assoc data in request. SPU2
1324 * expects AAD to include just SPI and seqno. So
1325 * subtract off the IV len.
1327 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
1329 if (rctx->is_encrypt) {
1330 aead_parms.return_iv = true;
1331 aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
1332 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
1335 aead_parms.ret_iv_len = 0;
1339 * Count number of sg entries from the crypto API request that are to
1340 * be included in this mailbox message. For dst sg, don't count space
1341 * for digest. Digest gets caught in a separate buffer and copied back
1342 * to dst sg when processing response.
1344 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
1345 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
1346 if (aead_parms.assoc_size)
1347 assoc_nents = spu_sg_count(rctx->assoc, 0,
1348 aead_parms.assoc_size);
1350 mssg = &rctx->mb_mssg;
1352 rctx->total_sent = chunksize;
1353 rctx->src_sent = chunksize;
1354 if (spu->spu_assoc_resp_len(ctx->cipher.mode,
1355 aead_parms.assoc_size,
1356 aead_parms.ret_iv_len,
1360 aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
1363 if (ctx->auth.alg == HASH_ALG_AES)
1364 hash_parms.type = (enum hash_type)ctx->cipher_type;
1366 /* General case AAD padding (CCM and RFC4543 special cases below) */
1367 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1368 aead_parms.assoc_size);
1370 /* General case data padding (CCM decrypt special case below) */
1371 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1374 if (ctx->cipher.mode == CIPHER_MODE_CCM) {
1376 * for CCM, AAD len + 2 (rather than AAD len) needs to be
1379 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
1381 aead_parms.assoc_size + 2);
1384 * And when decrypting CCM, need to pad without including
1385 * size of ICV which is tacked on to end of chunk
1387 if (!rctx->is_encrypt)
1388 aead_parms.data_pad_len =
1389 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
1390 chunksize - digestsize);
1392 /* CCM also requires software to rewrite portions of IV: */
1393 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
1394 chunksize, rctx->is_encrypt,
1398 if (ctx->is_rfc4543) {
1400 * RFC4543: data is included in AAD, so don't pad after AAD
1401 * and pad data based on both AAD + data size
1403 aead_parms.aad_pad_len = 0;
1404 if (!rctx->is_encrypt)
1405 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1407 aead_parms.assoc_size + chunksize -
1410 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
1412 aead_parms.assoc_size + chunksize);
1414 req_opts.is_rfc4543 = true;
1417 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
1420 /* Copy ICV from end of src scatterlist to digest buf */
1421 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
1422 req->assoclen + rctx->total_sent -
1426 atomic64_add(chunksize, &iproc_priv.bytes_out);
1428 flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);
1430 /* Prepend SPU header with type 3 BCM header */
1431 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1433 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
1434 BCM_HDR_LEN, &req_opts,
1435 &cipher_parms, &hash_parms,
1436 &aead_parms, chunksize);
1438 /* Determine total length of padding. Put all padding in one buffer. */
1439 db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
1440 chunksize, aead_parms.aad_pad_len,
1441 aead_parms.data_pad_len, 0);
1443 stat_pad_len = spu->spu_wordalign_padlen(db_size);
1447 pad_len = aead_parms.data_pad_len + stat_pad_len;
1450 spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
1451 aead_parms.data_pad_len, 0,
1452 ctx->auth.alg, ctx->auth.mode,
1453 rctx->total_sent, stat_pad_len);
1456 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
1458 dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
1459 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
1460 packet_log("BD:\n");
1461 dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
1462 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);
1465 * Build mailbox message containing SPU request msg and rx buffers
1466 * to catch response message
1468 memset(mssg, 0, sizeof(*mssg));
1469 mssg->type = BRCM_MESSAGE_SPU;
1470 mssg->ctx = rctx; /* Will be returned in response */
1472 /* Create rx scatterlist to catch result */
1473 rx_frag_num += rctx->dst_nents;
1474 resp_len = chunksize;
1477 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
1478 * padding. Have to for SHA-224 and other truncated SHAs because SPU
1479 * sends entire digest back.
1483 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
1484 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
1486 * Input is ciphertxt plus ICV, but ICV not incl
1489 resp_len -= ctx->digestsize;
1491 /* no rx frags to catch output data */
1492 rx_frag_num -= rctx->dst_nents;
1495 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
1496 aead_parms.assoc_size,
1497 aead_parms.ret_iv_len, resp_len, digestsize,
1502 /* Create tx scatterlist containing SPU request message */
1503 tx_frag_num += rctx->src_nents;
1504 tx_frag_num += assoc_nents;
1505 if (aead_parms.aad_pad_len)
1507 if (aead_parms.iv_len)
1509 if (spu->spu_tx_status_len())
1511 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
1512 rctx->assoc, aead_parms.assoc_size,
1513 assoc_nents, aead_parms.iv_len, chunksize,
1514 aead_parms.aad_pad_len, pad_len, incl_icv);
1518 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
1519 if (unlikely(err < 0))
1522 return -EINPROGRESS;
1526 * handle_aead_resp() - Process a SPU response message for an AEAD request.
1527 * @rctx: Crypto request context
1529 static void handle_aead_resp(struct iproc_reqctx_s *rctx)
1531 struct spu_hw *spu = &iproc_priv.spu;
1532 struct crypto_async_request *areq = rctx->parent;
1533 struct aead_request *req = container_of(areq,
1534 struct aead_request, base);
1535 struct iproc_ctx_s *ctx = rctx->ctx;
1537 unsigned int icv_offset;
1540 /* See how much data was returned */
1541 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
1542 flow_log("payload_len %u\n", payload_len);
1544 /* only count payload */
1545 atomic64_add(payload_len, &iproc_priv.bytes_in);
1548 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
1552 * Copy the ICV back to the destination
1553 * buffer. In decrypt case, SPU gives us back the digest, but crypto
1554 * API doesn't expect ICV in dst buffer.
1556 result_len = req->cryptlen;
1557 if (rctx->is_encrypt) {
1558 icv_offset = req->assoclen + rctx->total_sent;
1559 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
1560 flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
1561 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
1562 ctx->digestsize, icv_offset);
1563 result_len += ctx->digestsize;
1566 packet_log("response data: ");
1567 dump_sg(req->dst, req->assoclen, result_len);
1569 atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
1570 if (ctx->cipher.alg == CIPHER_ALG_AES) {
1571 if (ctx->cipher.mode == CIPHER_MODE_CCM)
1572 atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
1573 else if (ctx->cipher.mode == CIPHER_MODE_GCM)
1574 atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
1576 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1578 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
1583 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
1584 * @rctx: request context
1586 * Mailbox scatterlists are allocated for each chunk. So free them after
1587 * processing each chunk.
1589 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
1591 /* mailbox message used to tx request */
1592 struct brcm_message *mssg = &rctx->mb_mssg;
1594 kfree(mssg->spu.src);
1595 kfree(mssg->spu.dst);
1596 memset(mssg, 0, sizeof(struct brcm_message));
1600 * finish_req() - Used to invoke the complete callback from the requester when
1601 * a request has been handled asynchronously.
1602 * @rctx: Request context
1603 * @err: Indicates whether the request was successful or not
1605 * Ensures that cleanup has been done for request
1607 static void finish_req(struct iproc_reqctx_s *rctx, int err)
1609 struct crypto_async_request *areq = rctx->parent;
1611 flow_log("%s() err:%d\n\n", __func__, err);
1613 /* No harm done if already called */
1614 spu_chunk_cleanup(rctx);
1617 crypto_request_complete(areq, err);
1621 * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1622 * @cl: mailbox client structure for SPU driver
1623 * @msg: mailbox message containing SPU response
1625 static void spu_rx_callback(struct mbox_client *cl, void *msg)
1627 struct spu_hw *spu = &iproc_priv.spu;
1628 struct brcm_message *mssg = msg;
1629 struct iproc_reqctx_s *rctx;
1633 if (unlikely(!rctx)) {
1635 pr_err("%s(): no request context", __func__);
1640 /* process the SPU status */
1641 err = spu->spu_status_process(rctx->msg_buf.rx_stat);
1643 if (err == SPU_INVALID_ICV)
1644 atomic_inc(&iproc_priv.bad_icv);
1649 /* Process the SPU response message */
1650 switch (rctx->ctx->alg->type) {
1651 case CRYPTO_ALG_TYPE_SKCIPHER:
1652 handle_skcipher_resp(rctx);
1654 case CRYPTO_ALG_TYPE_AHASH:
1655 handle_ahash_resp(rctx);
1657 case CRYPTO_ALG_TYPE_AEAD:
1658 handle_aead_resp(rctx);
1666 * If this response does not complete the request, then send the next
1669 if (rctx->total_sent < rctx->total_todo) {
1670 /* Deallocate anything specific to previous chunk */
1671 spu_chunk_cleanup(rctx);
1673 switch (rctx->ctx->alg->type) {
1674 case CRYPTO_ALG_TYPE_SKCIPHER:
1675 err = handle_skcipher_req(rctx);
1677 case CRYPTO_ALG_TYPE_AHASH:
1678 err = handle_ahash_req(rctx);
1681 * we saved data in hash carry, but tell crypto
1682 * API we successfully completed request.
1686 case CRYPTO_ALG_TYPE_AEAD:
1687 err = handle_aead_req(rctx);
1693 if (err == -EINPROGRESS)
1694 /* Successfully submitted request for next chunk */
1699 finish_req(rctx, err);
1702 /* ==================== Kernel Cryptographic API ==================== */
1705 * skcipher_enqueue() - Handle skcipher encrypt or decrypt request.
1706 * @req: Crypto API request
1707 * @encrypt: true if encrypting; false if decrypting
1709 * Return: -EINPROGRESS if request accepted and result will be returned
1713 static int skcipher_enqueue(struct skcipher_request *req, bool encrypt)
1715 struct iproc_reqctx_s *rctx = skcipher_request_ctx(req);
1716 struct iproc_ctx_s *ctx =
1717 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1720 flow_log("%s() enc:%u\n", __func__, encrypt);
1722 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1723 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1724 rctx->parent = &req->base;
1725 rctx->is_encrypt = encrypt;
1726 rctx->bd_suppress = false;
1727 rctx->total_todo = req->cryptlen;
1729 rctx->total_sent = 0;
1730 rctx->total_received = 0;
1733 /* Initialize current position in src and dst scatterlists */
1734 rctx->src_sg = req->src;
1735 rctx->src_nents = 0;
1737 rctx->dst_sg = req->dst;
1738 rctx->dst_nents = 0;
1741 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
1742 ctx->cipher.mode == CIPHER_MODE_CTR ||
1743 ctx->cipher.mode == CIPHER_MODE_OFB ||
1744 ctx->cipher.mode == CIPHER_MODE_XTS ||
1745 ctx->cipher.mode == CIPHER_MODE_GCM ||
1746 ctx->cipher.mode == CIPHER_MODE_CCM) {
1748 crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req));
1749 memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len);
1751 rctx->iv_ctr_len = 0;
1754 /* Choose a SPU to process this request */
1755 rctx->chan_idx = select_channel();
1756 err = handle_skcipher_req(rctx);
1757 if (err != -EINPROGRESS)
1758 /* synchronous result */
1759 spu_chunk_cleanup(rctx);
1764 static int des_setkey(struct crypto_skcipher *cipher, const u8 *key,
1765 unsigned int keylen)
1767 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1770 err = verify_skcipher_des_key(cipher, key);
1774 ctx->cipher_type = CIPHER_TYPE_DES;
1778 static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1779 unsigned int keylen)
1781 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1784 err = verify_skcipher_des3_key(cipher, key);
1788 ctx->cipher_type = CIPHER_TYPE_3DES;
1792 static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
1793 unsigned int keylen)
1795 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1797 if (ctx->cipher.mode == CIPHER_MODE_XTS)
1798 /* XTS includes two keys of equal length */
1799 keylen = keylen / 2;
1802 case AES_KEYSIZE_128:
1803 ctx->cipher_type = CIPHER_TYPE_AES128;
1805 case AES_KEYSIZE_192:
1806 ctx->cipher_type = CIPHER_TYPE_AES192;
1808 case AES_KEYSIZE_256:
1809 ctx->cipher_type = CIPHER_TYPE_AES256;
1814 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
1815 ((ctx->max_payload % AES_BLOCK_SIZE) != 0));
1819 static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key,
1820 unsigned int keylen)
1822 struct spu_hw *spu = &iproc_priv.spu;
1823 struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher);
1824 struct spu_cipher_parms cipher_parms;
1828 flow_log("skcipher_setkey() keylen: %d\n", keylen);
1829 flow_dump(" key: ", key, keylen);
1831 switch (ctx->cipher.alg) {
1832 case CIPHER_ALG_DES:
1833 err = des_setkey(cipher, key, keylen);
1835 case CIPHER_ALG_3DES:
1836 err = threedes_setkey(cipher, key, keylen);
1838 case CIPHER_ALG_AES:
1839 err = aes_setkey(cipher, key, keylen);
1842 pr_err("%s() Error: unknown cipher alg\n", __func__);
1848 memcpy(ctx->enckey, key, keylen);
1849 ctx->enckeylen = keylen;
1851 /* SPU needs XTS keys in the reverse order the crypto API presents */
1852 if ((ctx->cipher.alg == CIPHER_ALG_AES) &&
1853 (ctx->cipher.mode == CIPHER_MODE_XTS)) {
1854 unsigned int xts_keylen = keylen / 2;
1856 memcpy(ctx->enckey, key + xts_keylen, xts_keylen);
1857 memcpy(ctx->enckey + xts_keylen, key, xts_keylen);
1860 if (spu->spu_type == SPU_TYPE_SPUM)
1861 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN;
1862 else if (spu->spu_type == SPU_TYPE_SPU2)
1863 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN;
1864 memset(ctx->bcm_spu_req_hdr, 0, alloc_len);
1865 cipher_parms.iv_buf = NULL;
1866 cipher_parms.iv_len = crypto_skcipher_ivsize(cipher);
1867 flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len);
1869 cipher_parms.alg = ctx->cipher.alg;
1870 cipher_parms.mode = ctx->cipher.mode;
1871 cipher_parms.type = ctx->cipher_type;
1872 cipher_parms.key_buf = ctx->enckey;
1873 cipher_parms.key_len = ctx->enckeylen;
1875 /* Prepend SPU request message with BCM header */
1876 memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);
1877 ctx->spu_req_hdr_len =
1878 spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN,
1881 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
1885 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]);
1890 static int skcipher_encrypt(struct skcipher_request *req)
1892 flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen);
1894 return skcipher_enqueue(req, true);
1897 static int skcipher_decrypt(struct skcipher_request *req)
1899 flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen);
1900 return skcipher_enqueue(req, false);
1903 static int ahash_enqueue(struct ahash_request *req)
1905 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1906 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1907 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1909 const char *alg_name;
1911 flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes);
1913 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1914 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1915 rctx->parent = &req->base;
1917 rctx->bd_suppress = true;
1918 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
1920 /* Initialize position in src scatterlist */
1921 rctx->src_sg = req->src;
1923 rctx->src_nents = 0;
1924 rctx->dst_sg = NULL;
1926 rctx->dst_nents = 0;
1928 /* SPU2 hardware does not compute hash of zero length data */
1929 if ((rctx->is_final == 1) && (rctx->total_todo == 0) &&
1930 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) {
1931 alg_name = crypto_ahash_alg_name(tfm);
1932 flow_log("Doing %sfinal %s zero-len hash request in software\n",
1933 rctx->is_final ? "" : "non-", alg_name);
1934 err = do_shash((unsigned char *)alg_name, req->result,
1935 NULL, 0, NULL, 0, ctx->authkey,
1938 flow_log("Hash request failed with error %d\n", err);
1941 /* Choose a SPU to process this request */
1942 rctx->chan_idx = select_channel();
1944 err = handle_ahash_req(rctx);
1945 if (err != -EINPROGRESS)
1946 /* synchronous result */
1947 spu_chunk_cleanup(rctx);
1951 * we saved data in hash carry, but tell crypto API
1952 * we successfully completed request.
1959 static int __ahash_init(struct ahash_request *req)
1961 struct spu_hw *spu = &iproc_priv.spu;
1962 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
1963 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1964 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
1966 flow_log("%s()\n", __func__);
1968 /* Initialize the context */
1969 rctx->hash_carry_len = 0;
1972 rctx->total_todo = 0;
1974 rctx->total_sent = 0;
1975 rctx->total_received = 0;
1977 ctx->digestsize = crypto_ahash_digestsize(tfm);
1978 /* If we add a hash whose digest is larger, catch it here. */
1979 WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE);
1981 rctx->is_sw_hmac = false;
1983 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0,
1990 * spu_no_incr_hash() - Determine whether incremental hashing is supported.
1991 * @ctx: Crypto session context
1993 * SPU-2 does not support incremental hashing (we'll have to revisit this and
1994 * condition on chip revision or a device tree entry if future versions do
1995 * support incremental hash)
1997 * SPU-M also doesn't support incremental hashing of AES-XCBC
1999 * Return: true if incremental hashing is not supported
2002 static bool spu_no_incr_hash(struct iproc_ctx_s *ctx)
2004 struct spu_hw *spu = &iproc_priv.spu;
2006 if (spu->spu_type == SPU_TYPE_SPU2)
2009 if ((ctx->auth.alg == HASH_ALG_AES) &&
2010 (ctx->auth.mode == HASH_MODE_XCBC))
2013 /* Otherwise, incremental hashing is supported */
2017 static int ahash_init(struct ahash_request *req)
2019 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2020 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2021 const char *alg_name;
2022 struct crypto_shash *hash;
2026 if (spu_no_incr_hash(ctx)) {
2028 * If we get an incremental hashing request and it's not
2029 * supported by the hardware, we need to handle it in software
2030 * by calling synchronous hash functions.
2032 alg_name = crypto_ahash_alg_name(tfm);
2033 hash = crypto_alloc_shash(alg_name, 0, 0);
2035 ret = PTR_ERR(hash);
2039 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2040 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2041 ctx->shash = kmalloc(sizeof(*ctx->shash) +
2042 crypto_shash_descsize(hash), gfp);
2047 ctx->shash->tfm = hash;
2049 /* Set the key using data we already have from setkey */
2050 if (ctx->authkeylen > 0) {
2051 ret = crypto_shash_setkey(hash, ctx->authkey,
2057 /* Initialize hash w/ this key and other params */
2058 ret = crypto_shash_init(ctx->shash);
2062 /* Otherwise call the internal function which uses SPU hw */
2063 ret = __ahash_init(req);
2071 crypto_free_shash(hash);
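/*
 * Illustrative sketch, not part of the driver: the complete synchronous
 * shash lifecycle that the fallback above spreads across ahash_init(),
 * ahash_update() and ahash_final(). SHASH_DESC_ON_STACK avoids the
 * kmalloc'd descriptor the driver keeps in ctx->shash; "sha256" is an
 * example algorithm name.
 */
static int __maybe_unused example_shash_lifecycle(const u8 *data,
                                                  unsigned int len, u8 *out)
{
    struct crypto_shash *tfm;
    int err;

    tfm = crypto_alloc_shash("sha256", 0, 0);
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    {
        SHASH_DESC_ON_STACK(desc, tfm);

        desc->tfm = tfm;
        err = crypto_shash_init(desc);
        if (!err)
            err = crypto_shash_update(desc, data, len);
        if (!err)
            err = crypto_shash_final(desc, out);
        shash_desc_zero(desc);
    }

    crypto_free_shash(tfm);
    return err;
}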
2076 static int __ahash_update(struct ahash_request *req)
2078 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2080 flow_log("ahash_update() nbytes:%u\n", req->nbytes);
2084 rctx->total_todo += req->nbytes;
2087 return ahash_enqueue(req);
2090 static int ahash_update(struct ahash_request *req)
2092 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2093 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2099 if (spu_no_incr_hash(ctx)) {
2101 * If we get an incremental hashing request and it's not
2102 * supported by the hardware, we need to handle it in software
2103 * by calling synchronous hash functions.
2106 nents = sg_nents(req->src);
2110 /* Copy data from req scatterlist to tmp buffer */
2111 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2112 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2113 tmpbuf = kmalloc(req->nbytes, gfp);
2117 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2123 /* Call synchronous update */
2124 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes);
2127 /* Otherwise call the internal function which uses SPU hw */
2128 ret = __ahash_update(req);
2134 static int __ahash_final(struct ahash_request *req)
2136 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2138 flow_log("ahash_final() nbytes:%u\n", req->nbytes);
2142 return ahash_enqueue(req);
2145 static int ahash_final(struct ahash_request *req)
2147 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2148 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2151 if (spu_no_incr_hash(ctx)) {
2153 * If we get an incremental hashing request and it's not
2154 * supported by the hardware, we need to handle it in software
2155 * by calling synchronous hash functions.
2157 ret = crypto_shash_final(ctx->shash, req->result);
2159 /* Done with hash, can deallocate it now */
2160 crypto_free_shash(ctx->shash->tfm);
2164 /* Otherwise call the internal function which uses SPU hw */
2165 ret = __ahash_final(req);
2171 static int __ahash_finup(struct ahash_request *req)
2173 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2175 flow_log("ahash_finup() nbytes:%u\n", req->nbytes);
2177 rctx->total_todo += req->nbytes;
2181 return ahash_enqueue(req);
2184 static int ahash_finup(struct ahash_request *req)
2186 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2187 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2193 if (spu_no_incr_hash(ctx)) {
2195 * If we get an incremental hashing request and it's not
2196 * supported by the hardware, we need to handle it in software
2197 * by calling synchronous hash functions.
2200 nents = sg_nents(req->src);
2203 goto ahash_finup_exit;
2206 /* Copy data from req scatterlist to tmp buffer */
2207 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2208 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2209 tmpbuf = kmalloc(req->nbytes, gfp);
2212 goto ahash_finup_exit;
2215 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) !=
2218 goto ahash_finup_free;
2221 /* Call synchronous finup */
2222 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes,
2225 /* Otherwise call the internal function which uses SPU hw */
2226 return __ahash_finup(req);
2232 /* Done with hash, can deallocate it now */
2233 crypto_free_shash(ctx->shash->tfm);
2238 static int ahash_digest(struct ahash_request *req)
2242 flow_log("ahash_digest() nbytes:%u\n", req->nbytes);
2244 /* whole thing at once */
2245 err = __ahash_init(req);
2247 err = __ahash_finup(req);
2252 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
2253 unsigned int keylen)
2255 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2257 flow_log("%s() ahash:%p key:%p keylen:%u\n",
2258 __func__, ahash, key, keylen);
2259 flow_dump(" key: ", key, keylen);
2261 if (ctx->auth.alg == HASH_ALG_AES) {
2263 case AES_KEYSIZE_128:
2264 ctx->cipher_type = CIPHER_TYPE_AES128;
2266 case AES_KEYSIZE_192:
2267 ctx->cipher_type = CIPHER_TYPE_AES192;
2269 case AES_KEYSIZE_256:
2270 ctx->cipher_type = CIPHER_TYPE_AES256;
2273 pr_err("%s() Error: Invalid key length\n", __func__);
2277 pr_err("%s() Error: unknown hash alg\n", __func__);
2280 memcpy(ctx->authkey, key, keylen);
2281 ctx->authkeylen = keylen;
2286 static int ahash_export(struct ahash_request *req, void *out)
2288 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2289 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out;
2291 spu_exp->total_todo = rctx->total_todo;
2292 spu_exp->total_sent = rctx->total_sent;
2293 spu_exp->is_sw_hmac = rctx->is_sw_hmac;
2294 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry));
2295 spu_exp->hash_carry_len = rctx->hash_carry_len;
2296 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash));
2301 static int ahash_import(struct ahash_request *req, const void *in)
2303 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2304 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in;
2306 rctx->total_todo = spu_exp->total_todo;
2307 rctx->total_sent = spu_exp->total_sent;
2308 rctx->is_sw_hmac = spu_exp->is_sw_hmac;
2309 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry));
2310 rctx->hash_carry_len = spu_exp->hash_carry_len;
2311 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash));
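/*
 * Illustrative sketch, not part of the driver: how the crypto API uses the
 * export/import pair above to checkpoint and resume a partial hash. The
 * state buffer size comes from halg.statesize, which this driver sets to
 * sizeof(struct spu_hash_export_s).
 */
static int __maybe_unused example_hash_save_restore(struct ahash_request *req1,
                                                    struct ahash_request *req2)
{
    u8 state[sizeof(struct spu_hash_export_s)];
    int err;

    err = crypto_ahash_export(req1, state);
    if (err)
        return err;

    /* req2 continues exactly where req1 left off */
    return crypto_ahash_import(req2, state);
}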
2316 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
2317 unsigned int keylen)
2319 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash);
2320 unsigned int blocksize =
2321 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
2322 unsigned int digestsize = crypto_ahash_digestsize(ahash);
2326 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n",
2327 __func__, ahash, key, keylen, blocksize, digestsize);
2328 flow_dump(" key: ", key, keylen);
2330 if (keylen > blocksize) {
2331 switch (ctx->auth.alg) {
2333 rc = do_shash("md5", ctx->authkey, key, keylen, NULL,
2337 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL,
2340 case HASH_ALG_SHA224:
2341 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL,
2344 case HASH_ALG_SHA256:
2345 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL,
2348 case HASH_ALG_SHA384:
2349 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL,
2352 case HASH_ALG_SHA512:
2353 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL,
2356 case HASH_ALG_SHA3_224:
2357 rc = do_shash("sha3-224", ctx->authkey, key, keylen,
2360 case HASH_ALG_SHA3_256:
2361 rc = do_shash("sha3-256", ctx->authkey, key, keylen,
2364 case HASH_ALG_SHA3_384:
2365 rc = do_shash("sha3-384", ctx->authkey, key, keylen,
2368 case HASH_ALG_SHA3_512:
2369 rc = do_shash("sha3-512", ctx->authkey, key, keylen,
2373 pr_err("%s() Error: unknown hash alg\n", __func__);
2377 pr_err("%s() Error %d computing shash for %s\n",
2378 __func__, rc, hash_alg_name[ctx->auth.alg]);
2381 ctx->authkeylen = digestsize;
2383 flow_log(" keylen > blocksize... hashed\n");
2384 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen);
2386 memcpy(ctx->authkey, key, keylen);
2387 ctx->authkeylen = keylen;
2391 * Full HMAC operation in SPU-M is not verified,
2392 * so keep the generation of IPAD and OPAD and the
2393 * outer hashing in software.
2395 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) {
2396 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen);
2397 memset(ctx->ipad + ctx->authkeylen, 0,
2398 blocksize - ctx->authkeylen);
2399 ctx->authkeylen = 0;
2400 unsafe_memcpy(ctx->opad, ctx->ipad, blocksize,
2401 "fortified memcpy causes -Wrestrict warning");
2403 for (index = 0; index < blocksize; index++) {
2404 ctx->ipad[index] ^= HMAC_IPAD_VALUE;
2405 ctx->opad[index] ^= HMAC_OPAD_VALUE;
2408 flow_dump(" ipad: ", ctx->ipad, blocksize);
2409 flow_dump(" opad: ", ctx->opad, blocksize);
2411 ctx->digestsize = digestsize;
2412 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]);
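/*
 * For reference, the construction assembled above is RFC 2104 HMAC:
 *
 *   HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key zero-padded to the hash block size (first hashed
 * down to digest size if longer than a block), ipad is 0x36 repeated and
 * opad is 0x5c repeated (HMAC_IPAD_VALUE / HMAC_OPAD_VALUE from
 * <crypto/hmac.h>).
 */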
2417 static int ahash_hmac_init(struct ahash_request *req)
2419 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2420 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2421 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2422 unsigned int blocksize =
2423 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2425 flow_log("ahash_hmac_init()\n");
2427 /* init the context as a hash */
2430 if (!spu_no_incr_hash(ctx)) {
2431 /* SPU-M can do incr hashing but needs sw for outer HMAC */
2432 rctx->is_sw_hmac = true;
2433 ctx->auth.mode = HASH_MODE_HASH;
2434 /* start with a prepended ipad */
2435 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2436 rctx->hash_carry_len = blocksize;
2437 rctx->total_todo += blocksize;
2443 static int ahash_hmac_update(struct ahash_request *req)
2445 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes);
2450 return ahash_update(req);
2453 static int ahash_hmac_final(struct ahash_request *req)
2455 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes);
2457 return ahash_final(req);
2460 static int ahash_hmac_finup(struct ahash_request *req)
2462 flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes);
2464 return ahash_finup(req);
2467 static int ahash_hmac_digest(struct ahash_request *req)
2469 struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
2470 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2471 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);
2472 unsigned int blocksize =
2473 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2475 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes);
2477 /* Perform initialization and then call finup */
2480 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) {
2482 * SPU2 supports the full HMAC implementation in
2483 * hardware, so there is no need to generate IPAD,
2484 * OPAD, or the outer hash in software.
2485 * Only when the hash key is longer than the hash
2486 * block size does SPU2 expect the key to be hashed,
2487 * shortened to digest size, and fed in as the hash key.
2489 rctx->is_sw_hmac = false;
2490 ctx->auth.mode = HASH_MODE_HMAC;
2492 rctx->is_sw_hmac = true;
2493 ctx->auth.mode = HASH_MODE_HASH;
2494 /* start with a prepended ipad */
2495 memcpy(rctx->hash_carry, ctx->ipad, blocksize);
2496 rctx->hash_carry_len = blocksize;
2497 rctx->total_todo += blocksize;
2500 return __ahash_finup(req);
2505 static int aead_need_fallback(struct aead_request *req)
2507 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2508 struct spu_hw *spu = &iproc_priv.spu;
2509 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2510 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2514 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
2515 * and AAD are both 0 bytes long. So use fallback in this case.
2517 if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
2518 (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
2519 (req->assoclen == 0)) {
2520 if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
2521 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
2522 flow_log("AES GCM/CCM needs fallback for 0 len req\n");
2527 /* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
2528 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2529 (spu->spu_type == SPU_TYPE_SPUM) &&
2530 (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
2531 (ctx->digestsize != 16)) {
2532 flow_log("%s() AES CCM needs fallback for digest size %d\n",
2533 __func__, ctx->digestsize);
2538 * SPU-M on NSP has an issue where AES-CCM hash is not correct
2539 * when AAD size is 0
2541 if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
2542 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
2543 (req->assoclen == 0)) {
2544 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
2550 * RFC4106 and RFC4543 cannot handle the case where AAD is other than
2551 * 16 or 20 bytes long. So use fallback in this case.
2553 if (ctx->cipher.mode == CIPHER_MODE_GCM &&
2554 ctx->cipher.alg == CIPHER_ALG_AES &&
2555 rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
2556 req->assoclen != 16 && req->assoclen != 20) {
2557 flow_log("RFC4106/RFC4543 needs fallback for assoclen other than 16 or 20 bytes\n");
2562 payload_len = req->cryptlen;
2563 if (spu->spu_type == SPU_TYPE_SPUM)
2564 payload_len += req->assoclen;
2566 flow_log("%s() payload len: %u\n", __func__, payload_len);
2568 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2571 return payload_len > ctx->max_payload;
2574 static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
2576 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2577 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
2578 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2579 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
2580 struct aead_request *subreq;
2582 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2584 if (!ctx->fallback_cipher)
2587 subreq = &rctx->req;
2588 aead_request_set_tfm(subreq, ctx->fallback_cipher);
2589 aead_request_set_callback(subreq, aead_request_flags(req),
2590 req->base.complete, req->base.data);
2591 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2593 aead_request_set_ad(subreq, req->assoclen);
2595 return is_encrypt ? crypto_aead_encrypt(subreq) :
2596 crypto_aead_decrypt(subreq);
2599 static int aead_enqueue(struct aead_request *req, bool is_encrypt)
2601 struct iproc_reqctx_s *rctx = aead_request_ctx(req);
2602 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2603 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
2606 flow_log("%s() enc:%u\n", __func__, is_encrypt);
2608 if (req->assoclen > MAX_ASSOC_SIZE) {
2610 ("%s() Error: associated data too long. (%u > %u bytes)\n",
2611 __func__, req->assoclen, MAX_ASSOC_SIZE);
2615 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
2616 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
2617 rctx->parent = &req->base;
2618 rctx->is_encrypt = is_encrypt;
2619 rctx->bd_suppress = false;
2620 rctx->total_todo = req->cryptlen;
2622 rctx->total_sent = 0;
2623 rctx->total_received = 0;
2624 rctx->is_sw_hmac = false;
2626 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message));
2628 /* assoc data is at start of src sg */
2629 rctx->assoc = req->src;
2632 * Init current position in src scatterlist to be after assoc data.
2633 * src_skip set to buffer offset where data begins. (Assoc data could
2634 * end in the middle of a buffer.)
2636 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg,
2637 &rctx->src_skip) < 0) {
2638 pr_err("%s() Error: Unable to find start of src data\n",
2643 rctx->src_nents = 0;
2644 rctx->dst_nents = 0;
2645 if (req->dst == req->src) {
2646 rctx->dst_sg = rctx->src_sg;
2647 rctx->dst_skip = rctx->src_skip;
2650 * Expect req->dst to have room for assoc data followed by
2651 * output data and ICV, if encrypt. So initialize dst_sg
2652 * to point beyond assoc len offset.
2654 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg,
2655 &rctx->dst_skip) < 0) {
2656 pr_err("%s() Error: Unable to find start of dst data\n",
2662 if (ctx->cipher.mode == CIPHER_MODE_CBC ||
2663 ctx->cipher.mode == CIPHER_MODE_CTR ||
2664 ctx->cipher.mode == CIPHER_MODE_OFB ||
2665 ctx->cipher.mode == CIPHER_MODE_XTS ||
2666 ctx->cipher.mode == CIPHER_MODE_GCM) {
2669 crypto_aead_ivsize(crypto_aead_reqtfm(req));
2670 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) {
2671 rctx->iv_ctr_len = CCM_AES_IV_SIZE;
2673 rctx->iv_ctr_len = 0;
2676 rctx->hash_carry_len = 0;
2678 flow_log(" src sg: %p\n", req->src);
2679 flow_log(" rctx->src_sg: %p, src_skip %u\n",
2680 rctx->src_sg, rctx->src_skip);
2681 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen);
2682 flow_log(" dst sg: %p\n", req->dst);
2683 flow_log(" rctx->dst_sg: %p, dst_skip %u\n",
2684 rctx->dst_sg, rctx->dst_skip);
2685 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len);
2686 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len);
2687 flow_log(" authkeylen:%u\n", ctx->authkeylen);
2688 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no");
2690 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
2691 flow_log(" max_payload infinite\n");
2693 flow_log(" max_payload: %u\n", ctx->max_payload);
2695 if (unlikely(aead_need_fallback(req)))
2696 return aead_do_fallback(req, is_encrypt);
2699 * Do memory allocations for request after fallback check, because if we
2700 * do fallback, we won't call finish_req() to dealloc.
2702 if (rctx->iv_ctr_len) {
2704 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset,
2705 ctx->salt, ctx->salt_len);
2706 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len,
2708 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset);
2711 rctx->chan_idx = select_channel();
2712 err = handle_aead_req(rctx);
2713 if (err != -EINPROGRESS)
2714 /* synchronous result */
2715 spu_chunk_cleanup(rctx);
2720 static int aead_authenc_setkey(struct crypto_aead *cipher,
2721 const u8 *key, unsigned int keylen)
2723 struct spu_hw *spu = &iproc_priv.spu;
2724 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2725 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2726 struct crypto_authenc_keys keys;
2729 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
2731 flow_dump(" key: ", key, keylen);
2733 ret = crypto_authenc_extractkeys(&keys, key, keylen);
2737 if (keys.enckeylen > MAX_KEY_SIZE ||
2738 keys.authkeylen > MAX_KEY_SIZE)
2741 ctx->enckeylen = keys.enckeylen;
2742 ctx->authkeylen = keys.authkeylen;
2744 memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
2745 /* May end up padding auth key. So make sure it's zeroed. */
2746 memset(ctx->authkey, 0, sizeof(ctx->authkey));
2747 memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
2749 switch (ctx->alg->cipher_info.alg) {
2750 case CIPHER_ALG_DES:
2751 if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen))
2754 ctx->cipher_type = CIPHER_TYPE_DES;
2756 case CIPHER_ALG_3DES:
2757 if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen))
2760 ctx->cipher_type = CIPHER_TYPE_3DES;
2762 case CIPHER_ALG_AES:
2763 switch (ctx->enckeylen) {
2764 case AES_KEYSIZE_128:
2765 ctx->cipher_type = CIPHER_TYPE_AES128;
2767 case AES_KEYSIZE_192:
2768 ctx->cipher_type = CIPHER_TYPE_AES192;
2770 case AES_KEYSIZE_256:
2771 ctx->cipher_type = CIPHER_TYPE_AES256;
2778 pr_err("%s() Error: Unknown cipher alg\n", __func__);
2782 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2784 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2785 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2787 /* setkey the fallback just in case we need to use it */
2788 if (ctx->fallback_cipher) {
2789 flow_log(" running fallback setkey()\n");
2791 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2792 ctx->fallback_cipher->base.crt_flags |=
2793 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2794 ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
2796 flow_log(" fallback setkey() returned:%d\n", ret);
2799 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2803 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2809 ctx->authkeylen = 0;
2810 ctx->digestsize = 0;
2815 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher,
2816 const u8 *key, unsigned int keylen)
2818 struct spu_hw *spu = &iproc_priv.spu;
2819 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2820 struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
2824 flow_log("%s() keylen:%u\n", __func__, keylen);
2825 flow_dump(" key: ", key, keylen);
2828 ctx->digestsize = keylen;
2830 ctx->enckeylen = keylen;
2831 ctx->authkeylen = 0;
2833 switch (ctx->enckeylen) {
2834 case AES_KEYSIZE_128:
2835 ctx->cipher_type = CIPHER_TYPE_AES128;
2837 case AES_KEYSIZE_192:
2838 ctx->cipher_type = CIPHER_TYPE_AES192;
2840 case AES_KEYSIZE_256:
2841 ctx->cipher_type = CIPHER_TYPE_AES256;
2847 memcpy(ctx->enckey, key, ctx->enckeylen);
2849 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2851 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen);
2852 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen);
2854 /* setkey the fallback just in case we need to use it */
2855 if (ctx->fallback_cipher) {
2856 flow_log(" running fallback setkey()\n");
2858 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
2859 ctx->fallback_cipher->base.crt_flags |=
2860 tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
2861 ret = crypto_aead_setkey(ctx->fallback_cipher, key,
2862 keylen + ctx->salt_len);
2864 flow_log(" fallback setkey() returned:%d\n", ret);
2867 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen,
2871 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]);
2873 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen,
2880 ctx->authkeylen = 0;
2881 ctx->digestsize = 0;
2887 * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES.
2888 * @cipher: AEAD structure
2889 * @key: Key followed by 4 bytes of salt
2890 * @keylen: Length of key plus salt, in bytes
2892 * Extracts salt from key and stores it to be prepended to IV on each request.
2893 * Digest is always 16 bytes
2895 * Return: Value from generic gcm setkey.
2897 static int aead_gcm_esp_setkey(struct crypto_aead *cipher,
2898 const u8 *key, unsigned int keylen)
2900 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2902 flow_log("%s\n", __func__);
2904 if (keylen < GCM_ESP_SALT_SIZE)
2907 ctx->salt_len = GCM_ESP_SALT_SIZE;
2908 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2909 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2910 keylen -= GCM_ESP_SALT_SIZE;
2911 ctx->digestsize = GCM_ESP_DIGESTSIZE;
2913 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2915 return aead_gcm_ccm_setkey(cipher, key, keylen);
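/*
 * Illustrative sketch, not part of the driver: the key layout consumed by
 * the setkey above, i.e. the AES key immediately followed by the 4-byte
 * nonce salt. All byte values are arbitrary placeholders.
 */
static int __maybe_unused example_rfc4106_setkey(struct crypto_aead *tfm)
{
    /* 16-byte AES-128 key || 4-byte salt = 20-byte rfc4106 key */
    static const u8 key_and_salt[AES_KEYSIZE_128 + GCM_ESP_SALT_SIZE] = {
        /* AES key */
        0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
        0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
        /* salt, stripped off and cached by aead_gcm_esp_setkey() */
        0xde, 0xad, 0xbe, 0xef,
    };

    return crypto_aead_setkey(tfm, key_and_salt, sizeof(key_and_salt));
}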
2919 * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC.
2920 * @cipher: AEAD structure
2921 * @key: Key followed by 4 bytes of salt
2922 * @keylen: Length of key plus salt, in bytes
2924 * Extracts salt from key and stores it to be prepended to IV on each request.
2925 * Digest is always 16 bytes
2927 * Return: Value from generic gcm setkey.
2929 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher,
2930 const u8 *key, unsigned int keylen)
2932 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2934 flow_log("%s\n", __func__);
2936 if (keylen < GCM_ESP_SALT_SIZE)
2939 ctx->salt_len = GCM_ESP_SALT_SIZE;
2940 ctx->salt_offset = GCM_ESP_SALT_OFFSET;
2941 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE);
2942 keylen -= GCM_ESP_SALT_SIZE;
2943 ctx->digestsize = GCM_ESP_DIGESTSIZE;
2945 ctx->is_rfc4543 = true;
2946 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE);
2948 return aead_gcm_ccm_setkey(cipher, key, keylen);
2952 * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES.
2953 * @cipher: AEAD structure
2954 * @key: Key followed by 4 bytes of salt
2955 * @keylen: Length of key plus salt, in bytes
2957 * Extracts salt from key and stores it to be prepended to IV on each request.
2958 * Digest is always 16 bytes
2960 * Return: Value from generic ccm setkey.
2962 static int aead_ccm_esp_setkey(struct crypto_aead *cipher,
2963 const u8 *key, unsigned int keylen)
2965 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2967 flow_log("%s\n", __func__);
2969 if (keylen < CCM_ESP_SALT_SIZE)
2972 ctx->salt_len = CCM_ESP_SALT_SIZE;
2973 ctx->salt_offset = CCM_ESP_SALT_OFFSET;
2974 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE);
2975 keylen -= CCM_ESP_SALT_SIZE;
2977 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE);
2979 return aead_gcm_ccm_setkey(cipher, key, keylen);
2982 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize)
2984 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
2987 flow_log("%s() authkeylen:%u authsize:%u\n",
2988 __func__, ctx->authkeylen, authsize);
2990 ctx->digestsize = authsize;
2992 /* setkey the fallback just in case we need to use it */
2993 if (ctx->fallback_cipher) {
2994 flow_log(" running fallback setauth()\n");
2996 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize);
2998 flow_log(" fallback setauth() returned:%d\n", ret);
3004 static int aead_encrypt(struct aead_request *req)
3006 flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen,
3008 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3009 flow_log(" assoc_len:%u\n", req->assoclen);
3011 return aead_enqueue(req, true);
3014 static int aead_decrypt(struct aead_request *req)
3016 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen);
3017 dump_sg(req->src, 0, req->cryptlen + req->assoclen);
3018 flow_log(" assoc_len:%u\n", req->assoclen);
3020 return aead_enqueue(req, false);
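/*
 * Illustrative sketch, not part of the driver: driving the AEAD entry
 * points above through the crypto API. src carries assoc data followed by
 * the plaintext; on encrypt, dst must additionally have room for the ICV.
 */
static int __maybe_unused example_aead_encrypt(struct crypto_aead *tfm,
                                               struct scatterlist *src,
                                               struct scatterlist *dst,
                                               unsigned int assoclen,
                                               unsigned int cryptlen, u8 *iv)
{
    struct aead_request *req;
    DECLARE_CRYPTO_WAIT(wait);
    int err;

    req = aead_request_alloc(tfm, GFP_KERNEL);
    if (!req)
        return -ENOMEM;

    aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                              crypto_req_done, &wait);
    aead_request_set_ad(req, assoclen);
    aead_request_set_crypt(req, src, dst, cryptlen, iv);

    /* Lands in aead_encrypt() -> aead_enqueue(req, true) above */
    err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

    aead_request_free(req);
    return err;
}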
3023 /* ==================== Supported Cipher Algorithms ==================== */
3025 static struct iproc_alg_s driver_algs[] = {
3027 .type = CRYPTO_ALG_TYPE_AEAD,
3030 .cra_name = "gcm(aes)",
3031 .cra_driver_name = "gcm-aes-iproc",
3032 .cra_blocksize = AES_BLOCK_SIZE,
3033 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3035 .setkey = aead_gcm_ccm_setkey,
3036 .ivsize = GCM_AES_IV_SIZE,
3037 .maxauthsize = AES_BLOCK_SIZE,
3040 .alg = CIPHER_ALG_AES,
3041 .mode = CIPHER_MODE_GCM,
3044 .alg = HASH_ALG_AES,
3045 .mode = HASH_MODE_GCM,
3050 .type = CRYPTO_ALG_TYPE_AEAD,
3053 .cra_name = "ccm(aes)",
3054 .cra_driver_name = "ccm-aes-iproc",
3055 .cra_blocksize = AES_BLOCK_SIZE,
3056 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3058 .setkey = aead_gcm_ccm_setkey,
3059 .ivsize = CCM_AES_IV_SIZE,
3060 .maxauthsize = AES_BLOCK_SIZE,
3063 .alg = CIPHER_ALG_AES,
3064 .mode = CIPHER_MODE_CCM,
3067 .alg = HASH_ALG_AES,
3068 .mode = HASH_MODE_CCM,
3073 .type = CRYPTO_ALG_TYPE_AEAD,
3076 .cra_name = "rfc4106(gcm(aes))",
3077 .cra_driver_name = "gcm-aes-esp-iproc",
3078 .cra_blocksize = AES_BLOCK_SIZE,
3079 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3081 .setkey = aead_gcm_esp_setkey,
3082 .ivsize = GCM_RFC4106_IV_SIZE,
3083 .maxauthsize = AES_BLOCK_SIZE,
3086 .alg = CIPHER_ALG_AES,
3087 .mode = CIPHER_MODE_GCM,
3090 .alg = HASH_ALG_AES,
3091 .mode = HASH_MODE_GCM,
3096 .type = CRYPTO_ALG_TYPE_AEAD,
3099 .cra_name = "rfc4309(ccm(aes))",
3100 .cra_driver_name = "ccm-aes-esp-iproc",
3101 .cra_blocksize = AES_BLOCK_SIZE,
3102 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3104 .setkey = aead_ccm_esp_setkey,
3105 .ivsize = CCM_AES_IV_SIZE,
3106 .maxauthsize = AES_BLOCK_SIZE,
3109 .alg = CIPHER_ALG_AES,
3110 .mode = CIPHER_MODE_CCM,
3113 .alg = HASH_ALG_AES,
3114 .mode = HASH_MODE_CCM,
3119 .type = CRYPTO_ALG_TYPE_AEAD,
3122 .cra_name = "rfc4543(gcm(aes))",
3123 .cra_driver_name = "gmac-aes-esp-iproc",
3124 .cra_blocksize = AES_BLOCK_SIZE,
3125 .cra_flags = CRYPTO_ALG_NEED_FALLBACK
3127 .setkey = rfc4543_gcm_esp_setkey,
3128 .ivsize = GCM_RFC4106_IV_SIZE,
3129 .maxauthsize = AES_BLOCK_SIZE,
3132 .alg = CIPHER_ALG_AES,
3133 .mode = CIPHER_MODE_GCM,
3136 .alg = HASH_ALG_AES,
3137 .mode = HASH_MODE_GCM,
3142 .type = CRYPTO_ALG_TYPE_AEAD,
3145 .cra_name = "authenc(hmac(md5),cbc(aes))",
3146 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc",
3147 .cra_blocksize = AES_BLOCK_SIZE,
3148 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3150 CRYPTO_ALG_ALLOCATES_MEMORY
3152 .setkey = aead_authenc_setkey,
3153 .ivsize = AES_BLOCK_SIZE,
3154 .maxauthsize = MD5_DIGEST_SIZE,
3157 .alg = CIPHER_ALG_AES,
3158 .mode = CIPHER_MODE_CBC,
3161 .alg = HASH_ALG_MD5,
3162 .mode = HASH_MODE_HMAC,
3167 .type = CRYPTO_ALG_TYPE_AEAD,
3170 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3171 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc",
3172 .cra_blocksize = AES_BLOCK_SIZE,
3173 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3175 CRYPTO_ALG_ALLOCATES_MEMORY
3177 .setkey = aead_authenc_setkey,
3178 .ivsize = AES_BLOCK_SIZE,
3179 .maxauthsize = SHA1_DIGEST_SIZE,
3182 .alg = CIPHER_ALG_AES,
3183 .mode = CIPHER_MODE_CBC,
3186 .alg = HASH_ALG_SHA1,
3187 .mode = HASH_MODE_HMAC,
3192 .type = CRYPTO_ALG_TYPE_AEAD,
3195 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3196 .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc",
3197 .cra_blocksize = AES_BLOCK_SIZE,
3198 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3200 CRYPTO_ALG_ALLOCATES_MEMORY
3202 .setkey = aead_authenc_setkey,
3203 .ivsize = AES_BLOCK_SIZE,
3204 .maxauthsize = SHA256_DIGEST_SIZE,
3207 .alg = CIPHER_ALG_AES,
3208 .mode = CIPHER_MODE_CBC,
3211 .alg = HASH_ALG_SHA256,
3212 .mode = HASH_MODE_HMAC,
3217 .type = CRYPTO_ALG_TYPE_AEAD,
3220 .cra_name = "authenc(hmac(md5),cbc(des))",
3221 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc",
3222 .cra_blocksize = DES_BLOCK_SIZE,
3223 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3225 CRYPTO_ALG_ALLOCATES_MEMORY
3227 .setkey = aead_authenc_setkey,
3228 .ivsize = DES_BLOCK_SIZE,
3229 .maxauthsize = MD5_DIGEST_SIZE,
3232 .alg = CIPHER_ALG_DES,
3233 .mode = CIPHER_MODE_CBC,
3236 .alg = HASH_ALG_MD5,
3237 .mode = HASH_MODE_HMAC,
3242 .type = CRYPTO_ALG_TYPE_AEAD,
3245 .cra_name = "authenc(hmac(sha1),cbc(des))",
3246 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc",
3247 .cra_blocksize = DES_BLOCK_SIZE,
3248 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3250 CRYPTO_ALG_ALLOCATES_MEMORY
3252 .setkey = aead_authenc_setkey,
3253 .ivsize = DES_BLOCK_SIZE,
3254 .maxauthsize = SHA1_DIGEST_SIZE,
3257 .alg = CIPHER_ALG_DES,
3258 .mode = CIPHER_MODE_CBC,
3261 .alg = HASH_ALG_SHA1,
3262 .mode = HASH_MODE_HMAC,
3267 .type = CRYPTO_ALG_TYPE_AEAD,
3270 .cra_name = "authenc(hmac(sha224),cbc(des))",
3271 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc",
3272 .cra_blocksize = DES_BLOCK_SIZE,
3273 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3275 CRYPTO_ALG_ALLOCATES_MEMORY
3277 .setkey = aead_authenc_setkey,
3278 .ivsize = DES_BLOCK_SIZE,
3279 .maxauthsize = SHA224_DIGEST_SIZE,
3282 .alg = CIPHER_ALG_DES,
3283 .mode = CIPHER_MODE_CBC,
3286 .alg = HASH_ALG_SHA224,
3287 .mode = HASH_MODE_HMAC,
3292 .type = CRYPTO_ALG_TYPE_AEAD,
3295 .cra_name = "authenc(hmac(sha256),cbc(des))",
3296 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc",
3297 .cra_blocksize = DES_BLOCK_SIZE,
3298 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3300 CRYPTO_ALG_ALLOCATES_MEMORY
3302 .setkey = aead_authenc_setkey,
3303 .ivsize = DES_BLOCK_SIZE,
3304 .maxauthsize = SHA256_DIGEST_SIZE,
3307 .alg = CIPHER_ALG_DES,
3308 .mode = CIPHER_MODE_CBC,
3311 .alg = HASH_ALG_SHA256,
3312 .mode = HASH_MODE_HMAC,
3317 .type = CRYPTO_ALG_TYPE_AEAD,
3320 .cra_name = "authenc(hmac(sha384),cbc(des))",
3321 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc",
3322 .cra_blocksize = DES_BLOCK_SIZE,
3323 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3325 CRYPTO_ALG_ALLOCATES_MEMORY
3327 .setkey = aead_authenc_setkey,
3328 .ivsize = DES_BLOCK_SIZE,
3329 .maxauthsize = SHA384_DIGEST_SIZE,
3332 .alg = CIPHER_ALG_DES,
3333 .mode = CIPHER_MODE_CBC,
3336 .alg = HASH_ALG_SHA384,
3337 .mode = HASH_MODE_HMAC,
3342 .type = CRYPTO_ALG_TYPE_AEAD,
3345 .cra_name = "authenc(hmac(sha512),cbc(des))",
3346 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc",
3347 .cra_blocksize = DES_BLOCK_SIZE,
3348 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3350 CRYPTO_ALG_ALLOCATES_MEMORY
3352 .setkey = aead_authenc_setkey,
3353 .ivsize = DES_BLOCK_SIZE,
3354 .maxauthsize = SHA512_DIGEST_SIZE,
3357 .alg = CIPHER_ALG_DES,
3358 .mode = CIPHER_MODE_CBC,
3361 .alg = HASH_ALG_SHA512,
3362 .mode = HASH_MODE_HMAC,
3367 .type = CRYPTO_ALG_TYPE_AEAD,
3370 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
3371 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc",
3372 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3373 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3375 CRYPTO_ALG_ALLOCATES_MEMORY
3377 .setkey = aead_authenc_setkey,
3378 .ivsize = DES3_EDE_BLOCK_SIZE,
3379 .maxauthsize = MD5_DIGEST_SIZE,
3382 .alg = CIPHER_ALG_3DES,
3383 .mode = CIPHER_MODE_CBC,
3386 .alg = HASH_ALG_MD5,
3387 .mode = HASH_MODE_HMAC,
3392 .type = CRYPTO_ALG_TYPE_AEAD,
3395 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
3396 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc",
3397 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3398 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3400 CRYPTO_ALG_ALLOCATES_MEMORY
3402 .setkey = aead_authenc_setkey,
3403 .ivsize = DES3_EDE_BLOCK_SIZE,
3404 .maxauthsize = SHA1_DIGEST_SIZE,
3407 .alg = CIPHER_ALG_3DES,
3408 .mode = CIPHER_MODE_CBC,
3411 .alg = HASH_ALG_SHA1,
3412 .mode = HASH_MODE_HMAC,
3417 .type = CRYPTO_ALG_TYPE_AEAD,
3420 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
3421 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc",
3422 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3423 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3425 CRYPTO_ALG_ALLOCATES_MEMORY
3427 .setkey = aead_authenc_setkey,
3428 .ivsize = DES3_EDE_BLOCK_SIZE,
3429 .maxauthsize = SHA224_DIGEST_SIZE,
3432 .alg = CIPHER_ALG_3DES,
3433 .mode = CIPHER_MODE_CBC,
3436 .alg = HASH_ALG_SHA224,
3437 .mode = HASH_MODE_HMAC,
3442 .type = CRYPTO_ALG_TYPE_AEAD,
3445 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
3446 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc",
3447 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3448 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3450 CRYPTO_ALG_ALLOCATES_MEMORY
3452 .setkey = aead_authenc_setkey,
3453 .ivsize = DES3_EDE_BLOCK_SIZE,
3454 .maxauthsize = SHA256_DIGEST_SIZE,
3457 .alg = CIPHER_ALG_3DES,
3458 .mode = CIPHER_MODE_CBC,
3461 .alg = HASH_ALG_SHA256,
3462 .mode = HASH_MODE_HMAC,
3467 .type = CRYPTO_ALG_TYPE_AEAD,
3470 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
3471 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc",
3472 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3473 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3475 CRYPTO_ALG_ALLOCATES_MEMORY
3477 .setkey = aead_authenc_setkey,
3478 .ivsize = DES3_EDE_BLOCK_SIZE,
3479 .maxauthsize = SHA384_DIGEST_SIZE,
3482 .alg = CIPHER_ALG_3DES,
3483 .mode = CIPHER_MODE_CBC,
3486 .alg = HASH_ALG_SHA384,
3487 .mode = HASH_MODE_HMAC,
3492 .type = CRYPTO_ALG_TYPE_AEAD,
3495 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
3496 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc",
3497 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
3498 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
3500 CRYPTO_ALG_ALLOCATES_MEMORY
3502 .setkey = aead_authenc_setkey,
3503 .ivsize = DES3_EDE_BLOCK_SIZE,
3504 .maxauthsize = SHA512_DIGEST_SIZE,
3507 .alg = CIPHER_ALG_3DES,
3508 .mode = CIPHER_MODE_CBC,
3511 .alg = HASH_ALG_SHA512,
3512 .mode = HASH_MODE_HMAC,
3517 /* SKCIPHER algorithms. */
3519 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3521 .base.cra_name = "ofb(des)",
3522 .base.cra_driver_name = "ofb-des-iproc",
3523 .base.cra_blocksize = DES_BLOCK_SIZE,
3524 .min_keysize = DES_KEY_SIZE,
3525 .max_keysize = DES_KEY_SIZE,
3526 .ivsize = DES_BLOCK_SIZE,
3529 .alg = CIPHER_ALG_DES,
3530 .mode = CIPHER_MODE_OFB,
3533 .alg = HASH_ALG_NONE,
3534 .mode = HASH_MODE_NONE,
3538 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3540 .base.cra_name = "cbc(des)",
3541 .base.cra_driver_name = "cbc-des-iproc",
3542 .base.cra_blocksize = DES_BLOCK_SIZE,
3543 .min_keysize = DES_KEY_SIZE,
3544 .max_keysize = DES_KEY_SIZE,
3545 .ivsize = DES_BLOCK_SIZE,
3548 .alg = CIPHER_ALG_DES,
3549 .mode = CIPHER_MODE_CBC,
3552 .alg = HASH_ALG_NONE,
3553 .mode = HASH_MODE_NONE,
3557 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3559 .base.cra_name = "ecb(des)",
3560 .base.cra_driver_name = "ecb-des-iproc",
3561 .base.cra_blocksize = DES_BLOCK_SIZE,
3562 .min_keysize = DES_KEY_SIZE,
3563 .max_keysize = DES_KEY_SIZE,
3567 .alg = CIPHER_ALG_DES,
3568 .mode = CIPHER_MODE_ECB,
3571 .alg = HASH_ALG_NONE,
3572 .mode = HASH_MODE_NONE,
3576 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3578 .base.cra_name = "ofb(des3_ede)",
3579 .base.cra_driver_name = "ofb-des3-iproc",
3580 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3581 .min_keysize = DES3_EDE_KEY_SIZE,
3582 .max_keysize = DES3_EDE_KEY_SIZE,
3583 .ivsize = DES3_EDE_BLOCK_SIZE,
3586 .alg = CIPHER_ALG_3DES,
3587 .mode = CIPHER_MODE_OFB,
3590 .alg = HASH_ALG_NONE,
3591 .mode = HASH_MODE_NONE,
3595 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3597 .base.cra_name = "cbc(des3_ede)",
3598 .base.cra_driver_name = "cbc-des3-iproc",
3599 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3600 .min_keysize = DES3_EDE_KEY_SIZE,
3601 .max_keysize = DES3_EDE_KEY_SIZE,
3602 .ivsize = DES3_EDE_BLOCK_SIZE,
3605 .alg = CIPHER_ALG_3DES,
3606 .mode = CIPHER_MODE_CBC,
3609 .alg = HASH_ALG_NONE,
3610 .mode = HASH_MODE_NONE,
3614 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3616 .base.cra_name = "ecb(des3_ede)",
3617 .base.cra_driver_name = "ecb-des3-iproc",
3618 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
3619 .min_keysize = DES3_EDE_KEY_SIZE,
3620 .max_keysize = DES3_EDE_KEY_SIZE,
3624 .alg = CIPHER_ALG_3DES,
3625 .mode = CIPHER_MODE_ECB,
3628 .alg = HASH_ALG_NONE,
3629 .mode = HASH_MODE_NONE,
3633 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3635 .base.cra_name = "ofb(aes)",
3636 .base.cra_driver_name = "ofb-aes-iproc",
3637 .base.cra_blocksize = AES_BLOCK_SIZE,
3638 .min_keysize = AES_MIN_KEY_SIZE,
3639 .max_keysize = AES_MAX_KEY_SIZE,
3640 .ivsize = AES_BLOCK_SIZE,
3643 .alg = CIPHER_ALG_AES,
3644 .mode = CIPHER_MODE_OFB,
3647 .alg = HASH_ALG_NONE,
3648 .mode = HASH_MODE_NONE,
3652 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3654 .base.cra_name = "cbc(aes)",
3655 .base.cra_driver_name = "cbc-aes-iproc",
3656 .base.cra_blocksize = AES_BLOCK_SIZE,
3657 .min_keysize = AES_MIN_KEY_SIZE,
3658 .max_keysize = AES_MAX_KEY_SIZE,
3659 .ivsize = AES_BLOCK_SIZE,
3662 .alg = CIPHER_ALG_AES,
3663 .mode = CIPHER_MODE_CBC,
3666 .alg = HASH_ALG_NONE,
3667 .mode = HASH_MODE_NONE,
3671 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3673 .base.cra_name = "ecb(aes)",
3674 .base.cra_driver_name = "ecb-aes-iproc",
3675 .base.cra_blocksize = AES_BLOCK_SIZE,
3676 .min_keysize = AES_MIN_KEY_SIZE,
3677 .max_keysize = AES_MAX_KEY_SIZE,
3681 .alg = CIPHER_ALG_AES,
3682 .mode = CIPHER_MODE_ECB,
3685 .alg = HASH_ALG_NONE,
3686 .mode = HASH_MODE_NONE,
3690 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3692 .base.cra_name = "ctr(aes)",
3693 .base.cra_driver_name = "ctr-aes-iproc",
3694 .base.cra_blocksize = AES_BLOCK_SIZE,
3695 .min_keysize = AES_MIN_KEY_SIZE,
3696 .max_keysize = AES_MAX_KEY_SIZE,
3697 .ivsize = AES_BLOCK_SIZE,
3700 .alg = CIPHER_ALG_AES,
3701 .mode = CIPHER_MODE_CTR,
3704 .alg = HASH_ALG_NONE,
3705 .mode = HASH_MODE_NONE,
3709 .type = CRYPTO_ALG_TYPE_SKCIPHER,
3711 .base.cra_name = "xts(aes)",
3712 .base.cra_driver_name = "xts-aes-iproc",
3713 .base.cra_blocksize = AES_BLOCK_SIZE,
3714 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3715 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3716 .ivsize = AES_BLOCK_SIZE,
3719 .alg = CIPHER_ALG_AES,
3720 .mode = CIPHER_MODE_XTS,
3723 .alg = HASH_ALG_NONE,
3724 .mode = HASH_MODE_NONE,
3728 /* AHASH algorithms. */
3730 .type = CRYPTO_ALG_TYPE_AHASH,
3732 .halg.digestsize = MD5_DIGEST_SIZE,
3735 .cra_driver_name = "md5-iproc",
3736 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3737 .cra_flags = CRYPTO_ALG_ASYNC |
3738 CRYPTO_ALG_ALLOCATES_MEMORY,
3742 .alg = CIPHER_ALG_NONE,
3743 .mode = CIPHER_MODE_NONE,
3746 .alg = HASH_ALG_MD5,
3747 .mode = HASH_MODE_HASH,
3751 .type = CRYPTO_ALG_TYPE_AHASH,
3753 .halg.digestsize = MD5_DIGEST_SIZE,
3755 .cra_name = "hmac(md5)",
3756 .cra_driver_name = "hmac-md5-iproc",
3757 .cra_blocksize = MD5_BLOCK_WORDS * 4,
3761 .alg = CIPHER_ALG_NONE,
3762 .mode = CIPHER_MODE_NONE,
3765 .alg = HASH_ALG_MD5,
3766 .mode = HASH_MODE_HMAC,
3769 {.type = CRYPTO_ALG_TYPE_AHASH,
3771 .halg.digestsize = SHA1_DIGEST_SIZE,
3774 .cra_driver_name = "sha1-iproc",
3775 .cra_blocksize = SHA1_BLOCK_SIZE,
3779 .alg = CIPHER_ALG_NONE,
3780 .mode = CIPHER_MODE_NONE,
3783 .alg = HASH_ALG_SHA1,
3784 .mode = HASH_MODE_HASH,
3787 {.type = CRYPTO_ALG_TYPE_AHASH,
3789 .halg.digestsize = SHA1_DIGEST_SIZE,
3791 .cra_name = "hmac(sha1)",
3792 .cra_driver_name = "hmac-sha1-iproc",
3793 .cra_blocksize = SHA1_BLOCK_SIZE,
3797 .alg = CIPHER_ALG_NONE,
3798 .mode = CIPHER_MODE_NONE,
3801 .alg = HASH_ALG_SHA1,
3802 .mode = HASH_MODE_HMAC,
3805 {.type = CRYPTO_ALG_TYPE_AHASH,
3807 .halg.digestsize = SHA224_DIGEST_SIZE,
3809 .cra_name = "sha224",
3810 .cra_driver_name = "sha224-iproc",
3811 .cra_blocksize = SHA224_BLOCK_SIZE,
3815 .alg = CIPHER_ALG_NONE,
3816 .mode = CIPHER_MODE_NONE,
3819 .alg = HASH_ALG_SHA224,
3820 .mode = HASH_MODE_HASH,
3823 {.type = CRYPTO_ALG_TYPE_AHASH,
3825 .halg.digestsize = SHA224_DIGEST_SIZE,
3827 .cra_name = "hmac(sha224)",
3828 .cra_driver_name = "hmac-sha224-iproc",
3829 .cra_blocksize = SHA224_BLOCK_SIZE,
3833 .alg = CIPHER_ALG_NONE,
3834 .mode = CIPHER_MODE_NONE,
3837 .alg = HASH_ALG_SHA224,
3838 .mode = HASH_MODE_HMAC,
3841 {.type = CRYPTO_ALG_TYPE_AHASH,
3843 .halg.digestsize = SHA256_DIGEST_SIZE,
3845 .cra_name = "sha256",
3846 .cra_driver_name = "sha256-iproc",
3847 .cra_blocksize = SHA256_BLOCK_SIZE,
3851 .alg = CIPHER_ALG_NONE,
3852 .mode = CIPHER_MODE_NONE,
3855 .alg = HASH_ALG_SHA256,
3856 .mode = HASH_MODE_HASH,
3859 {.type = CRYPTO_ALG_TYPE_AHASH,
3861 .halg.digestsize = SHA256_DIGEST_SIZE,
3863 .cra_name = "hmac(sha256)",
3864 .cra_driver_name = "hmac-sha256-iproc",
3865 .cra_blocksize = SHA256_BLOCK_SIZE,
3869 .alg = CIPHER_ALG_NONE,
3870 .mode = CIPHER_MODE_NONE,
3873 .alg = HASH_ALG_SHA256,
3874 .mode = HASH_MODE_HMAC,
3878 .type = CRYPTO_ALG_TYPE_AHASH,
3880 .halg.digestsize = SHA384_DIGEST_SIZE,
3882 .cra_name = "sha384",
3883 .cra_driver_name = "sha384-iproc",
3884 .cra_blocksize = SHA384_BLOCK_SIZE,
3888 .alg = CIPHER_ALG_NONE,
3889 .mode = CIPHER_MODE_NONE,
3892 .alg = HASH_ALG_SHA384,
3893 .mode = HASH_MODE_HASH,
3897 .type = CRYPTO_ALG_TYPE_AHASH,
3899 .halg.digestsize = SHA384_DIGEST_SIZE,
3901 .cra_name = "hmac(sha384)",
3902 .cra_driver_name = "hmac-sha384-iproc",
3903 .cra_blocksize = SHA384_BLOCK_SIZE,
3907 .alg = CIPHER_ALG_NONE,
3908 .mode = CIPHER_MODE_NONE,
3911 .alg = HASH_ALG_SHA384,
3912 .mode = HASH_MODE_HMAC,
3916 .type = CRYPTO_ALG_TYPE_AHASH,
3918 .halg.digestsize = SHA512_DIGEST_SIZE,
3920 .cra_name = "sha512",
3921 .cra_driver_name = "sha512-iproc",
3922 .cra_blocksize = SHA512_BLOCK_SIZE,
3926 .alg = CIPHER_ALG_NONE,
3927 .mode = CIPHER_MODE_NONE,
3930 .alg = HASH_ALG_SHA512,
3931 .mode = HASH_MODE_HASH,
3935 .type = CRYPTO_ALG_TYPE_AHASH,
3937 .halg.digestsize = SHA512_DIGEST_SIZE,
3939 .cra_name = "hmac(sha512)",
3940 .cra_driver_name = "hmac-sha512-iproc",
3941 .cra_blocksize = SHA512_BLOCK_SIZE,
3945 .alg = CIPHER_ALG_NONE,
3946 .mode = CIPHER_MODE_NONE,
3949 .alg = HASH_ALG_SHA512,
3950 .mode = HASH_MODE_HMAC,
3954 .type = CRYPTO_ALG_TYPE_AHASH,
3956 .halg.digestsize = SHA3_224_DIGEST_SIZE,
3958 .cra_name = "sha3-224",
3959 .cra_driver_name = "sha3-224-iproc",
3960 .cra_blocksize = SHA3_224_BLOCK_SIZE,
3964 .alg = CIPHER_ALG_NONE,
3965 .mode = CIPHER_MODE_NONE,
3968 .alg = HASH_ALG_SHA3_224,
3969 .mode = HASH_MODE_HASH,
3973 .type = CRYPTO_ALG_TYPE_AHASH,
3975 .halg.digestsize = SHA3_224_DIGEST_SIZE,
3977 .cra_name = "hmac(sha3-224)",
3978 .cra_driver_name = "hmac-sha3-224-iproc",
3979 .cra_blocksize = SHA3_224_BLOCK_SIZE,
3983 .alg = CIPHER_ALG_NONE,
3984 .mode = CIPHER_MODE_NONE,
3987 .alg = HASH_ALG_SHA3_224,
3988 .mode = HASH_MODE_HMAC,
3992 .type = CRYPTO_ALG_TYPE_AHASH,
3994 .halg.digestsize = SHA3_256_DIGEST_SIZE,
3996 .cra_name = "sha3-256",
3997 .cra_driver_name = "sha3-256-iproc",
3998 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4002 .alg = CIPHER_ALG_NONE,
4003 .mode = CIPHER_MODE_NONE,
4006 .alg = HASH_ALG_SHA3_256,
4007 .mode = HASH_MODE_HASH,
4011 .type = CRYPTO_ALG_TYPE_AHASH,
4013 .halg.digestsize = SHA3_256_DIGEST_SIZE,
4015 .cra_name = "hmac(sha3-256)",
4016 .cra_driver_name = "hmac-sha3-256-iproc",
4017 .cra_blocksize = SHA3_256_BLOCK_SIZE,
4021 .alg = CIPHER_ALG_NONE,
4022 .mode = CIPHER_MODE_NONE,
4025 .alg = HASH_ALG_SHA3_256,
4026 .mode = HASH_MODE_HMAC,
4030 .type = CRYPTO_ALG_TYPE_AHASH,
4032 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4034 .cra_name = "sha3-384",
4035 .cra_driver_name = "sha3-384-iproc",
4036 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4040 .alg = CIPHER_ALG_NONE,
4041 .mode = CIPHER_MODE_NONE,
4044 .alg = HASH_ALG_SHA3_384,
4045 .mode = HASH_MODE_HASH,
4049 .type = CRYPTO_ALG_TYPE_AHASH,
4051 .halg.digestsize = SHA3_384_DIGEST_SIZE,
4053 .cra_name = "hmac(sha3-384)",
4054 .cra_driver_name = "hmac-sha3-384-iproc",
4055 .cra_blocksize = SHA3_384_BLOCK_SIZE,
4059 .alg = CIPHER_ALG_NONE,
4060 .mode = CIPHER_MODE_NONE,
4063 .alg = HASH_ALG_SHA3_384,
4064 .mode = HASH_MODE_HMAC,
4068 .type = CRYPTO_ALG_TYPE_AHASH,
4070 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4072 .cra_name = "sha3-512",
4073 .cra_driver_name = "sha3-512-iproc",
4074 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4078 .alg = CIPHER_ALG_NONE,
4079 .mode = CIPHER_MODE_NONE,
4082 .alg = HASH_ALG_SHA3_512,
4083 .mode = HASH_MODE_HASH,
4087 .type = CRYPTO_ALG_TYPE_AHASH,
4089 .halg.digestsize = SHA3_512_DIGEST_SIZE,
4091 .cra_name = "hmac(sha3-512)",
4092 .cra_driver_name = "hmac-sha3-512-iproc",
4093 .cra_blocksize = SHA3_512_BLOCK_SIZE,
4097 .alg = CIPHER_ALG_NONE,
4098 .mode = CIPHER_MODE_NONE,
4101 .alg = HASH_ALG_SHA3_512,
4102 .mode = HASH_MODE_HMAC,
4106 .type = CRYPTO_ALG_TYPE_AHASH,
4108 .halg.digestsize = AES_BLOCK_SIZE,
4110 .cra_name = "xcbc(aes)",
4111 .cra_driver_name = "xcbc-aes-iproc",
4112 .cra_blocksize = AES_BLOCK_SIZE,
4116 .alg = CIPHER_ALG_NONE,
4117 .mode = CIPHER_MODE_NONE,
4120 .alg = HASH_ALG_AES,
4121 .mode = HASH_MODE_XCBC,
4125 .type = CRYPTO_ALG_TYPE_AHASH,
4127 .halg.digestsize = AES_BLOCK_SIZE,
4129 .cra_name = "cmac(aes)",
4130 .cra_driver_name = "cmac-aes-iproc",
4131 .cra_blocksize = AES_BLOCK_SIZE,
4135 .alg = CIPHER_ALG_NONE,
4136 .mode = CIPHER_MODE_NONE,
4139 .alg = HASH_ALG_AES,
4140 .mode = HASH_MODE_CMAC,
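/*
 * Illustrative note: once registration succeeds, each entry in the table
 * above shows up in /proc/crypto. A representative (abridged) record might
 * look like the following, with the priority taken from the cipher_pri,
 * hash_pri and aead_pri module parameters:
 *
 *   name         : cbc(aes)
 *   driver       : cbc-aes-iproc
 *   priority     : 150
 */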
4145 static int generic_cra_init(struct crypto_tfm *tfm,
4146 struct iproc_alg_s *cipher_alg)
4148 struct spu_hw *spu = &iproc_priv.spu;
4149 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4150 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
4152 flow_log("%s()\n", __func__);
4154 ctx->alg = cipher_alg;
4155 ctx->cipher = cipher_alg->cipher_info;
4156 ctx->auth = cipher_alg->auth_info;
4157 ctx->auth_first = cipher_alg->auth_first;
4158 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
4161 ctx->fallback_cipher = NULL;
4164 ctx->authkeylen = 0;
4166 atomic_inc(&iproc_priv.stream_count);
4167 atomic_inc(&iproc_priv.session_count);
4172 static int skcipher_init_tfm(struct crypto_skcipher *skcipher)
4174 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
4175 struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
4176 struct iproc_alg_s *cipher_alg;
4178 flow_log("%s()\n", __func__);
4180 crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s));
4182 cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher);
4183 return generic_cra_init(tfm, cipher_alg);
4186 static int ahash_cra_init(struct crypto_tfm *tfm)
4189 struct crypto_alg *alg = tfm->__crt_alg;
4190 struct iproc_alg_s *cipher_alg;
4192 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s,
4195 err = generic_cra_init(tfm, cipher_alg);
4196 flow_log("%s()\n", __func__);
4199 * export state size has to be < 512 bytes. So don't include msg bufs
4202 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
4203 sizeof(struct iproc_reqctx_s));
4208 static int aead_cra_init(struct crypto_aead *aead)
4210 unsigned int reqsize = sizeof(struct iproc_reqctx_s);
4211 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4212 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4213 struct crypto_alg *alg = tfm->__crt_alg;
4214 struct aead_alg *aalg = container_of(alg, struct aead_alg, base);
4215 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s,
4218 int err = generic_cra_init(tfm, cipher_alg);
4220 flow_log("%s()\n", __func__);
4222 ctx->is_esp = false;
4224 ctx->salt_offset = 0;
4226 /* random first IV */
4227 get_random_bytes(ctx->iv, MAX_IV_SIZE);
4228 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
4233 if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
4236 flow_log("%s() creating fallback cipher\n", __func__);
4238 ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
4240 CRYPTO_ALG_NEED_FALLBACK);
4241 if (IS_ERR(ctx->fallback_cipher)) {
4242 pr_err("%s() Error: failed to allocate fallback for %s\n",
4243 __func__, alg->cra_name);
4244 return PTR_ERR(ctx->fallback_cipher);
4247 reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
4250 crypto_aead_set_reqsize(aead, reqsize);
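/*
 * Layout note for the reqsize computed above: the fallback subrequest
 * storage is carved out of the tail of this driver's request context,
 * which is why aead_do_fallback() can hand &rctx->req to the fallback
 * tfm without a separate allocation.
 */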
4256 static void generic_cra_exit(struct crypto_tfm *tfm)
4258 atomic_dec(&iproc_priv.session_count);
4261 static void skcipher_exit_tfm(struct crypto_skcipher *tfm)
4263 generic_cra_exit(crypto_skcipher_tfm(tfm));
4266 static void aead_cra_exit(struct crypto_aead *aead)
4268 struct crypto_tfm *tfm = crypto_aead_tfm(aead);
4269 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
4271 generic_cra_exit(tfm);
4273 if (ctx->fallback_cipher) {
4274 crypto_free_aead(ctx->fallback_cipher);
4275 ctx->fallback_cipher = NULL;
4280 * spu_functions_register() - Specify hardware-specific SPU functions based on
4281 * SPU type read from device tree.
4282 * @dev: device structure
4283 * @spu_type: SPU hardware generation
4284 * @spu_subtype: SPU hardware version
4286 static void spu_functions_register(struct device *dev,
4287 enum spu_spu_type spu_type,
4288 enum spu_spu_subtype spu_subtype)
4290 struct spu_hw *spu = &iproc_priv.spu;
4292 if (spu_type == SPU_TYPE_SPUM) {
4293 dev_dbg(dev, "Registering SPUM functions");
4294 spu->spu_dump_msg_hdr = spum_dump_msg_hdr;
4295 spu->spu_payload_length = spum_payload_length;
4296 spu->spu_response_hdr_len = spum_response_hdr_len;
4297 spu->spu_hash_pad_len = spum_hash_pad_len;
4298 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len;
4299 spu->spu_assoc_resp_len = spum_assoc_resp_len;
4300 spu->spu_aead_ivlen = spum_aead_ivlen;
4301 spu->spu_hash_type = spum_hash_type;
4302 spu->spu_digest_size = spum_digest_size;
4303 spu->spu_create_request = spum_create_request;
4304 spu->spu_cipher_req_init = spum_cipher_req_init;
4305 spu->spu_cipher_req_finish = spum_cipher_req_finish;
4306 spu->spu_request_pad = spum_request_pad;
4307 spu->spu_tx_status_len = spum_tx_status_len;
4308 spu->spu_rx_status_len = spum_rx_status_len;
4309 spu->spu_status_process = spum_status_process;
4310 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload;
4311 spu->spu_ccm_update_iv = spum_ccm_update_iv;
4312 spu->spu_wordalign_padlen = spum_wordalign_padlen;
4313 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2)
4314 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload;
4316 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload;
4318 dev_dbg(dev, "Registering SPU2 functions");
4319 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr;
4320 spu->spu_ctx_max_payload = spu2_ctx_max_payload;
4321 spu->spu_payload_length = spu2_payload_length;
4322 spu->spu_response_hdr_len = spu2_response_hdr_len;
4323 spu->spu_hash_pad_len = spu2_hash_pad_len;
4324 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len;
4325 spu->spu_assoc_resp_len = spu2_assoc_resp_len;
4326 spu->spu_aead_ivlen = spu2_aead_ivlen;
4327 spu->spu_hash_type = spu2_hash_type;
4328 spu->spu_digest_size = spu2_digest_size;
4329 spu->spu_create_request = spu2_create_request;
4330 spu->spu_cipher_req_init = spu2_cipher_req_init;
4331 spu->spu_cipher_req_finish = spu2_cipher_req_finish;
4332 spu->spu_request_pad = spu2_request_pad;
4333 spu->spu_tx_status_len = spu2_tx_status_len;
4334 spu->spu_rx_status_len = spu2_rx_status_len;
4335 spu->spu_status_process = spu2_status_process;
4336 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload;
4337 spu->spu_ccm_update_iv = spu2_ccm_update_iv;
4338 spu->spu_wordalign_padlen = spu2_wordalign_padlen;
4343 * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox
4344 * channel for the SPU being probed.
4345 * @dev: SPU driver device structure
4347 * Return: 0 if successful
4350 static int spu_mb_init(struct device *dev)
4352 struct mbox_client *mcl = &iproc_priv.mcl;
4355 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan,
4356 sizeof(struct mbox_chan *), GFP_KERNEL);
4357 if (!iproc_priv.mbox)
4361 mcl->tx_block = false;
4363 mcl->knows_txdone = true;
4364 mcl->rx_callback = spu_rx_callback;
4365 mcl->tx_done = NULL;
4367 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4368 iproc_priv.mbox[i] = mbox_request_channel(mcl, i);
4369 if (IS_ERR(iproc_priv.mbox[i])) {
4370 err = PTR_ERR(iproc_priv.mbox[i]);
4372 "Mbox channel %d request failed with err %d",
4374 iproc_priv.mbox[i] = NULL;
4381 for (i = 0; i < iproc_priv.spu.num_chan; i++) {
4382 if (iproc_priv.mbox[i])
4383 mbox_free_channel(iproc_priv.mbox[i]);
4389 static void spu_mb_release(struct platform_device *pdev)
4393 for (i = 0; i < iproc_priv.spu.num_chan; i++)
4394 mbox_free_channel(iproc_priv.mbox[i]);
4397 static void spu_counters_init(void)
4402 atomic_set(&iproc_priv.session_count, 0);
4403 atomic_set(&iproc_priv.stream_count, 0);
4404 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan);
4405 atomic64_set(&iproc_priv.bytes_in, 0);
4406 atomic64_set(&iproc_priv.bytes_out, 0);
4407 for (i = 0; i < SPU_OP_NUM; i++) {
4408 atomic_set(&iproc_priv.op_counts[i], 0);
4409 atomic_set(&iproc_priv.setkey_cnt[i], 0);
4411 for (i = 0; i < CIPHER_ALG_LAST; i++)
4412 for (j = 0; j < CIPHER_MODE_LAST; j++)
4413 atomic_set(&iproc_priv.cipher_cnt[i][j], 0);
4415 for (i = 0; i < HASH_ALG_LAST; i++) {
4416 atomic_set(&iproc_priv.hash_cnt[i], 0);
4417 atomic_set(&iproc_priv.hmac_cnt[i], 0);
4419 for (i = 0; i < AEAD_TYPE_LAST; i++)
4420 atomic_set(&iproc_priv.aead_cnt[i], 0);
4422 atomic_set(&iproc_priv.mb_no_spc, 0);
4423 atomic_set(&iproc_priv.mb_send_fail, 0);
4424 atomic_set(&iproc_priv.bad_icv, 0);
4427 static int spu_register_skcipher(struct iproc_alg_s *driver_alg)
4429 struct skcipher_alg *crypto = &driver_alg->alg.skcipher;
4432 crypto->base.cra_module = THIS_MODULE;
4433 crypto->base.cra_priority = cipher_pri;
4434 crypto->base.cra_alignmask = 0;
4435 crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4436 crypto->base.cra_flags = CRYPTO_ALG_ASYNC |
4437 CRYPTO_ALG_ALLOCATES_MEMORY |
4438 CRYPTO_ALG_KERN_DRIVER_ONLY;
4440 crypto->init = skcipher_init_tfm;
4441 crypto->exit = skcipher_exit_tfm;
4442 crypto->setkey = skcipher_setkey;
4443 crypto->encrypt = skcipher_encrypt;
4444 crypto->decrypt = skcipher_decrypt;
4446 err = crypto_register_skcipher(crypto);
4447 /* Mark alg as having been registered, if successful */
4449 driver_alg->registered = true;
4450 pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name);
4454 static int spu_register_ahash(struct iproc_alg_s *driver_alg)
4456 struct spu_hw *spu = &iproc_priv.spu;
4457 struct ahash_alg *hash = &driver_alg->alg.hash;
4460 /* AES-XCBC is the only AES hash type currently supported on SPU-M */
4461 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4462 (driver_alg->auth_info.mode != HASH_MODE_XCBC) &&
4463 (spu->spu_type == SPU_TYPE_SPUM))
4466 /* SHA3 algorithm variants are only registered for SPU2 version 2 hardware. */
4467 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) &&
4468 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2))
4471 hash->halg.base.cra_module = THIS_MODULE;
4472 hash->halg.base.cra_priority = hash_pri;
4473 hash->halg.base.cra_alignmask = 0;
4474 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s);
4475 hash->halg.base.cra_init = ahash_cra_init;
4476 hash->halg.base.cra_exit = generic_cra_exit;
4477 hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC |
4478 CRYPTO_ALG_ALLOCATES_MEMORY;
4479 hash->halg.statesize = sizeof(struct spu_hash_export_s);
4481 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
4482 hash->init = ahash_init;
4483 hash->update = ahash_update;
4484 hash->final = ahash_final;
4485 hash->finup = ahash_finup;
4486 hash->digest = ahash_digest;
4487 if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
4488 ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
4489 (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
4490 hash->setkey = ahash_setkey;
4493 hash->setkey = ahash_hmac_setkey;
4494 hash->init = ahash_hmac_init;
4495 hash->update = ahash_hmac_update;
4496 hash->final = ahash_hmac_final;
4497 hash->finup = ahash_hmac_finup;
4498 hash->digest = ahash_hmac_digest;
4500 hash->export = ahash_export;
4501 hash->import = ahash_import;
4503 err = crypto_register_ahash(hash);
4504 /* Mark alg as having been registered, if successful */
4506 driver_alg->registered = true;
4507 pr_debug(" registered ahash %s\n",
4508 hash->halg.base.cra_driver_name);
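/**
 * spu_register_aead() - Fill in the common crypto API fields of an AEAD
 * algorithm and register it with the kernel crypto API.
 * @driver_alg: the driver's descriptor for the algorithm to register
 *
 * Return: 0 on success, else the error from crypto_register_aead()
 */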
static int spu_register_aead(struct iproc_alg_s *driver_alg)
{
	struct aead_alg *aead = &driver_alg->alg.aead;
	int err;

	aead->base.cra_module = THIS_MODULE;
	aead->base.cra_priority = aead_pri;
	aead->base.cra_alignmask = 0;
	aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);

	aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
	/* setkey set in alg initialization */
	aead->setauthsize = aead_setauthsize;
	aead->encrypt = aead_encrypt;
	aead->decrypt = aead_decrypt;
	aead->init = aead_cra_init;
	aead->exit = aead_cra_exit;

	err = crypto_register_aead(aead);
	/* Mark alg as having been registered, if successful */
	if (err == 0)
		driver_alg->registered = true;
	pr_debug("  registered aead %s\n", aead->base.cra_driver_name);
	return err;
}
/* Register all the crypto algorithms the device supports */
static int spu_algs_register(struct device *dev)
{
	int i, j;
	int err;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			err = spu_register_skcipher(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			err = spu_register_ahash(&driver_algs[i]);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			err = spu_register_aead(&driver_algs[i]);
			break;
		default:
			dev_err(dev,
				"iproc-crypto: unknown alg type: %d",
				driver_algs[i].type);
			err = -EINVAL;
		}

		if (err) {
			dev_err(dev, "alg registration failed with error %d\n",
				err);
			goto err_algs;
		}
	}

	return 0;

err_algs:
	/* Unwind: unregister everything registered before the failure */
	for (j = 0; j < i; j++) {
		/* Skip any algorithm not registered */
		if (!driver_algs[j].registered)
			continue;
		switch (driver_algs[j].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[j].alg.skcipher);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[j].alg.hash);
			driver_algs[j].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[j].alg.aead);
			driver_algs[j].registered = false;
			break;
		}
	}
	return err;
}
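/*
 * For illustration only: once registration succeeds, kernel consumers reach
 * the SPU-backed algs through the ordinary crypto API; nothing is exported
 * directly by this driver. A minimal sketch (not part of this driver; key
 * setup and request handling are elided):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		// ... build a skcipher_request, call crypto_skcipher_encrypt()
 *		crypto_free_skcipher(tfm);
 *	}
 *
 * Because cipher_pri is set above the generic software algs, the crypto
 * API's priority-based selection prefers this driver's implementation when
 * the hardware is present.
 */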
/* ==================== Kernel Platform API ==================== */

static struct spu_type_subtype spum_ns2_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2
};

static struct spu_type_subtype spum_nsp_types = {
	SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP
};

static struct spu_type_subtype spu2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1
};

static struct spu_type_subtype spu2_v2_types = {
	SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2
};

static const struct of_device_id bcm_spu_dt_ids[] = {
	{
		.compatible = "brcm,spum-crypto",
		.data = &spum_ns2_types,
	},
	{
		.compatible = "brcm,spum-nsp-crypto",
		.data = &spum_nsp_types,
	},
	{
		.compatible = "brcm,spu2-crypto",
		.data = &spu2_types,
	},
	{
		.compatible = "brcm,spu2-v2-crypto",
		.data = &spu2_v2_types,
	},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids);
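/*
 * For reference, an illustrative device tree node that would match the
 * table above. The unit address, register window, and mailbox phandle are
 * placeholders, not values from any particular SoC:
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spu2-v2-crypto";
 *		reg = <0x612d0000 0x1000>;
 *		mboxes = <&pdc0>;
 *	};
 *
 * spu_dt_read() below derives num_chan from the "mboxes" property and the
 * SPU type/subtype from the matched .data pointer.
 */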
static int spu_dt_read(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	struct resource *spu_ctrl_regs;
	const struct spu_type_subtype *matched_spu_type;
	struct device_node *dn = pdev->dev.of_node;
	int err, i;

	/* Count number of mailbox channels */
	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");

	matched_spu_type = of_device_get_match_data(dev);
	if (!matched_spu_type) {
		dev_err(dev, "Failed to match device\n");
		return -ENODEV;
	}

	spu->spu_type = matched_spu_type->type;
	spu->spu_subtype = matched_spu_type->subtype;

	for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs =
		platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) {
		/* Map this SPU's control registers */
		spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs);
		if (IS_ERR(spu->reg_vbase[i])) {
			err = PTR_ERR(spu->reg_vbase[i]);
			dev_err(dev, "Failed to map registers: %d\n",
				err);
			spu->reg_vbase[i] = NULL;
			return err;
		}
	}
	spu->num_spu = i;
	dev_dbg(dev, "Device has %d SPUs", spu->num_spu);

	return 0;
}
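/**
 * bcm_spu_probe() - Initialize the SPU device: read the device tree, set up
 * mailbox channels, select the BCM header length for the SPU type, register
 * the hardware-specific SPU functions, initialize counters and debugfs, and
 * register the supported crypto algorithms.
 * @pdev:  platform device for the SPU
 *
 * Return: 0 on success, negative error code otherwise
 */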
static int bcm_spu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spu_hw *spu = &iproc_priv.spu;
	int err;

	iproc_priv.pdev = pdev;
	platform_set_drvdata(iproc_priv.pdev, &iproc_priv);

	err = spu_dt_read(pdev);
	if (err < 0)
		goto failure;

	err = spu_mb_init(dev);
	if (err < 0)
		goto failure;

	if (spu->spu_type == SPU_TYPE_SPUM)
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(dev, spu->spu_type, spu->spu_subtype);
	spu_counters_init();
	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
failure:
	spu_mb_release(pdev);
	dev_err(dev, "%s failed with error %d.\n", __func__, err);
	return err;
}
static int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not all algorithms were registered, depending on whether
		 * hardware is SPU or SPU2. So here we make sure to skip
		 * those algorithms that were not previously registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			crypto_unregister_skcipher(&driver_algs[i].alg.skcipher);
			dev_dbg(dev, "  unregistered cipher %s\n",
				driver_algs[i].alg.skcipher.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, "  unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, "  unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}
/* ===== Kernel Module API ===== */

static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		.name = "brcm-spu-crypto",
		.of_match_table = of_match_ptr(bcm_spu_dt_ids),
	},
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");