// SPDX-License-Identifier: GPL-2.0-only
/*
 *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 *    Copyright (C) 2014-2017  Axis Communications AB
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/xts.h>
/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX	32

#define PDMA_OUT_CFG		0x0000
#define PDMA_OUT_BUF_CFG	0x0004
#define PDMA_OUT_CMD		0x0008
#define PDMA_OUT_DESCRQ_PUSH	0x0010
#define PDMA_OUT_DESCRQ_STAT	0x0014

#define A6_PDMA_IN_CFG		0x0028
#define A6_PDMA_IN_BUF_CFG	0x002c
#define A6_PDMA_IN_CMD		0x0030
#define A6_PDMA_IN_STATQ_PUSH	0x0038
#define A6_PDMA_IN_DESCRQ_PUSH	0x0044
#define A6_PDMA_IN_DESCRQ_STAT	0x0048
#define A6_PDMA_INTR_MASK	0x0068
#define A6_PDMA_ACK_INTR	0x006c
#define A6_PDMA_MASKED_INTR	0x0074

#define A7_PDMA_IN_CFG		0x002c
#define A7_PDMA_IN_BUF_CFG	0x0030
#define A7_PDMA_IN_CMD		0x0034
#define A7_PDMA_IN_STATQ_PUSH	0x003c
#define A7_PDMA_IN_DESCRQ_PUSH	0x0048
#define A7_PDMA_IN_DESCRQ_STAT	0x004C
#define A7_PDMA_INTR_MASK	0x006c
#define A7_PDMA_ACK_INTR	0x0070
#define A7_PDMA_MASKED_INTR	0x0078

#define PDMA_OUT_CFG_EN				BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)

#define PDMA_OUT_CMD_START			BIT(0)
#define A6_PDMA_OUT_CMD_STOP			BIT(3)
#define A7_PDMA_OUT_CMD_STOP			BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN		GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define PDMA_IN_CFG_EN				BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE		GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE		GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE		GENMASK(14, 10)

#define PDMA_IN_CMD_START			BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT		BIT(2)
#define A6_PDMA_IN_CMD_STOP			BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT		BIT(1)
#define A7_PDMA_IN_CMD_STOP			BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR			GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN			GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR		GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL		GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE		GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA		BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP		BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA		BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP		BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH		BIT(5)

#define A6_CRY_MD_OPER		GENMASK(19, 16)

#define A6_CRY_MD_HASH_SEL_CTX	GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN	BIT(23)

#define A6_CRY_MD_CIPHER_LEN	GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR	BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK	BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ	BIT(24)

#define A7_CRY_MD_OPER		GENMASK(11, 8)

#define A7_CRY_MD_HASH_SEL_CTX	GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN	BIT(15)

#define A7_CRY_MD_CIPHER_LEN	GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR	BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK	BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ	BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc     0x00000002
#define regk_crypto_aes_ctr     0x00000003
#define regk_crypto_aes_ecb     0x00000001
#define regk_crypto_aes_gcm     0x00000004
#define regk_crypto_aes_xts     0x00000005
#define regk_crypto_cache       0x00000002
#define a6_regk_crypto_dlkey    0x0000000a
#define a7_regk_crypto_dlkey    0x0000000e
#define regk_crypto_ext         0x00000001
#define regk_crypto_hmac_sha1   0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_init        0x00000000
#define regk_crypto_key_128     0x00000000
#define regk_crypto_key_192     0x00000001
#define regk_crypto_key_256     0x00000002
#define regk_crypto_null        0x00000000
#define regk_crypto_sha1        0x00000006
#define regk_crypto_sha256      0x00000008
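/*
 * Illustrative sketch, not used by the driver: how a 4-byte cipher metadata
 * word is composed with FIELD_PREP() from the field definitions and
 * regk_crypto_* constants above, here for an AES-CBC decryption with a
 * 128-bit key on the ARTPEC-6 register layout. The function name is
 * hypothetical.
 */
static inline u32 example_a6_cbc128_decrypt_md(void)
{
	u32 md = 0;

	md |= FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc);
	md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128);
	md |= A6_CRY_MD_CIPHER_DECR;

	return md;
}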
/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	} __packed;
} __packed;

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
} __packed;
/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME		"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5
/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  | ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |            |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
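/*
 * Illustrative sketch, not part of the driver: a minimal two-descriptor
 * packet matching the picture above. Descriptor 0 carries the 4-byte
 * metadata inline as a "short" descriptor; descriptor 1 points at the
 * payload by physical address and terminates the packet with .eop.
 * The function name is hypothetical.
 */
static inline void example_build_packet(struct pdma_descr descs[2],
					u32 md, dma_addr_t payload,
					size_t payload_len)
{
	memset(descs, 0, 2 * sizeof(*descs));

	descs[0].ctrl.short_descr = 1;		/* metadata fits inline */
	descs[0].ctrl.short_len = sizeof(md);
	memcpy(descs[0].shrt.data, &md, sizeof(md));

	descs[1].data.buf = payload;		/* payload by bus address */
	descs[1].data.len = payload_len;
	descs[1].ctrl.eop = 1;			/* last descriptor of packet */
}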
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};
struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};
struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};
enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};
struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};
enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};
struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};
struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};
struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};
struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};
struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};
struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};
struct artpec6_crypto_aead_hw_ctx {
	__be64	aad_length_bits;
	__be64  text_length_bits;
	__u8	J0[AES_BLOCK_SIZE];
} __packed;
struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};
/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;
#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};
static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);
static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static void
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);
struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};
static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}
static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}
static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}
static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
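/*
 * Illustrative sketch, not part of the driver: walking a scatterlist in
 * chunk-sized steps with the helpers above. Each iteration looks at the
 * remaining bytes of the current sg entry and then advances past them.
 * The function name is hypothetical.
 */
static inline void example_walk_all(struct scatterlist *sg)
{
	struct artpec6_crypto_walk awalk;

	artpec6_crypto_walk_init(&awalk, sg);
	while (awalk.sg) {
		size_t chunk = artpec6_crypto_walk_chunklen(&awalk);
		dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&awalk);

		pr_debug("chunk %pad:%zu\n", &addr, chunk);
		artpec6_crypto_walk_advance(&awalk, chunk);
	}
}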
static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);

		list_del(&b->list);
		kfree(b);
	}
}
static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}
static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}
static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}
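/*
 * Illustrative sketch, not part of the driver: the DESCRQ/STATQ push words
 * written above pack a 64-byte-aligned descriptor array address (the low
 * six bits are dropped by the >> 6) together with the descriptor count
 * minus one. The function name is hypothetical.
 */
static inline u32 example_descrq_push_word(dma_addr_t descr_array, u32 count)
{
	return FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, count - 1) |
	       FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, descr_array >> 6);
}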
static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}
static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}
/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 * @common: The common part of the request
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 * @common: The common part of the request
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);

	return 0;
}
static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}
static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}
static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written to memory as well as the device's status write to be
	 * visible to the CPU.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}
static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}
/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 * @common: The common part of the request
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}
/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 * @common: The common part of the request
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));
	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 * @common: The common part of the request
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}
static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}
static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}
static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
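/*
 * Illustrative sketch, not part of the driver: the alignment test that
 * decides between direct DMA and a bounce buffer for IN data in the
 * function above. Anything not starting on, or shorter than, a cache
 * line goes through a bounce buffer. The function name is hypothetical.
 */
static inline bool example_needs_bounce(dma_addr_t addr, size_t len)
{
	return !IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX) ||
	       len < ARTPEC_CACHE_LINE_MAX;
}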
static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
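/*
 * Illustrative sketch, not part of the driver: length of the unaligned head
 * of an OUT chunk. The function above copies this head into a short
 * descriptor so that the remaining DMA transfer starts 4-byte aligned.
 * The function name is hypothetical.
 */
static inline size_t example_out_head_len(dma_addr_t addr, size_t chunk)
{
	return min_t(size_t, chunk, 4 - (addr & 3));
}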
/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor list is empty or has overflowed
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}
/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;

	return 0;
}
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hashed data in bytes
 * @bitcount: The total length of the hashed data in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
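/*
 * Illustrative sketch, not part of the driver: the same arithmetic as
 * create_hash_pad() specialized for SHA-256 (64-byte blocks, 8-byte length
 * field). For a 3-byte message this yields 52 zero bytes, so the pad is
 * 1 (0x80) + 52 + 8 = 61 bytes and the final block totals 64 bytes.
 * The function name is hypothetical.
 */
static inline size_t example_sha256_pad_len(u64 msglen)
{
	unsigned int mod = 512 / 8;
	unsigned int target = 448 / 8 - 1;
	unsigned int diff = msglen & (mod - 1);
	unsigned int pad = diff > target ? target + mod - diff : target - diff;

	return pad + 8 + 1;	/* zeros + 64-bit length + leading 0x80 */
}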
static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}
static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}
static void
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
}
/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fall back if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}
static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}
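/*
 * Illustrative sketch, not part of the driver: the 32-bit counter overflow
 * test used by artpec6_crypto_ctr_crypt(). With the last four IV bytes
 * holding the big-endian counter, e.g. counter 0xfffffffe and 3 blocks
 * wraps around and must take the software fallback path. The function name
 * is hypothetical.
 */
static inline bool example_ctr_will_wrap(u32 counter, unsigned int nblks)
{
	return counter + nblks < counter;	/* unsigned wrap-around */
}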
/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}
static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32)
		return -EINVAL;

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}
static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}
static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);
	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}
	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}
	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}
	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}
	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						       req_ctx->pad_buffer,
						       hash_pad_len, false,
						       false);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
	}
	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}
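/*
 * Illustrative sketch, not part of the driver: the whole-block bookkeeping
 * used by the HASH_FLAG_UPDATE path above. Only complete blocks are sent
 * to the hardware; the remainder stays in the request's partial buffer
 * until the next update or the final round. The function name is
 * hypothetical.
 */
static inline size_t example_ready_bytes(size_t partial, size_t new_bytes,
					 size_t blocksize)
{
	return round_down(partial + new_bytes, blocksize);
}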
static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}
static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}
static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}
static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}
static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}
static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}
static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}
static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_verify_key(cipher, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The asynchronous crypto request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %zu!\n",
		       MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
		       MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md),
					     false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}
	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
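/*
 * Illustrative sketch, not part of the driver: the number of padding bytes
 * the hardware needs to round a CTR/XTS request up to a whole AES block,
 * as computed in artpec6_crypto_prepare_crypto() above. For example, 20
 * bytes of text need 12 bytes of padding. The function name is
 * hypothetical.
 */
static inline size_t example_ctr_pad(size_t cryptlen)
{
	return ALIGN(cryptlen, AES_BLOCK_SIZE) - cryptlen;
}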
static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	/* Key */
	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     (void *) &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md), false,
					     false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= crypto_aead_authsize(cipher);

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);

	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
	if (ret)
		return ret;

	{
		struct artpec6_crypto_walk walk;

		artpec6_crypto_walk_init(&walk, areq->src);

		/* Associated data */
		count = areq->assoclen;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(areq->assoclen, 16)) {
			size_t assoc_pad = 16 - (areq->assoclen % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     assoc_pad, false,
							     false);
			if (ret)
				return ret;
		}

		/* Data to crypto */
		count = input_length;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(input_length, 16)) {
			size_t crypto_pad = 16 - (input_length % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     crypto_pad,
							     false,
							     false);
			if (ret)
				return ret;
		}
	}

	/* Data from crypto */
	{
		struct artpec6_crypto_walk walk;
		size_t output_len = areq->cryptlen;

		if (req_ctx->decrypt)
			output_len -= crypto_aead_authsize(cipher);

		artpec6_crypto_walk_init(&walk, areq->dst);

		/* skip associated data in the output */
		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
		if (count)
			return -EINVAL;

		count = output_len;
		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
		if (ret)
			return ret;

		/* Put padding between the cryptotext and the auth tag */
		if (!IS_ALIGNED(output_len, 16)) {
			size_t crypto_pad = 16 - (output_len % 16);

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer,
							    crypto_pad, false);
			if (ret)
				return ret;
		}

		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for later compare against the input tag.
		 */

		if (req_ctx->decrypt) {
			ret = artpec6_crypto_setup_in_descr(common,
				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
			if (ret)
				return ret;

		} else {
			/* For encryption the requested tag size may be smaller
			 * than the hardware's generated tag.
			 */
			size_t authsize = crypto_aead_authsize(cipher);

			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
								authsize);
			if (ret)
				return ret;

			if (authsize < AES_BLOCK_SIZE) {
				count = AES_BLOCK_SIZE - authsize;
				ret = artpec6_crypto_setup_in_descr(common,
					ac->pad_buffer,
					count, false);
				if (ret)
					return ret;
			}
		}
	}

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}
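/*
 * Illustrative sketch, not part of the driver: the J0 block layout built in
 * artpec6_crypto_prepare_aead(). For a 12-byte GCM IV the 32-bit counter
 * part starts at 1; the hardware omits the initial increment, hence the
 * literal 0x00000001. The function name is hypothetical.
 */
static inline void example_gcm_j0(u8 j0[AES_BLOCK_SIZE],
				  const u8 iv[GCM_AES_IV_SIZE])
{
	memcpy(j0, iv, GCM_AES_IV_SIZE);
	memcpy(j0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
}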
static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
					 struct list_head *completions)
{
	struct artpec6_crypto_req_common *req;

	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
		req = list_first_entry(&ac->queue,
				       struct artpec6_crypto_req_common,
				       list);
		list_move_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);

		list_add_tail(&req->complete_in_progress, completions);
	}

	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
	 * recheck the status on timeout. Since the cases are expected to be
	 * very rare, we use a relatively large timeout value. There should be
	 * no noticeable negative effect if we timeout spuriously.
	 */
	if (ac->pending_count)
		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
	else
		del_timer(&ac->timer);
}
static void artpec6_crypto_timeout(struct timer_list *t)
{
	struct artpec6_crypto *ac = from_timer(ac, t, timer);

	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");

	tasklet_schedule(&ac->task);
}
static void artpec6_crypto_task(unsigned long data)
{
	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
	struct artpec6_crypto_req_common *req;
	struct artpec6_crypto_req_common *n;
	struct list_head complete_done;
	struct list_head complete_in_progress;

	INIT_LIST_HEAD(&complete_done);
	INIT_LIST_HEAD(&complete_in_progress);

	if (list_empty(&ac->pending)) {
		pr_debug("Spurious IRQ\n");
		return;
	}

	spin_lock(&ac->queue_lock);

	list_for_each_entry_safe(req, n, &ac->pending, list) {
		struct artpec6_crypto_dma_descriptors *dma = req->dma;
		u32 stat;
		dma_addr_t stataddr;

		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
		dma_sync_single_for_cpu(artpec6_crypto_dev,
					stataddr,
					4,
					DMA_BIDIRECTIONAL);

		stat = req->dma->stat[req->dma->in_cnt-1];

		/* A non-zero final status descriptor indicates
		 * this job has finished.
		 */
		pr_debug("Request %p status is %X\n", req, stat);
		if (!stat)
			break;

		/* Allow testing of timeout handling with fault injection */
#ifdef CONFIG_FAULT_INJECTION
		if (should_fail(&artpec6_crypto_fail_status_read, 1))
			break;
#endif

		pr_debug("Completing request %p\n", req);

		list_move_tail(&req->list, &complete_done);

		ac->pending_count--;
	}

	artpec6_crypto_process_queue(ac, &complete_in_progress);

	spin_unlock(&ac->queue_lock);

	/* Perform the completion callbacks without holding the queue lock
	 * to allow new request submissions from the callbacks.
	 */
	list_for_each_entry_safe(req, n, &complete_done, list) {
		artpec6_crypto_dma_unmap_all(req);
		artpec6_crypto_copy_bounce_buffers(req);
		artpec6_crypto_common_destroy(req);

		req->complete(req->req);
	}

	list_for_each_entry_safe(req, n, &complete_in_progress,
				 complete_in_progress) {
		crypto_request_complete(req->req, -EINPROGRESS);
	}
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
	crypto_request_complete(req, 0);
}
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	skcipher_request_complete(cipher_req, 0);
}
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	skcipher_request_complete(cipher_req, 0);
}
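/*
 * Illustrative sketch, not part of the driver: CBC output IV handling as
 * done by the two completions above. The next chaining IV is the last
 * ciphertext block, copied out of the ciphertext scatterlist on completion
 * (the source list for decryption, the destination list for encryption).
 * The function name is hypothetical.
 */
static inline void example_cbc_next_iv(u8 *iv, struct scatterlist *ctext,
				       unsigned int cryptlen)
{
	scatterwalk_map_and_copy(iv, ctext, cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
}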
static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
{
	int result = 0;

	/* Verify GCM hashtag. */
	struct aead_request *areq = container_of(req,
		struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);

	if (req_ctx->decrypt) {
		u8 input_tag[AES_BLOCK_SIZE];
		unsigned int authsize = crypto_aead_authsize(aead);

		sg_pcopy_to_buffer(areq->src,
				   sg_nents(areq->src),
				   input_tag,
				   authsize,
				   areq->assoclen + areq->cryptlen -
				   authsize);

		if (crypto_memneq(req_ctx->decryption_tag,
				  input_tag, authsize)) {
			pr_debug("***EBADMSG:\n");
			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
					     input_tag, authsize, true);
			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
					     req_ctx->decryption_tag,
					     authsize, true);

			result = -EBADMSG;
		}
	}

	aead_request_complete(areq, result);
}
static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
	crypto_request_complete(req, 0);
}
/*------------------- Hash functions -----------------------------------------*/
static int
artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
	size_t blocksize;
	int ret;

	if (!keylen) {
		pr_err("Invalid length (%d) of HMAC key\n",
			keylen);
		return -EINVAL;
	}

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	if (keylen > blocksize) {
		tfm_ctx->hmac_key_length = blocksize;

		ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen,
					      tfm_ctx->hmac_key);
		if (ret)
			return ret;
	} else {
		memcpy(tfm_ctx->hmac_key, key, keylen);
		tfm_ctx->hmac_key_length = keylen;
	}

	return 0;
}
static int
artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	u32 oper;

	memset(req_ctx, 0, sizeof(*req_ctx));

	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
	if (hmac)
		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);

	switch (type) {
	case ARTPEC6_CRYPTO_HASH_SHA1:
		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
		break;
	case ARTPEC6_CRYPTO_HASH_SHA256:
		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
		break;
	default:
		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO)
		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
	else
		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);

	return 0;
}
static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	int ret;

	if (!req_ctx->common.dma) {
		ret = artpec6_crypto_common_init(&req_ctx->common,
						 &req->base,
						 artpec6_crypto_complete_hash,
						 NULL, 0);

		if (ret)
			return ret;
	}

	ret = artpec6_crypto_prepare_hash(req);
	switch (ret) {
	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
		ret = artpec6_crypto_submit(&req_ctx->common);
		break;

	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
		ret = 0;
		break;

	default:
		artpec6_crypto_common_destroy(&req_ctx->common);
		break;
	}

	return ret;
}
static int artpec6_crypto_hash_final(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
static int artpec6_crypto_hash_update(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE;

	return artpec6_crypto_prepare_submit_hash(req);
}
static int artpec6_crypto_sha1_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
}
static int artpec6_crypto_sha1_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
static int artpec6_crypto_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
}
static int artpec6_crypto_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}
static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
				    const char *base_hash_name)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct artpec6_hash_request_context));
	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	if (base_hash_name) {
		struct crypto_shash *child;

		child = crypto_alloc_shash(base_hash_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(child))
			return PTR_ERR(child);

		tfm_ctx->child_hash = child;
	}

	return 0;
}
static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, NULL);
}
static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
{
	return artpec6_crypto_ahash_init_common(tfm, "sha256");
}
static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}
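/*
 * artpec6_crypto_hash_export()/artpec6_crypto_hash_import() serialize the
 * in-progress hash state (digest count, partial block and digest state)
 * into struct artpec6_hash_export_state, which is what halg.statesize
 * advertises below. Only the variant-independent operation id is stored;
 * the metadata word is re-encoded for the running hardware on import.
 */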
static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
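/*
 * init_crypto_hw - partition the PDMA elastic memory and enable the block
 *
 * Programs the OUT and IN buffer configuration registers, enables both
 * channels, and unmasks the IN DATA and EOP-flush interrupts used by
 * artpec6_crypto_irq().
 */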
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64 byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */

	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");
	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);

	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}
static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}
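/*
 * artpec6_crypto_irq - PDMA interrupt handler
 *
 * Acknowledges the IN DATA and EOP-flush interrupts, and schedules the
 * completion tasklet once a status flush has landed in memory.
 */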
static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}

	/* We get two interrupt notifications from each job.
	 * The in_data means all data was sent to memory and then
	 * we request a status flush command to write the per-job
	 * status to its status vector. This ensures that the
	 * tasklet can detect exactly how many submitted jobs
	 * have finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}
/*------------------- Algorithm definitions ----------------------------------*/
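/*
 * Usage note (illustrative, not part of the driver): these algorithms are
 * reached through the generic kernel crypto API, e.g. for the sha256 entry
 * below:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 * The cra_priority of 300 makes the hardware implementations preferred
 * over the generic software ones while this driver is bound.
 */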
/* Hashes */
static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};
static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
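	/*
	 * AES - CTR. Marked CRYPTO_ALG_NEED_FALLBACK: the ctr entry points
	 * are expected to defer to a software fallback for requests the
	 * hardware cannot handle, such as a counter that would wrap.
	 */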
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - XTS: the key is two concatenated AES keys, hence the
	 * doubled min/max key sizes.
	 */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2*AES_MIN_KEY_SIZE,
		.max_keysize = 2*AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};
static struct aead_alg aead_algos[] = {
	{
		.init = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_alignmask = 3,
			.cra_module = THIS_MODULE,
		},
	}
};
#ifdef CONFIG_DEBUG_FS

static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);

	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
	dbgfs_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */
static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
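/*
 * Device tree sketch (illustrative only; the address and interrupt values
 * are placeholders, the binding document is authoritative):
 *
 *	crypto@f4264000 {
 *		compatible = "axis,artpec6-crypto";
 *		reg = <0xf4264000 0x1000>;
 *		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */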
static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	int irq;
	int err;

	/* Only a single instance of the hardware is supported. */
	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64,
		0,
		NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);
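	/*
	 * The hardware DMA engine reads padding and zero data from these
	 * buffers, so each must start on a cache-line boundary: allocate
	 * twice ARTPEC_CACHE_LINE_MAX and align the pointer up.
	 */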
	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
	return err;
}
static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}
static struct platform_driver artpec6_crypto_driver = {
	.probe   = artpec6_crypto_probe,
	.remove  = artpec6_crypto_remove,
	.driver  = {
		.name  = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};

module_platform_driver(artpec6_crypto_driver);
MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");