crypto: aspeed - Use new crypto_engine_op interface
[platform/kernel/linux-starfive.git] / drivers / crypto / aspeed / aspeed-acry.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2021 Aspeed Technology Inc.
4  */
5 #include <crypto/engine.h>
6 #include <crypto/internal/akcipher.h>
7 #include <crypto/internal/rsa.h>
8 #include <crypto/scatterwalk.h>
9 #include <linux/clk.h>
10 #include <linux/count_zeros.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/err.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/mfd/syscon.h>
16 #include <linux/module.h>
17 #include <linux/of.h>
18 #include <linux/of_device.h>
19 #include <linux/of_address.h>
20 #include <linux/of_irq.h>
21 #include <linux/platform_device.h>
22 #include <linux/regmap.h>
23 #include <linux/slab.h>
24 #include <linux/string.h>
25
26 #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
27 #define ACRY_DBG(d, fmt, ...)   \
28         dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
29 #else
30 #define ACRY_DBG(d, fmt, ...)   \
31         dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
32 #endif
33
34 /*****************************
35  *                           *
36  * ACRY register definitions *
37  *                           *
38  * ***************************/
39 #define ASPEED_ACRY_TRIGGER             0x000   /* ACRY Engine Control: trigger */
40 #define ASPEED_ACRY_DMA_CMD             0x048   /* ACRY Engine Control: Command */
41 #define ASPEED_ACRY_DMA_SRC_BASE        0x04C   /* ACRY DRAM base address for DMA */
42 #define ASPEED_ACRY_DMA_LEN             0x050   /* ACRY Data Length of DMA */
43 #define ASPEED_ACRY_RSA_KEY_LEN         0x058   /* ACRY RSA Exp/Mod Key Length (Bits) */
44 #define ASPEED_ACRY_INT_MASK            0x3F8   /* ACRY Interrupt Mask */
45 #define ASPEED_ACRY_STATUS              0x3FC   /* ACRY Interrupt Status */
46
47 /* rsa trigger */
48 #define  ACRY_CMD_RSA_TRIGGER           BIT(0)
49 #define  ACRY_CMD_DMA_RSA_TRIGGER       BIT(1)
50
51 /* rsa dma cmd */
52 #define  ACRY_CMD_DMA_SRAM_MODE_RSA     (0x3 << 4)
53 #define  ACRY_CMD_DMEM_AHB              BIT(8)
54 #define  ACRY_CMD_DMA_SRAM_AHB_ENGINE   0
55
56 /* rsa key len */
57 #define  RSA_E_BITS_LEN(x)              ((x) << 16)
58 #define  RSA_M_BITS_LEN(x)              (x)
59
60 /* acry isr */
61 #define  ACRY_RSA_ISR                   BIT(1)
62
63 #define ASPEED_ACRY_BUFF_SIZE           0x1800  /* DMA buffer size */
64 #define ASPEED_ACRY_SRAM_MAX_LEN        2048    /* ACRY SRAM maximum length (Bytes) */
65 #define ASPEED_ACRY_RSA_MAX_KEY_LEN     512     /* ACRY RSA maximum key length (Bytes) */
66
67 #define CRYPTO_FLAGS_BUSY               BIT(1)
68 #define BYTES_PER_DWORD                 4
69
70 /*****************************
71  *                           *
72  * AHBC register definitions *
73  *                           *
74  * ***************************/
75 #define AHBC_REGION_PROT                0x240
76 #define REGION_ACRYM                    BIT(23)
77
78 #define ast_acry_write(acry, val, offset)       \
79         writel((val), (acry)->regs + (offset))
80
81 #define ast_acry_read(acry, offset)             \
82         readl((acry)->regs + (offset))
83
84 struct aspeed_acry_dev;
85
86 typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);
87
/* Per-device state for one ACRY RSA accelerator instance. */
struct aspeed_acry_dev {
        void __iomem                    *regs;          /* ACRY engine MMIO registers */
        struct device                   *dev;
        int                             irq;
        struct clk                      *clk;
        struct regmap                   *ahbc;          /* AHBC syscon, gates SRAM protection */

        struct akcipher_request         *req;           /* request currently on the engine */
        struct tasklet_struct           done_task;      /* bottom half scheduled from the ISR */
        aspeed_acry_fn_t                resume;         /* continuation invoked by done_task */
        unsigned long                   flags;          /* CRYPTO_FLAGS_BUSY bookkeeping */

        /* ACRY output SRAM buffer */
        void __iomem                    *acry_sram;

        /* ACRY input DMA buffer */
        void                            *buf_addr;
        dma_addr_t                      buf_dma_addr;

        struct crypto_engine            *crypt_engine_rsa;

        /* ACRY SRAM memory mapped: DRAM index -> engine SRAM index tables,
         * filled in by aspeed_acry_sram_mapping() at probe time.
         */
        int                             exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
        int                             mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
        int                             data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
};
114
/* Per-transform (tfm) context for the RSA algorithm. */
struct aspeed_acry_ctx {
        struct aspeed_acry_dev          *acry_dev;

        struct rsa_key                  key;    /* parsed key; fields point into setkey() input */
        int                             enc;    /* 1 = encrypt (use e), 0 = decrypt (use d) */
        /* Driver-owned kmemdup'd copies of the key components (kfree_sensitive'd). */
        u8                              *n;
        u8                              *e;
        u8                              *d;
        size_t                          n_sz;
        size_t                          e_sz;
        size_t                          d_sz;

        aspeed_acry_fn_t                trigger;        /* operation started by the engine worker */

        struct crypto_akcipher          *fallback_tfm;  /* software RSA for oversized keys */
};
131
/* Pairs an engine-registered akcipher algorithm with its owning device. */
struct aspeed_acry_alg {
        struct aspeed_acry_dev          *acry_dev;
        struct akcipher_engine_alg      akcipher;
};
136
/* Selects which SRAM mapping table aspeed_acry_rsa_ctx_copy() scatters into. */
enum aspeed_rsa_key_mode {
        ASPEED_RSA_EXP_MODE = 0,
        ASPEED_RSA_MOD_MODE,
        ASPEED_RSA_DATA_MODE,
};
142
/* Recover the akcipher request embedding a generic crypto_async_request. */
static inline struct akcipher_request *
        akcipher_request_cast(struct crypto_async_request *req)
{
        return container_of(req, struct akcipher_request, base);
}
148
149 static int aspeed_acry_do_fallback(struct akcipher_request *req)
150 {
151         struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
152         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
153         int err;
154
155         akcipher_request_set_tfm(req, ctx->fallback_tfm);
156
157         if (ctx->enc)
158                 err = crypto_akcipher_encrypt(req);
159         else
160                 err = crypto_akcipher_decrypt(req);
161
162         akcipher_request_set_tfm(req, cipher);
163
164         return err;
165 }
166
167 static bool aspeed_acry_need_fallback(struct akcipher_request *req)
168 {
169         struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
170         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
171
172         return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
173 }
174
175 static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
176                                     struct akcipher_request *req)
177 {
178         if (aspeed_acry_need_fallback(req)) {
179                 ACRY_DBG(acry_dev, "SW fallback\n");
180                 return aspeed_acry_do_fallback(req);
181         }
182
183         return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
184 }
185
186 static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
187 {
188         struct akcipher_request *req = akcipher_request_cast(areq);
189         struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
190         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
191         struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
192
193         acry_dev->req = req;
194         acry_dev->flags |= CRYPTO_FLAGS_BUSY;
195
196         return ctx->trigger(acry_dev);
197 }
198
199 static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
200 {
201         struct akcipher_request *req = acry_dev->req;
202
203         acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;
204
205         crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);
206
207         return err;
208 }
209
210 /*
211  * Copy Data to DMA buffer for engine used.
212  */
213 static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
214                                               u8 *buf, struct scatterlist *src,
215                                               size_t nbytes)
216 {
217         static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
218         int i = 0, j;
219         int data_idx;
220
221         ACRY_DBG(acry_dev, "\n");
222
223         scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);
224
225         for (j = nbytes - 1; j >= 0; j--) {
226                 data_idx = acry_dev->data_byte_mapping[i];
227                 buf[data_idx] =  dram_buffer[j];
228                 i++;
229         }
230
231         for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
232                 data_idx = acry_dev->data_byte_mapping[i];
233                 buf[data_idx] = 0;
234         }
235 }
236
/*
 * Copy Exp/Mod to DMA buffer for engine used.
 *
 * Params:
 * - mode 0 : Exponential
 * - mode 1 : Modulus
 *
 * Example:
 * - DRAM memory layout:
 *      D[0], D[4], D[8], D[12]
 * - ACRY SRAM memory layout should reverse the order of source data:
 *      D[12], D[8], D[4], D[0]
 *
 * Returns the effective bit length of the value (leading zeros stripped),
 * or -ENOMEM if it exceeds the engine's maximum key size.
 */
static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
                                    const void *xbuf, size_t nbytes,
                                    enum aspeed_rsa_key_mode mode)
{
        const u8 *src = xbuf;
        __le32 *dw_buf = buf;
        int nbits, ndw;
        int i, j, idx;
        u32 data = 0;

        ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);

        if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
                return -ENOMEM;

        /* Remove the leading zeros */
        while (nbytes > 0 && src[0] == 0) {
                src++;
                nbytes--;
        }

        /* Bit length = 8 * nbytes minus the unused high bits of the MSB. */
        nbits = nbytes * 8;
        if (nbytes > 0)
                nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);

        /* double-word alignment */
        ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);

        if (nbytes > 0) {
                /* Number of pad bytes in a partial most-significant dword. */
                i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
                i %= BYTES_PER_DWORD;

                /* Walk dwords from most significant (j = ndw) down to 1. */
                for (j = ndw; j > 0; j--) {
                        /* Pack up to four big-endian source bytes into one dword. */
                        for (; i < BYTES_PER_DWORD; i++) {
                                data <<= 8;
                                data |= *src++;
                        }

                        i = 0;

                        /* Scatter the dword to its engine SRAM slot. */
                        if (mode == ASPEED_RSA_EXP_MODE)
                                idx = acry_dev->exp_dw_mapping[j - 1];
                        else /* mode == ASPEED_RSA_MOD_MODE */
                                idx = acry_dev->mod_dw_mapping[j - 1];

                        dw_buf[idx] = cpu_to_le32(data);
                }
        }

        return nbits;
}
301
302 static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
303 {
304         struct akcipher_request *req = acry_dev->req;
305         u8 __iomem *sram_buffer = acry_dev->acry_sram;
306         struct scatterlist *out_sg = req->dst;
307         static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
308         int leading_zero = 1;
309         int result_nbytes;
310         int i = 0, j;
311         int data_idx;
312
313         /* Set Data Memory to AHB(CPU) Access Mode */
314         ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
315
316         /* Disable ACRY SRAM protection */
317         regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
318                            REGION_ACRYM, 0);
319
320         result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;
321
322         for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
323                 data_idx = acry_dev->data_byte_mapping[j];
324                 if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
325                         result_nbytes--;
326                 } else {
327                         leading_zero = 0;
328                         dram_buffer[i] = readb(sram_buffer + data_idx);
329                         i++;
330                 }
331         }
332
333         ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
334                  result_nbytes, req->dst_len);
335
336         if (result_nbytes <= req->dst_len) {
337                 scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
338                                          1);
339                 req->dst_len = result_nbytes;
340
341         } else {
342                 dev_err(acry_dev->dev, "RSA engine error!\n");
343         }
344
345         memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
346
347         return aspeed_acry_complete(acry_dev, 0);
348 }
349
350 static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
351 {
352         struct akcipher_request *req = acry_dev->req;
353         struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
354         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
355         int ne, nm;
356
357         if (!ctx->n || !ctx->n_sz) {
358                 dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
359                 return -EINVAL;
360         }
361
362         memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
363
364         /* Copy source data to DMA buffer */
365         aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
366                                           req->src, req->src_len);
367
368         nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
369                                       ctx->n_sz, ASPEED_RSA_MOD_MODE);
370         if (ctx->enc) {
371                 if (!ctx->e || !ctx->e_sz) {
372                         dev_err(acry_dev->dev, "%s: key e is not set\n",
373                                 __func__);
374                         return -EINVAL;
375                 }
376                 /* Copy key e to DMA buffer */
377                 ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
378                                               ctx->e, ctx->e_sz,
379                                               ASPEED_RSA_EXP_MODE);
380         } else {
381                 if (!ctx->d || !ctx->d_sz) {
382                         dev_err(acry_dev->dev, "%s: key d is not set\n",
383                                 __func__);
384                         return -EINVAL;
385                 }
386                 /* Copy key d to DMA buffer */
387                 ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
388                                               ctx->key.d, ctx->key.d_sz,
389                                               ASPEED_RSA_EXP_MODE);
390         }
391
392         ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
393                        ASPEED_ACRY_DMA_SRC_BASE);
394         ast_acry_write(acry_dev, (ne << 16) + nm,
395                        ASPEED_ACRY_RSA_KEY_LEN);
396         ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
397                        ASPEED_ACRY_DMA_LEN);
398
399         acry_dev->resume = aspeed_acry_rsa_transfer;
400
401         /* Enable ACRY SRAM protection */
402         regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
403                            REGION_ACRYM, REGION_ACRYM);
404
405         ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
406         ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
407                           ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);
408
409         /* Trigger RSA engines */
410         ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
411                           ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);
412
413         return 0;
414 }
415
416 static int aspeed_acry_rsa_enc(struct akcipher_request *req)
417 {
418         struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
419         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
420         struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
421
422         ctx->trigger = aspeed_acry_rsa_trigger;
423         ctx->enc = 1;
424
425         return aspeed_acry_handle_queue(acry_dev, req);
426 }
427
428 static int aspeed_acry_rsa_dec(struct akcipher_request *req)
429 {
430         struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
431         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
432         struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
433
434         ctx->trigger = aspeed_acry_rsa_trigger;
435         ctx->enc = 0;
436
437         return aspeed_acry_handle_queue(acry_dev, req);
438 }
439
/* Duplicate raw key material; caller owns the returned buffer (kfree_sensitive). */
static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
{
        return kmemdup(src, len, GFP_KERNEL);
}
444
445 static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
446                             size_t len)
447 {
448         ctx->n_sz = len;
449         ctx->n = aspeed_rsa_key_copy(value, len);
450         if (!ctx->n)
451                 return -ENOMEM;
452
453         return 0;
454 }
455
456 static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
457                             size_t len)
458 {
459         ctx->e_sz = len;
460         ctx->e = aspeed_rsa_key_copy(value, len);
461         if (!ctx->e)
462                 return -ENOMEM;
463
464         return 0;
465 }
466
467 static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
468                             size_t len)
469 {
470         ctx->d_sz = len;
471         ctx->d = aspeed_rsa_key_copy(value, len);
472         if (!ctx->d)
473                 return -ENOMEM;
474
475         return 0;
476 }
477
478 static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
479 {
480         kfree_sensitive(ctx->n);
481         kfree_sensitive(ctx->e);
482         kfree_sensitive(ctx->d);
483         ctx->n_sz = 0;
484         ctx->e_sz = 0;
485         ctx->d_sz = 0;
486 }
487
488 static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
489                                   unsigned int keylen, int priv)
490 {
491         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
492         struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
493         int ret;
494
495         if (priv)
496                 ret = rsa_parse_priv_key(&ctx->key, key, keylen);
497         else
498                 ret = rsa_parse_pub_key(&ctx->key, key, keylen);
499
500         if (ret) {
501                 dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
502                         ret);
503                 return ret;
504         }
505
506         /* Aspeed engine supports up to 4096 bits,
507          * Use software fallback instead.
508          */
509         if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
510                 return 0;
511
512         ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
513         if (ret)
514                 goto err;
515
516         ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
517         if (ret)
518                 goto err;
519
520         if (priv) {
521                 ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
522                 if (ret)
523                         goto err;
524         }
525
526         return 0;
527
528 err:
529         dev_err(acry_dev->dev, "rsa set key failed\n");
530         aspeed_rsa_key_free(ctx);
531
532         return ret;
533 }
534
535 static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
536                                        const void *key,
537                                        unsigned int keylen)
538 {
539         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
540         int ret;
541
542         ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
543         if (ret)
544                 return ret;
545
546         return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
547 }
548
549 static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
550                                         const void *key,
551                                         unsigned int keylen)
552 {
553         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
554         int ret;
555
556         ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
557         if (ret)
558                 return ret;
559
560         return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
561 }
562
563 static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
564 {
565         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
566
567         if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
568                 return crypto_akcipher_maxsize(ctx->fallback_tfm);
569
570         return ctx->n_sz;
571 }
572
573 static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
574 {
575         struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
576         struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
577         const char *name = crypto_tfm_alg_name(&tfm->base);
578         struct aspeed_acry_alg *acry_alg;
579
580         acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base);
581
582         ctx->acry_dev = acry_alg->acry_dev;
583
584         ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
585                                                   CRYPTO_ALG_NEED_FALLBACK);
586         if (IS_ERR(ctx->fallback_tfm)) {
587                 dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
588                         name, PTR_ERR(ctx->fallback_tfm));
589                 return PTR_ERR(ctx->fallback_tfm);
590         }
591
592         return 0;
593 }
594
/* Transform teardown: release the software fallback allocated in init. */
static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
        struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);

        crypto_free_akcipher(ctx->fallback_tfm);
}
601
/* Algorithms backed by the ACRY engine: raw RSA with software fallback. */
static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
        {
                .akcipher.base = {
                        .encrypt = aspeed_acry_rsa_enc,
                        .decrypt = aspeed_acry_rsa_dec,
                        /* sign/verify map to the raw private/public ops */
                        .sign = aspeed_acry_rsa_dec,
                        .verify = aspeed_acry_rsa_enc,
                        .set_pub_key = aspeed_acry_rsa_set_pub_key,
                        .set_priv_key = aspeed_acry_rsa_set_priv_key,
                        .max_size = aspeed_acry_rsa_max_size,
                        .init = aspeed_acry_rsa_init_tfm,
                        .exit = aspeed_acry_rsa_exit_tfm,
                        .base = {
                                .cra_name = "rsa",
                                .cra_driver_name = "aspeed-rsa",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
                                             CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY |
                                             CRYPTO_ALG_NEED_FALLBACK,
                                .cra_module = THIS_MODULE,
                                .cra_ctxsize = sizeof(struct aspeed_acry_ctx),
                        },
                },
                .akcipher.op = {
                        .do_one_request = aspeed_acry_do_request,
                },
        },
};
631
632 static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
633 {
634         int i, rc;
635
636         for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
637                 aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
638                 rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
639                 if (rc) {
640                         ACRY_DBG(acry_dev, "Failed to register %s\n",
641                                  aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name);
642                 }
643         }
644 }
645
646 static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
647 {
648         int i;
649
650         for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
651                 crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
652 }
653
/*
 * ACRY interrupt service routine.
 *
 * Acknowledges the raw status, stops the RSA engine on completion, and
 * defers the result readback to the done tasklet (SRAM access is too
 * slow for hard-IRQ context).
 */
static irqreturn_t aspeed_acry_irq(int irq, void *dev)
{
        struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
        u32 sts;

        /* Read and write-one-to-clear the pending status bits. */
        sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
        ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);

        ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);

        if (sts & ACRY_RSA_ISR) {
                /* Stop RSA engine */
                ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);

                /* Only schedule the bottom half if a request is in flight. */
                if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
                        tasklet_schedule(&acry_dev->done_task);
                else
                        dev_err(acry_dev->dev, "RSA no active requests.\n");
        }

        return IRQ_HANDLED;
}
677
/*
 * ACRY SRAM has its own memory layout.
 * Set the DRAM to SRAM indexing for future used.
 *
 * The generated tables interleave the SRAM in repeating 12-dword groups:
 * 4 dwords of exponent, 4 of modulus, then 16 data bytes (4 dwords), as
 * the j counter advances by 1 per dword and skips ahead 8 after every
 * fourth dword.
 */
static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
{
        int i, j = 0;

        for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
                acry_dev->exp_dw_mapping[i] = j;
                acry_dev->mod_dw_mapping[i] = j + 4;
                /* data_byte_mapping holds byte offsets, hence the *4. */
                acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
                acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
                acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
                acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
                j++;
                /* After four consecutive dwords, jump past mod+data slots. */
                j = j % 4 ? j : j + 8;
        }
}
697
/* Done tasklet: run the continuation staged by the trigger (rsa_transfer). */
static void aspeed_acry_done_task(unsigned long data)
{
        struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;

        (void)acry_dev->resume(acry_dev);
}
704
/* Devicetree match table: AST2600 is the only supported SoC. */
static const struct of_device_id aspeed_acry_of_matches[] = {
        { .compatible = "aspeed,ast2600-acry", },
        {},
};
709
710 static int aspeed_acry_probe(struct platform_device *pdev)
711 {
712         struct aspeed_acry_dev *acry_dev;
713         struct device *dev = &pdev->dev;
714         int rc;
715
716         acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
717                                 GFP_KERNEL);
718         if (!acry_dev)
719                 return -ENOMEM;
720
721         acry_dev->dev = dev;
722
723         platform_set_drvdata(pdev, acry_dev);
724
725         acry_dev->regs = devm_platform_ioremap_resource(pdev, 0);
726         if (IS_ERR(acry_dev->regs))
727                 return PTR_ERR(acry_dev->regs);
728
729         acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1);
730         if (IS_ERR(acry_dev->acry_sram))
731                 return PTR_ERR(acry_dev->acry_sram);
732
733         /* Get irq number and register it */
734         acry_dev->irq = platform_get_irq(pdev, 0);
735         if (acry_dev->irq < 0)
736                 return -ENXIO;
737
738         rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
739                               dev_name(dev), acry_dev);
740         if (rc) {
741                 dev_err(dev, "Failed to request irq.\n");
742                 return rc;
743         }
744
745         acry_dev->clk = devm_clk_get_enabled(dev, NULL);
746         if (IS_ERR(acry_dev->clk)) {
747                 dev_err(dev, "Failed to get acry clk\n");
748                 return PTR_ERR(acry_dev->clk);
749         }
750
751         acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
752                                                          "aspeed,ahbc");
753         if (IS_ERR(acry_dev->ahbc)) {
754                 dev_err(dev, "Failed to get AHBC regmap\n");
755                 return -ENODEV;
756         }
757
758         /* Initialize crypto hardware engine structure for RSA */
759         acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
760         if (!acry_dev->crypt_engine_rsa) {
761                 rc = -ENOMEM;
762                 goto clk_exit;
763         }
764
765         rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
766         if (rc)
767                 goto err_engine_rsa_start;
768
769         tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
770                      (unsigned long)acry_dev);
771
772         /* Set Data Memory to AHB(CPU) Access Mode */
773         ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
774
775         /* Initialize ACRY SRAM index */
776         aspeed_acry_sram_mapping(acry_dev);
777
778         acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
779                                                  &acry_dev->buf_dma_addr,
780                                                  GFP_KERNEL);
781         if (!acry_dev->buf_addr) {
782                 rc = -ENOMEM;
783                 goto err_engine_rsa_start;
784         }
785
786         aspeed_acry_register(acry_dev);
787
788         dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");
789
790         return 0;
791
792 err_engine_rsa_start:
793         crypto_engine_exit(acry_dev->crypt_engine_rsa);
794 clk_exit:
795         clk_disable_unprepare(acry_dev->clk);
796
797         return rc;
798 }
799
/* Remove: unwind probe — algs first, then engine, tasklet and clock. */
static int aspeed_acry_remove(struct platform_device *pdev)
{
        struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);

        aspeed_acry_unregister(acry_dev);
        crypto_engine_exit(acry_dev->crypt_engine_rsa);
        tasklet_kill(&acry_dev->done_task);
        clk_disable_unprepare(acry_dev->clk);

        return 0;
}
811
812 MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
813
814 static struct platform_driver aspeed_acry_driver = {
815         .probe          = aspeed_acry_probe,
816         .remove         = aspeed_acry_remove,
817         .driver         = {
818                 .name   = KBUILD_MODNAME,
819                 .of_match_table = aspeed_acry_of_matches,
820         },
821 };
822
823 module_platform_driver(aspeed_acry_driver);
824
825 MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
826 MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
827 MODULE_LICENSE("GPL");