crypto: sl3516 - Use new crypto_engine_op interface
// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
 *
 * Core file which registers crypto algorithms supported by the CryptoEngine
 */

#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "sl3516-ce.h"

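/*
 * Allocate and link the TX and RX descriptor rings. Each ring holds
 * MAXDESC coherently-mapped descriptors, initially owned by the CPU
 * (CE_CPU), with the last descriptor pointing back to the first so the
 * engine can chain around the ring.
 */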
static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;
	int i;

	ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
	if (!ce->tx)
		return -ENOMEM;
	ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
	if (!ce->rx)
		goto err_rx;

	for (i = 0; i < MAXDESC; i++) {
		ce->tx[i].frame_ctrl.bits.own = CE_CPU;
		ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
	}
	ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;

	for (i = 0; i < MAXDESC; i++) {
		ce->rx[i].frame_ctrl.bits.own = CE_CPU;
		ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
	}
	ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;

	ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
				       &ce->dctrl, GFP_KERNEL);
	if (!ce->pctrl)
		goto err_pctrl;

	return 0;
err_pctrl:
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
err_rx:
	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	return -ENOMEM;
}

static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;

	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
	dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
			  ce->dctrl);
}

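/*
 * Kick the TX/RX DMA engines in chained mode. Failure and protocol-error
 * interrupts are always enabled; the RX side additionally interrupts on
 * end-of-frame, which is what signals request completion (see the IRQ
 * handler below).
 */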
static void start_dma_tx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE |
	    TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;

	writel(v, ce->base + IPSEC_TXDMA_CTRL);
}

static void start_dma_rx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE |
	    RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH |
	    RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR |
	    RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;

	writel(v, ce->base + IPSEC_RXDMA_CTRL);
}

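/*
 * The descriptor rings are consumed round-robin: ce->ctx and ce->crx
 * index the next free TX/RX descriptor and simply wrap at MAXDESC.
 */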
static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
{
	struct descriptor *dd;

	dd = &ce->tx[ce->ctx];
	ce->ctx++;
	if (ce->ctx >= MAXDESC)
		ce->ctx = 0;
	return dd;
}

static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
{
	struct descriptor *rdd;

	rdd = &ce->rx[ce->crx];
	ce->crx++;
	if (ce->crx >= MAXDESC)
		ce->crx = 0;
	return rdd;
}

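/*
 * sl3516_ce_run_task - push one cipher request through the engine.
 *
 * Queues one RX descriptor per destination SG entry, then, for each
 * source SG entry, a pair of TX descriptors: one carrying the packet
 * control block (ce->dctrl, rctx->pctrllen bytes) and one carrying the
 * payload. Completion is signalled by the RX end-of-frame interrupt; a
 * 5 second timeout guards against a stuck engine.
 */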
int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
		       const char *name)
{
	struct descriptor *dd, *rdd = NULL;
	u32 v;
	int i, err = 0;

	ce->stat_req++;

	reinit_completion(&ce->complete);
	ce->status = 0;

	for (i = 0; i < rctx->nr_sgd; i++) {
		dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgd, rctx->t_dst[i].len);
		rdd = get_desc_rx(ce);
		rdd->buf_adr = rctx->t_dst[i].addr;
		rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
		rdd->frame_ctrl.bits.own = CE_DMA;
	}
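	/*
	 * Request the end-of-frame interrupt on the last RX descriptor
	 * only. Callers are expected to map at least one destination SG,
	 * so rdd is non-NULL here.
	 */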
	rdd->next_desc.bits.eofie = 1;

	for (i = 0; i < rctx->nr_sgs; i++) {
		dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgs, rctx->t_src[i].len);
		rctx->h->algorithm_len = rctx->t_src[i].len;

		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
		dd->buf_adr = ce->dctrl;
		dd->flag_status.tx_flag.tqflag = rctx->tqflag;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;

		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
		dd->buf_adr = rctx->t_src[i].addr;
		dd->flag_status.tx_flag.tqflag = 0;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;
		start_dma_tx(ce);
		start_dma_rx(ce);
	}
	wait_for_completion_interruptible_timeout(&ce->complete,
						  msecs_to_jiffies(5000));
	if (ce->status == 0) {
		dev_err(ce->dev, "DMA timeout for %s\n", name);
		err = -EFAULT;
	}
	v = readl(ce->base + IPSEC_STATUS_REG);
	if (v & 0xFFF) {
		dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
		err = -EFAULT;
	}

	return err;
}

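/*
 * The raw DMA status is written back to IPSEC_DMA_STATUS to acknowledge
 * the interrupt. Only an RX end-of-frame marks a completed request; TX
 * end-of-frame and the various error bits are only counted and logged.
 */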
static irqreturn_t ce_irq_handler(int irq, void *data)
{
	struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
	u32 v;

	ce->stat_irq++;

	v = readl(ce->base + IPSEC_DMA_STATUS);
	writel(v, ce->base + IPSEC_DMA_STATUS);

	if (v & DMA_STATUS_TS_DERR)
		dev_err(ce->dev, "AHB bus error during TX\n");
	if (v & DMA_STATUS_TS_PERR)
		dev_err(ce->dev, "TX descriptor protocol error\n");
	if (v & DMA_STATUS_RS_DERR)
		dev_err(ce->dev, "AHB bus error during RX\n");
	if (v & DMA_STATUS_RS_PERR)
		dev_err(ce->dev, "RX descriptor protocol error\n");

	if (v & DMA_STATUS_TS_EOFI)
		ce->stat_irq_tx++;
	if (v & DMA_STATUS_RS_EOFI) {
		ce->status = 1;
		complete(&ce->complete);
		ce->stat_irq_rx++;
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}

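/*
 * With the new crypto_engine_op interface the request handler is no
 * longer stored in per-request context but alongside the algorithm
 * itself, in .alg.skcipher.op.do_one_request.
 */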
static struct sl3516_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.mode = ECB_AES,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sl3516",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sl3516_ce_cipher_init,
			.cra_exit = sl3516_ce_cipher_exit,
		},
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sl3516_ce_aes_setkey,
		.encrypt	= sl3516_ce_skencrypt,
		.decrypt	= sl3516_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sl3516_ce_handle_cipher_request,
	},
},
};

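/* Dump usage and fallback statistics; exposed under debugfs from probe. */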
static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
	struct sl3516_ce_dev *ce = seq->private;
	unsigned int i;

	seq_printf(seq, "HWRNG %lu %lu\n",
		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
	seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
	seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
	seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
	seq_printf(seq, "nreq %lu\n", ce->stat_req);
	seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
	seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
	seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
	seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
	seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.skcipher.base.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			break;
		}
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);

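/*
 * Register every template in ce_algs[] with the crypto engine; a template
 * whose ->ce pointer is left NULL is skipped at unregister time.
 */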
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
	int err;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "DEBUG: Register %s\n",
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "Failed to register %s\n",
					ce_algs[i].alg.skcipher.base.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
		}
	}
	return 0;
}

static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Unregister %d %s\n", i,
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
			break;
		}
	}
}

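/*
 * Reset the software ring indexes, point the hardware at the base of
 * both descriptor rings and clear any stale DMA status.
 */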
static void sl3516_ce_start(struct sl3516_ce_dev *ce)
{
	ce->ctx = 0;
	ce->crx = 0;
	writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
	writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
	writel(0, ce->base + IPSEC_DMA_STATUS);
}

/*
 * Power management strategy: The device is suspended unless a TFM exists for
 * one of the algorithms proposed by this driver.
 */
static int sl3516_ce_pm_suspend(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);

	reset_control_assert(ce->reset);
	clk_disable_unprepare(ce->clks);
	return 0;
}

static int sl3516_ce_pm_resume(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ce->clks);
	if (err) {
		dev_err(ce->dev, "Cannot prepare and enable the clock\n");
		goto error;
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}

	sl3516_ce_start(ce);

	return 0;
error:
	sl3516_ce_pm_suspend(dev);
	return err;
}

static const struct dev_pm_ops sl3516_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
};

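/*
 * Runtime PM setup: the device starts out suspended and autosuspends
 * after 2 seconds of inactivity; per the strategy comment above, an
 * existing TFM is what holds the device resumed.
 */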
static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
{
	int err;

	pm_runtime_use_autosuspend(ce->dev);
	pm_runtime_set_autosuspend_delay(ce->dev, 2000);

	err = pm_runtime_set_suspended(ce->dev);
	if (err)
		return err;
	pm_runtime_enable(ce->dev);
	return err;
}

static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}

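/*
 * Probe order matters: the descriptor rings must exist before runtime PM
 * can resume the engine (sl3516_ce_start() programs their addresses),
 * and the engine must be running before any algorithm is registered.
 */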
static int sl3516_ce_probe(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
		return err;
	}

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
				     "No reset control found\n");
	ce->clks = devm_clk_get(ce->dev, NULL);
	if (IS_ERR(ce->clks)) {
		err = PTR_ERR(ce->clks);
		dev_err(ce->dev, "Cannot get clock err=%d\n", err);
		return err;
	}

	err = sl3516_ce_desc_init(ce);
	if (err)
		return err;

	err = sl3516_ce_pm_init(ce);
	if (err)
		goto error_pm;

	init_completion(&ce->complete);

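	/* second argument true: run the engine queue as a realtime task */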
	ce->engine = crypto_engine_alloc_init(ce->dev, true);
	if (!ce->engine) {
		dev_err(ce->dev, "Cannot allocate engine\n");
		err = -ENOMEM;
		goto error_engine;
	}

	err = crypto_engine_start(ce->engine);
	if (err) {
		dev_err(ce->dev, "Cannot start engine\n");
		goto error_engine;
	}

	err = sl3516_ce_register_algs(ce);
	if (err)
		goto error_alg;

	err = sl3516_ce_rng_register(ce);
	if (err)
		goto error_rng;

	err = pm_runtime_resume_and_get(ce->dev);
	if (err < 0)
		goto error_pmuse;

	v = readl(ce->base + IPSEC_ID);
	dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
		 v & GENMASK(31, 4),
		 v & GENMASK(3, 0));
	v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
	dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
		 v & GENMASK(15, 4),
		 v & GENMASK(3, 0));

	pm_runtime_put_sync(ce->dev);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
		struct dentry *dbgfs_dir __maybe_unused;
		struct dentry *dbgfs_stats __maybe_unused;

		/* Errors from debugfs are deliberately ignored */
		dbgfs_dir = debugfs_create_dir("sl3516", NULL);
		dbgfs_stats = debugfs_create_file("stats", 0444,
						  dbgfs_dir, ce,
						  &sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
		ce->dbgfs_dir = dbgfs_dir;
		ce->dbgfs_stats = dbgfs_stats;
#endif
	}

	return 0;
error_pmuse:
	sl3516_ce_rng_unregister(ce);
error_rng:
	sl3516_ce_unregister_algs(ce);
error_alg:
	crypto_engine_exit(ce->engine);
error_engine:
	sl3516_ce_pm_exit(ce);
error_pm:
	sl3516_ce_free_descs(ce);
	return err;
}

static int sl3516_ce_remove(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);

	sl3516_ce_rng_unregister(ce);
	sl3516_ce_unregister_algs(ce);
	crypto_engine_exit(ce->engine);
	sl3516_ce_pm_exit(ce);
	sl3516_ce_free_descs(ce);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif

	return 0;
}

static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
	{ .compatible = "cortina,sl3516-crypto"},
	{}
};
MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);

static struct platform_driver sl3516_ce_driver = {
	.probe		 = sl3516_ce_probe,
	.remove		 = sl3516_ce_remove,
	.driver		 = {
		.name		= "sl3516-crypto",
		.pm		= &sl3516_ce_pm_ops,
		.of_match_table = sl3516_ce_crypto_of_match_table,
	},
};

module_platform_driver(sl3516_ce_driver);

MODULE_DESCRIPTION("SL3516 cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");