// SPDX-License-Identifier: GPL-2.0
/*
 * sl3516-ce-core.c - hardware cryptographic offloader for Storlink SL3516 SoC
 *
 * Copyright (C) 2021 Corentin Labbe <clabbe@baylibre.com>
 *
 * Core file which registers crypto algorithms supported by the CryptoEngine
 */

#include <crypto/engine.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "sl3516-ce.h"

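/*
 * Allocate and link the TX and RX descriptor rings (MAXDESC entries each)
 * plus the DMA-coherent packet control block. Each descriptor points to the
 * next one and the last points back to the first, so the engine can walk
 * the rings in chain mode.
 */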
static int sl3516_ce_desc_init(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;
	int i;

	ce->tx = dma_alloc_coherent(ce->dev, sz, &ce->dtx, GFP_KERNEL);
	if (!ce->tx)
		return -ENOMEM;
	ce->rx = dma_alloc_coherent(ce->dev, sz, &ce->drx, GFP_KERNEL);
	if (!ce->rx)
		goto err_tx;

	for (i = 0; i < MAXDESC; i++) {
		ce->tx[i].frame_ctrl.bits.own = CE_CPU;
		ce->tx[i].next_desc.next_descriptor = ce->dtx + (i + 1) * sizeof(struct descriptor);
	}
	ce->tx[MAXDESC - 1].next_desc.next_descriptor = ce->dtx;

	for (i = 0; i < MAXDESC; i++) {
		ce->rx[i].frame_ctrl.bits.own = CE_CPU;
		ce->rx[i].next_desc.next_descriptor = ce->drx + (i + 1) * sizeof(struct descriptor);
	}
	ce->rx[MAXDESC - 1].next_desc.next_descriptor = ce->drx;

	ce->pctrl = dma_alloc_coherent(ce->dev, sizeof(struct pkt_control_ecb),
				       &ce->dctrl, GFP_KERNEL);
	if (!ce->pctrl)
		goto err_rx;

	return 0;

err_rx:
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
err_tx:
	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	return -ENOMEM;
}

static void sl3516_ce_free_descs(struct sl3516_ce_dev *ce)
{
	const size_t sz = sizeof(struct descriptor) * MAXDESC;

	dma_free_coherent(ce->dev, sz, ce->tx, ce->dtx);
	dma_free_coherent(ce->dev, sz, ce->rx, ce->drx);
	dma_free_coherent(ce->dev, sizeof(struct pkt_control_ecb), ce->pctrl,
			  ce->dctrl);
}

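/*
 * Program the TX/RX DMA control registers: chained descriptor mode, with
 * the failure/protocol-error interrupts enabled on both directions and the
 * end-of-frame interrupts enabled on the RX side (used to detect completion).
 */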
static void start_dma_tx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = TXDMA_CTRL_START | TXDMA_CTRL_CHAIN_MODE | TXDMA_CTRL_CONTINUE |
	    TXDMA_CTRL_INT_FAIL | TXDMA_CTRL_INT_PERR | TXDMA_CTRL_BURST_UNK;

	writel(v, ce->base + IPSEC_TXDMA_CTRL);
}

static void start_dma_rx(struct sl3516_ce_dev *ce)
{
	u32 v;

	v = RXDMA_CTRL_START | RXDMA_CTRL_CHAIN_MODE | RXDMA_CTRL_CONTINUE |
	    RXDMA_CTRL_BURST_UNK | RXDMA_CTRL_INT_FINISH |
	    RXDMA_CTRL_INT_FAIL | RXDMA_CTRL_INT_PERR |
	    RXDMA_CTRL_INT_EOD | RXDMA_CTRL_INT_EOF;

	writel(v, ce->base + IPSEC_RXDMA_CTRL);
}

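/*
 * The two helpers below hand out the next TX/RX descriptor, advancing the
 * ring index and wrapping it at MAXDESC.
 */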
static struct descriptor *get_desc_tx(struct sl3516_ce_dev *ce)
{
	struct descriptor *dd;

	dd = &ce->tx[ce->ctx];
	ce->ctx++;
	if (ce->ctx >= MAXDESC)
		ce->ctx = 0;
	return dd;
}

static struct descriptor *get_desc_rx(struct sl3516_ce_dev *ce)
{
	struct descriptor *rdd;

	rdd = &ce->rx[ce->crx];
	ce->crx++;
	if (ce->crx >= MAXDESC)
		ce->crx = 0;
	return rdd;
}

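/*
 * Build and run one request: one RX descriptor per destination SG entry,
 * then, for each source SG entry, a pair of TX descriptors (the first
 * carries the packet control block selected by rctx->tqflag, the second the
 * actual data). Both DMA directions are then started and we wait for the RX
 * end-of-frame interrupt.
 */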
int sl3516_ce_run_task(struct sl3516_ce_dev *ce, struct sl3516_ce_cipher_req_ctx *rctx,
		       const char *name)
{
	struct descriptor *dd, *rdd = NULL;
	u32 v;
	int i, err = 0;

	reinit_completion(&ce->complete);
	ce->status = 0;

	for (i = 0; i < rctx->nr_sgd; i++) {
		dev_dbg(ce->dev, "%s handle DST SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgd, rctx->t_dst[i].len);
		rdd = get_desc_rx(ce);
		rdd->buf_adr = rctx->t_dst[i].addr;
		rdd->frame_ctrl.bits.buffer_size = rctx->t_dst[i].len;
		rdd->frame_ctrl.bits.own = CE_DMA;
	}
	/* Request an end-of-frame interrupt on the last RX descriptor */
	rdd->next_desc.bits.eofie = 1;

	for (i = 0; i < rctx->nr_sgs; i++) {
		dev_dbg(ce->dev, "%s handle SRC SG %d/%d len=%d\n", __func__,
			i, rctx->nr_sgs, rctx->t_src[i].len);
		rctx->h->algorithm_len = rctx->t_src[i].len;

		/* First TX descriptor: the packet control block */
		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->pctrllen;
		dd->buf_adr = ce->dctrl;
		dd->flag_status.tx_flag.tqflag = rctx->tqflag;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;

		/* Second TX descriptor: the source data */
		dd = get_desc_tx(ce);
		dd->frame_ctrl.raw = 0;
		dd->flag_status.raw = 0;
		dd->frame_ctrl.bits.buffer_size = rctx->t_src[i].len;
		dd->buf_adr = rctx->t_src[i].addr;
		dd->flag_status.tx_flag.tqflag = 0;
		dd->next_desc.bits.eofie = 0;
		dd->next_desc.bits.dec = 0;
		dd->next_desc.bits.sof_eof = DESC_FIRST | DESC_LAST;
		dd->frame_ctrl.bits.own = CE_DMA;
	}

	start_dma_rx(ce);
	start_dma_tx(ce);

	wait_for_completion_interruptible_timeout(&ce->complete,
						  msecs_to_jiffies(5000));
	if (ce->status == 0) {
		dev_err(ce->dev, "DMA timeout for %s\n", name);
		err = -EFAULT;
	}
	v = readl(ce->base + IPSEC_STATUS_REG);
	if (v) {
		dev_err(ce->dev, "IPSEC_STATUS_REG %x\n", v);
		err = -EFAULT;
	}

	return err;
}

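/*
 * Interrupt handler: acknowledge the DMA status bits by writing them back,
 * report bus/protocol errors, and complete the pending request when the RX
 * end-of-frame interrupt fires.
 */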
static irqreturn_t ce_irq_handler(int irq, void *data)
{
	struct sl3516_ce_dev *ce = (struct sl3516_ce_dev *)data;
	u32 v;

	ce->stat_irq++;

	v = readl(ce->base + IPSEC_DMA_STATUS);
	writel(v, ce->base + IPSEC_DMA_STATUS);

	if (v & DMA_STATUS_TS_DERR)
		dev_err(ce->dev, "AHB bus Error While Tx !!!\n");
	if (v & DMA_STATUS_TS_PERR)
		dev_err(ce->dev, "Tx Descriptor Protocol Error !!!\n");
	if (v & DMA_STATUS_RS_DERR)
		dev_err(ce->dev, "AHB bus Error While Rx !!!\n");
	if (v & DMA_STATUS_RS_PERR)
		dev_err(ce->dev, "Rx Descriptor Protocol Error !!!\n");

	if (v & DMA_STATUS_TS_EOFI)
		ce->stat_irq_tx++;
	if (v & DMA_STATUS_RS_EOFI) {
		ce->status = 1;
		complete(&ce->complete);
		ce->stat_irq_rx++;
	}

	return IRQ_HANDLED;
}

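/*
 * Only ecb(aes) is offloaded. CRYPTO_ALG_NEED_FALLBACK is set so that
 * requests the engine cannot process (see the fallback counters in debugfs)
 * are handed to a software implementation by the cipher code.
 */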
static struct sl3516_ce_alg_template ce_algs[] = {
{
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-sl3516",
			.cra_priority = 400,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
			.cra_ctxsize = sizeof(struct sl3516_ce_cipher_tfm_ctx),
			.cra_module = THIS_MODULE,
			.cra_alignmask = 0xf,
			.cra_init = sl3516_ce_cipher_init,
			.cra_exit = sl3516_ce_cipher_exit,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = sl3516_ce_aes_setkey,
		.encrypt = sl3516_ce_skencrypt,
		.decrypt = sl3516_ce_skdecrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = sl3516_ce_handle_cipher_request,
	},
},
};

static int sl3516_ce_debugfs_show(struct seq_file *seq, void *v)
{
	struct sl3516_ce_dev *ce = seq->private;
	unsigned int i;

	seq_printf(seq, "HWRNG %lu %lu\n",
		   ce->hwrng_stat_req, ce->hwrng_stat_bytes);
	seq_printf(seq, "IRQ %lu\n", ce->stat_irq);
	seq_printf(seq, "IRQ TX %lu\n", ce->stat_irq_tx);
	seq_printf(seq, "IRQ RX %lu\n", ce->stat_irq_rx);
	seq_printf(seq, "nreq %lu\n", ce->stat_req);
	seq_printf(seq, "fallback SG count TX %lu\n", ce->fallback_sg_count_tx);
	seq_printf(seq, "fallback SG count RX %lu\n", ce->fallback_sg_count_rx);
	seq_printf(seq, "fallback modulo16 %lu\n", ce->fallback_mod16);
	seq_printf(seq, "fallback align16 %lu\n", ce->fallback_align16);
	seq_printf(seq, "fallback not same len %lu\n", ce->fallback_not_same_len);

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n",
				   ce_algs[i].alg.skcipher.base.base.cra_driver_name,
				   ce_algs[i].alg.skcipher.base.base.cra_name,
				   ce_algs[i].stat_req, ce_algs[i].stat_fb);
			break;
		}
	}
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sl3516_ce_debugfs);

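/*
 * Register each template with the crypto engine. The ce pointer is recorded
 * per template (and cleared on failure) so that debugfs and unregistration
 * only touch entries that were actually registered.
 */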
static int sl3516_ce_register_algs(struct sl3516_ce_dev *ce)
{
	unsigned int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		ce_algs[i].ce = ce;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "DEBUG: Register %s\n",
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher);
			if (err) {
				dev_err(ce->dev, "Fail to register %s\n",
					ce_algs[i].alg.skcipher.base.base.cra_name);
				ce_algs[i].ce = NULL;
				return err;
			}
			break;
		default:
			ce_algs[i].ce = NULL;
			dev_err(ce->dev, "ERROR: tried to register an unknown algo\n");
		}
	}
	return 0;
}

static void sl3516_ce_unregister_algs(struct sl3516_ce_dev *ce)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ce_algs); i++) {
		if (!ce_algs[i].ce)
			continue;
		switch (ce_algs[i].type) {
		case CRYPTO_ALG_TYPE_SKCIPHER:
			dev_info(ce->dev, "Unregister %d %s\n", i,
				 ce_algs[i].alg.skcipher.base.base.cra_name);
			crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher);
			break;
		}
	}
}

static void sl3516_ce_start(struct sl3516_ce_dev *ce)
{
	/* Reset the software ring indices to match the pointers programmed below */
	ce->ctx = 0;
	ce->crx = 0;
	writel(ce->dtx, ce->base + IPSEC_TXDMA_CURR_DESC);
	writel(ce->drx, ce->base + IPSEC_RXDMA_CURR_DESC);
	writel(0, ce->base + IPSEC_DMA_STATUS);
}

/*
 * Power management strategy: The device is suspended unless a TFM exists for
 * one of the algorithms proposed by this driver.
 */
static int sl3516_ce_pm_suspend(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);

	reset_control_assert(ce->reset);
	clk_disable_unprepare(ce->clks);
	return 0;
}

static int sl3516_ce_pm_resume(struct device *dev)
{
	struct sl3516_ce_dev *ce = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(ce->clks);
	if (err) {
		dev_err(ce->dev, "Cannot prepare_enable\n");
		goto error;
	}
	err = reset_control_deassert(ce->reset);
	if (err) {
		dev_err(ce->dev, "Cannot deassert reset control\n");
		goto error;
	}

	/* Reprogram the descriptor ring base addresses after reset */
	sl3516_ce_start(ce);

	return 0;
error:
	sl3516_ce_pm_suspend(dev);
	return err;
}

static const struct dev_pm_ops sl3516_ce_pm_ops = {
	SET_RUNTIME_PM_OPS(sl3516_ce_pm_suspend, sl3516_ce_pm_resume, NULL)
};

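/*
 * Runtime PM setup: the device starts suspended and autosuspends two seconds
 * after its last user is gone.
 */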
static int sl3516_ce_pm_init(struct sl3516_ce_dev *ce)
{
	int err;

	pm_runtime_use_autosuspend(ce->dev);
	pm_runtime_set_autosuspend_delay(ce->dev, 2000);

	err = pm_runtime_set_suspended(ce->dev);
	if (err)
		return err;
	pm_runtime_enable(ce->dev);
	return 0;
}

static void sl3516_ce_pm_exit(struct sl3516_ce_dev *ce)
{
	pm_runtime_disable(ce->dev);
}

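/*
 * Probe: map the registers, request the IRQ, reset control and clock, then
 * allocate the descriptor rings, set up runtime PM and the crypto engine,
 * and finally register the skcipher algorithms and the hardware RNG.
 */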
static int sl3516_ce_probe(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce;
	int err, irq;
	u32 v;

	ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	ce->dev = &pdev->dev;
	platform_set_drvdata(pdev, ce);

	ce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ce->base))
		return PTR_ERR(ce->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "crypto", ce);
	if (err) {
		dev_err(ce->dev, "Cannot request Crypto Engine IRQ (err=%d)\n", err);
		return err;
	}

	ce->reset = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(ce->reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset),
				     "No reset control found\n");
	ce->clks = devm_clk_get(ce->dev, NULL);
	if (IS_ERR(ce->clks)) {
		err = PTR_ERR(ce->clks);
		dev_err(ce->dev, "Cannot get clock err=%d\n", err);
		return err;
	}

	err = sl3516_ce_desc_init(ce);
	if (err)
		return err;

	err = sl3516_ce_pm_init(ce);
	if (err)
		goto error_pm;

	init_completion(&ce->complete);

	ce->engine = crypto_engine_alloc_init(ce->dev, true);
	if (!ce->engine) {
		dev_err(ce->dev, "Cannot allocate engine\n");
		err = -ENOMEM;
		goto error_engine;
	}

	err = crypto_engine_start(ce->engine);
	if (err) {
		dev_err(ce->dev, "Cannot start engine\n");
		goto error_engine;
	}

	err = sl3516_ce_register_algs(ce);
	if (err)
		goto error_alg;

	err = sl3516_ce_rng_register(ce);
	if (err)
		goto error_rng;

	err = pm_runtime_resume_and_get(ce->dev);
	if (err < 0)
		goto error_pmuse;

	/* Report the device and revision fields of the ID registers */
	v = readl(ce->base + IPSEC_ID);
	dev_info(ce->dev, "SL3516 dev %lx rev %lx\n",
		 v & GENMASK(31, 4),
		 v & GENMASK(3, 0));
	v = readl(ce->base + IPSEC_DMA_DEVICE_ID);
	dev_info(ce->dev, "SL3516 DMA dev %lx rev %lx\n",
		 v & GENMASK(15, 4),
		 v & GENMASK(3, 0));

	pm_runtime_put_sync(ce->dev);

	if (IS_ENABLED(CONFIG_CRYPTO_DEV_SL3516_DEBUG)) {
		struct dentry *dbgfs_dir __maybe_unused;
		struct dentry *dbgfs_stats __maybe_unused;

		/* Ignore error of debugfs */
		dbgfs_dir = debugfs_create_dir("sl3516", NULL);
		dbgfs_stats = debugfs_create_file("stats", 0444,
						  dbgfs_dir, ce,
						  &sl3516_ce_debugfs_fops);
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
		ce->dbgfs_dir = dbgfs_dir;
		ce->dbgfs_stats = dbgfs_stats;
#endif
	}

	return 0;

error_pmuse:
	sl3516_ce_rng_unregister(ce);
error_rng:
	sl3516_ce_unregister_algs(ce);
error_alg:
	crypto_engine_exit(ce->engine);
error_engine:
	sl3516_ce_pm_exit(ce);
error_pm:
	sl3516_ce_free_descs(ce);
	return err;
}

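/* Tear down in the reverse order of probe. */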
static int sl3516_ce_remove(struct platform_device *pdev)
{
	struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);

	sl3516_ce_rng_unregister(ce);
	sl3516_ce_unregister_algs(ce);
	crypto_engine_exit(ce->engine);
	sl3516_ce_pm_exit(ce);
	sl3516_ce_free_descs(ce);

#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
	debugfs_remove_recursive(ce->dbgfs_dir);
#endif

	return 0;
}

static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
	{ .compatible = "cortina,sl3516-crypto"},
	{}
};
MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);

static struct platform_driver sl3516_ce_driver = {
	.probe = sl3516_ce_probe,
	.remove = sl3516_ce_remove,
	.driver = {
		.name = "sl3516-crypto",
		.pm = &sl3516_ce_pm_ops,
		.of_match_table = sl3516_ce_crypto_of_match_table,
	},
};

module_platform_driver(sl3516_ce_driver);

MODULE_DESCRIPTION("SL3516 cryptographic offloader");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corentin Labbe <clabbe@baylibre.com>");