// SPDX-License-Identifier: GPL-2.0
/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017, 2019-2020 NXP
 */

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <soc/fsl/qman.h>

#include "debugfs.h"
#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT	31
#define PREHDR_ABS		BIT(25)
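
/*
 * The driver context places a two-word pre-header in front of the shared
 * descriptor before DMA-mapping both as one block: word 0 carries the RSLS
 * bit plus the shared descriptor length in 32-bit words, and word 1 sets ABS
 * to mark the descriptor pointer as an absolute address. See
 * caam_drv_ctx_init() and caam_drv_ctx_update() below.
 */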

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU	256
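
/*
 * Example: on a system where four CPUs own affine QMan portals, init_cgr()
 * below programs a congestion threshold of 4 * 256 = 1024 in-flight frames
 * for the whole congestion group.
 */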

#define CAAM_QI_ENQUEUE_RETRIES	10000

#define CAAM_NAPI_WEIGHT	63
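
/*
 * CAAM_NAPI_WEIGHT bounds how many DQRR entries a single caam_qi_poll() run
 * may clean before yielding back to the NAPI scheduler; see caam_qi_poll().
 */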

/*
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
	struct napi_struct irqtask;
	struct qman_portal *p;
};

/*
 * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
 *                     pending responses expected on each cpu.
 * @caam_napi: CAAM NAPI params
 * @net_dev: netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
	struct caam_napi caam_napi;
	struct net_device net_dev;
	struct qman_fq *rsp_fq;
} ____cacheline_aligned;
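
/*
 * The embedded net_device is never registered with the network stack; it
 * exists only so that a per-CPU NAPI context can be attached to it in
 * caam_qi_init(), letting response processing reuse the standard NAPI
 * polling machinery.
 */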

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);

/*
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 */
struct caam_qi_priv {
	struct qman_cgr cgr;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;

/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);
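
/*
 * Frontend drivers layered on this backend are expected to test
 * caam_congested before enqueuing and back off while the congestion group is
 * above its threshold. A minimal caller-side sketch (not part of this file):
 *
 *	if (unlikely(caam_congested))
 *		return -EAGAIN;
 */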

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa-ethernet driver.
 *       This would pose a problem for userspace application processing which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;
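
/*
 * When the device sits behind an IOMMU, the address carried in a frame
 * descriptor coming back from CAAM is an IOVA, so it must be translated to a
 * physical address before phys_to_virt() can recover the original request.
 */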

static void *caam_iova_to_virt(struct iommu_domain *domain,
			       dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
	struct qm_fd fd;
	dma_addr_t addr;
	int ret;
	int num_retries = 0;

	qm_fd_clear_fd(&fd);
	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

	addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
			      DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, addr)) {
		dev_err(qidev, "DMA mapping error for QI enqueue request\n");
		return -EIO;
	}
	qm_fd_addr_set64(&fd, addr);

	do {
		ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
		if (likely(!ret)) {
			refcount_inc(&req->drv_ctx->refcnt);
			return 0;
		}

		if (ret != -EBUSY)
			break;
		num_retries++;
	} while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

	dev_err(qidev, "qman_enqueue failed: %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
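
/*
 * Typical frontend usage (sketch only): fill req->fd_sgt[0] and
 * req->fd_sgt[1] with the output and input S/G entries of the compound
 * frame, set req->cbk and req->drv_ctx, then call caam_qi_enqueue(). On
 * success the request FQ refcount is bumped and req->cbk() is invoked later
 * from caam_rsp_fq_dqrr_cb() - or from caam_fq_ern_cb() if the enqueue is
 * rejected - with the CAAM status word.
 */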

static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
			   const union qm_mr_entry *msg)
{
	const struct qm_fd *fd;
	struct caam_drv_req *drv_req;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);

	fd = &msg->ern.fd;

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (!drv_req) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return;
	}

	refcount_dec(&drv_req->drv_ctx->refcnt);

	if (qm_fd_get_format(fd) != qm_fd_compound) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	if (fd->status)
		drv_req->cbk(drv_req, be32_to_cpu(fd->status));
	else
		drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}

static struct qman_fq *create_caam_req_fq(struct device *qidev,
					  struct qman_fq *rsp_fq,
					  dma_addr_t hwdesc,
					  int fq_sched_flag)
{
	int ret;
	struct qman_fq *req_fq;
	struct qm_mcc_initfq opts;

	req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
	if (!req_fq)
		return ERR_PTR(-ENOMEM);

	req_fq->cb.ern = caam_fq_ern_cb;
	req_fq->cb.fqs = NULL;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
				QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
	if (ret) {
		dev_err(qidev, "Failed to create session req FQ\n");
		goto create_req_fq_fail;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
	opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
	qm_fqd_context_a_set64(&opts.fqd, hwdesc);
	opts.fqd.cgid = qipriv.cgr.cgrid;

	ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
	if (ret) {
		dev_err(qidev, "Failed to init session req FQ\n");
		goto init_req_fq_fail;
	}

	dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
		smp_processor_id());

	return req_fq;

init_req_fq_fail:
	qman_destroy_fq(req_fq);
create_req_fq_fail:
	kfree(req_fq);
	return ERR_PTR(ret);
}

static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
	int ret;

	ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
				    QMAN_VOLATILE_FLAG_FINISH,
				    QM_VDQCR_PRECEDENCE_VDQCR |
				    QM_VDQCR_NUMFRAMES_TILLEMPTY);
	if (ret) {
		dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
		return ret;
	}

	do {
		struct qman_portal *p;

		p = qman_get_affine_portal(smp_processor_id());
		qman_p_poll_dqrr(p, 16);
	} while (fq->flags & QMAN_FQ_STATE_NE);

	return 0;
}

static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0) {
		dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
		return ret;
	}

	if (!ret)
		goto empty_fq;

	/* Async FQ retirement condition */
	if (ret == 1) {
		/* Retry till FQ gets in retired state */
		do {
			msleep(20);
		} while (fq->state != qman_fq_state_retired);

		WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
		WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
	}

empty_fq:
	if (fq->flags & QMAN_FQ_STATE_NE) {
		ret = empty_retired_fq(qidev, fq);
		if (ret) {
			dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
				fq->fqid);
			return ret;
		}
	}

	ret = qman_oos_fq(fq);
	if (ret)
		dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

	qman_destroy_fq(fq);
	kfree(fq);

	return ret;
}

static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx)
{
	int ret;
	int retries = 10;
	struct qm_mcr_queryfq_np np;

	/* Wait till the older CAAM FQ get empty */
	do {
		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		if (!qm_mcr_np_get(&np, frm_cnt))
			break;

		msleep(20);
	} while (1);

	/* Wait until pending jobs from this FQ are processed by CAAM */
	do {
		if (refcount_read(&drv_ctx->refcnt) == 1)
			break;

		msleep(20);
	} while (--retries);

	if (!retries)
		dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n",
			      refcount_read(&drv_ctx->refcnt), fq->fqid);

	return 0;
}

int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
	int ret;
	u32 num_words;
	struct qman_fq *new_fq, *old_fq;
	struct device *qidev = drv_ctx->qidev;

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
		return -EINVAL;
	}

	/* Note down older req FQ */
	old_fq = drv_ctx->req_fq;

	/* Create a new req FQ in parked state */
	new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
				    drv_ctx->context_a, 0);
	if (IS_ERR(new_fq)) {
		dev_err(qidev, "FQ allocation for shdesc update failed\n");
		return PTR_ERR(new_fq);
	}

	/* Hook up new FQ to context so that new requests keep queuing */
	drv_ctx->req_fq = new_fq;

	/* Empty and remove the older FQ */
	ret = empty_caam_fq(old_fq, drv_ctx);
	if (ret) {
		dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

		/* We can revert to older FQ */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");

		return ret;
	}

	/*
	 * Re-initialise pre-header. Set RSLS and SDLEN.
	 * Update the shared descriptor for driver context.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	dma_sync_single_for_device(qidev, drv_ctx->context_a,
				   sizeof(drv_ctx->sh_desc) +
				   sizeof(drv_ctx->prehdr),
				   DMA_BIDIRECTIONAL);

	/* Put the new FQ in scheduled state */
	ret = qman_schedule_fq(new_fq);
	if (ret) {
		dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

		/*
		 * We can kill new FQ and revert to old FQ.
		 * Since the desc is already modified, it is success case.
		 */
		drv_ctx->req_fq = old_fq;

		if (kill_fq(qidev, new_fq))
			dev_warn(qidev, "New CAAM FQ kill failed\n");
	} else if (kill_fq(qidev, old_fq)) {
		dev_warn(qidev, "Old CAAM FQ kill failed\n");
	}

	return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);
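
/*
 * Frontend-side sketch (names hypothetical): when a session is re-keyed, the
 * caller rebuilds the shared descriptor and hands it to the backend, which
 * swaps request FQs underneath without dropping in-flight jobs:
 *
 *	cnstr_shdsc_xxx(ctx->sh_desc, ...);
 *	ret = caam_drv_ctx_update(ctx->drv_ctx, ctx->sh_desc);
 */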

struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
				       int *cpu,
				       u32 *sh_desc)
{
	size_t size;
	u32 num_words;
	dma_addr_t hwdesc;
	struct caam_drv_ctx *drv_ctx;
	const cpumask_t *cpus = qman_affine_cpus();

	num_words = desc_len(sh_desc);
	if (num_words > MAX_SDLEN) {
		dev_err(qidev, "Invalid descriptor len: %d words\n",
			num_words);
		return ERR_PTR(-EINVAL);
	}

	drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
	if (!drv_ctx)
		return ERR_PTR(-ENOMEM);

	/*
	 * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
	 * and dma-map them.
	 */
	drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
					   num_words);
	drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
	memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
	size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
	hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, hwdesc)) {
		dev_err(qidev, "DMA map error for preheader + shdesc\n");
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}
	drv_ctx->context_a = hwdesc;

	/* If given CPU does not own the portal, choose another one that does */
	if (!cpumask_test_cpu(*cpu, cpus)) {
		int *pcpu = &get_cpu_var(last_cpu);

		*pcpu = cpumask_next(*pcpu, cpus);
		if (*pcpu >= nr_cpu_ids)
			*pcpu = cpumask_first(cpus);
		*cpu = *pcpu;

		put_cpu_var(last_cpu);
	}
	drv_ctx->cpu = *cpu;

	/* Find response FQ hooked with this CPU */
	drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

	/* Attach request FQ */
	drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
					     QMAN_INITFQ_FLAG_SCHED);
	if (IS_ERR(drv_ctx->req_fq)) {
		dev_err(qidev, "create_caam_req_fq failed\n");
		dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
		kfree(drv_ctx);
		return ERR_PTR(-ENOMEM);
	}

	/* init reference counter used to track references to request FQ */
	refcount_set(&drv_ctx->refcnt, 1);

	drv_ctx->qidev = qidev;
	return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);
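
/*
 * Frontend-side sketch (names hypothetical): a driver context is typically
 * created once per crypto session, after the shared descriptor has been
 * built:
 *
 *	int cpu = smp_processor_id();
 *	struct caam_drv_ctx *drv_ctx;
 *
 *	drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
 *	if (IS_ERR(drv_ctx))
 *		return PTR_ERR(drv_ctx);
 *
 * If the hinted CPU has no affine portal, another CPU is picked and written
 * back through @cpu.
 */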

void *qi_cache_alloc(gfp_t flags)
{
	return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);

void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);
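
/*
 * Sketch of intended use (caller-side, names hypothetical): per-request
 * scratch - e.g. the extended descriptor holding the CAAM S/G table - is
 * carved out of a single CAAM_QI_MEMCACHE_SIZE object instead of a
 * kmalloc()/kfree() pair on the hotpath:
 *
 *	edesc = qi_cache_alloc(GFP_ATOMIC);
 *	...
 *	qi_cache_free(edesc);
 */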

static int caam_qi_poll(struct napi_struct *napi, int budget)
{
	struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

	int cleaned = qman_p_poll_dqrr(np->p, budget);

	if (cleaned < budget) {
		napi_complete(napi);
		qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
	}

	return cleaned;
}

void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
	if (IS_ERR_OR_NULL(drv_ctx))
		return;

	/* Remove request FQ */
	if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
		dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

	dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
			 sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
			 DMA_BIDIRECTIONAL);
	kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
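
/*
 * Counterpart of caam_drv_ctx_init(); frontends call this when the crypto
 * session is torn down, after which the context pointer must not be used.
 */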

static void caam_qi_shutdown(void *data)
{
	int i;
	struct device *qidev = data;
	struct caam_qi_priv *priv = &qipriv;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus) {
		struct napi_struct *irqtask;

		irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
		napi_disable(irqtask);
		netif_napi_del(irqtask);

		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
	}

	qman_delete_cgr_safe(&priv->cgr);
	qman_release_cgrid(priv->cgr.cgrid);

	kmem_cache_destroy(qi_cache);
}

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
	caam_congested = congested;

	if (congested) {
		caam_debugfs_qi_congested();

		pr_debug_ratelimited("CAAM entered congestion\n");
	} else {
		pr_debug_ratelimited("CAAM exited congestion\n");
	}
}

static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
				 bool sched_napi)
{
	if (sched_napi) {
		/* Disable QMan IRQ source and invoke NAPI */
		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
		np->p = p;
		napi_schedule(&np->irqtask);
		return 1;
	}
	return 0;
}

static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
						    struct qman_fq *rsp_fq,
						    const struct qm_dqrr_entry *dqrr,
						    bool sched_napi)
{
	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
	struct caam_drv_req *drv_req;
	const struct qm_fd *fd;
	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
	struct caam_drv_private *priv = dev_get_drvdata(qidev);
	u32 status;

	if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
		return qman_cb_dqrr_stop;

	fd = &dqrr->fd;

	drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
	if (unlikely(!drv_req)) {
		dev_err(qidev,
			"Can't find original request for CAAM response\n");
		return qman_cb_dqrr_consume;
	}

	refcount_dec(&drv_req->drv_ctx->refcnt);

	status = be32_to_cpu(fd->status);
	if (unlikely(status)) {
		u32 ssrc = status & JRSTA_SSRC_MASK;
		u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

		if (ssrc != JRSTA_SSRC_CCB_ERROR ||
		    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
			dev_err_ratelimited(qidev,
					    "Error: %#x in CAAM response FD\n",
					    status);
	}

	if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
		dev_err(qidev, "Non-compound FD from CAAM\n");
		return qman_cb_dqrr_consume;
	}

	dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
			 sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

	drv_req->cbk(drv_req, status);
	return qman_cb_dqrr_consume;
}

static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
	struct qm_mcc_initfq opts;
	struct qman_fq *fq;
	int ret;

	fq = kzalloc(sizeof(*fq), GFP_KERNEL);
	if (!fq)
		return -ENOMEM;

	fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

	ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
			     QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
	if (ret) {
		dev_err(qidev, "Rsp FQ create failed\n");
		kfree(fq);
		return -ENODEV;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
				   QM_INITFQ_WE_CONTEXTB |
				   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
				       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
	qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
	opts.fqd.cgid = qipriv.cgr.cgrid;
	opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
						QM_STASHING_EXCL_DATA;
	qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

	ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (ret) {
		dev_err(qidev, "Rsp FQ init failed\n");
		kfree(fq);
		return -ENODEV;
	}

	per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

	dev_dbg(qidev, "Allocated response FQ %u for CPU %u\n", fq->fqid, cpu);
	return 0;
}

static int init_cgr(struct device *qidev)
{
	int ret;
	struct qm_mcc_initcgr opts;
	const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
			MAX_RSP_FQ_BACKLOG_PER_CPU;

	ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
	if (ret) {
		dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
		return ret;
	}

	qipriv.cgr.cb = cgr_cb;
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
				   QM_CGR_WE_MODE);
	opts.cgr.cscn_en = QM_CGR_EN;
	opts.cgr.mode = QMAN_CGR_MODE_FRAME;
	qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

	ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret) {
		dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
			qipriv.cgr.cgrid);
		return ret;
	}

	dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
	return 0;
}

static int alloc_rsp_fqs(struct device *qidev)
{
	int ret, i;
	const cpumask_t *cpus = qman_affine_cpus();

	/* Now create response FQs */
	for_each_cpu(i, cpus) {
		ret = alloc_rsp_fq_cpu(qidev, i);
		if (ret) {
			dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u\n", i);
			return ret;
		}
	}

	return 0;
}

static void free_rsp_fqs(void)
{
	int i;
	const cpumask_t *cpus = qman_affine_cpus();

	for_each_cpu(i, cpus)
		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}

int caam_qi_init(struct platform_device *caam_pdev)
{
	int err, i;
	struct device *ctrldev = &caam_pdev->dev, *qidev;
	struct caam_drv_private *ctrlpriv;
	const cpumask_t *cpus = qman_affine_cpus();

	ctrlpriv = dev_get_drvdata(ctrldev);
	qidev = ctrldev;

	/* Initialize the congestion detection */
	err = init_cgr(qidev);
	if (err) {
		dev_err(qidev, "CGR initialization failed: %d\n", err);
		return err;
	}

	/* Initialise response FQs */
	err = alloc_rsp_fqs(qidev);
	if (err) {
		dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
		free_rsp_fqs();
		return err;
	}

	/*
	 * Enable the NAPI contexts on each core that has an affine portal.
	 */
	for_each_cpu(i, cpus) {
		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
		struct caam_napi *caam_napi = &priv->caam_napi;
		struct napi_struct *irqtask = &caam_napi->irqtask;
		struct net_device *net_dev = &priv->net_dev;

		net_dev->dev = *qidev;
		INIT_LIST_HEAD(&net_dev->napi_list);

		netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
					 CAAM_NAPI_WEIGHT);

		napi_enable(irqtask);
	}

	qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     dma_get_cache_alignment(), 0, NULL);
	if (!qi_cache) {
		dev_err(qidev, "Can't allocate CAAM cache\n");
		free_rsp_fqs();
		return -ENOMEM;
	}

	caam_debugfs_qi_init(ctrlpriv);

	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev);
	if (err)
		return err;

	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");

	return 0;
}