// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"
#define SEM_VAL_MASK		GENMASK_ULL(11, 0)
#define SEM_INDEX_MASK		GENMASK_ULL(4, 0)
#define BULK_XFER		BIT(3)
#define GEN_COMPLETION		BIT(4)
#define INBOUND_XFER		1
#define OUTBOUND_XFER		2
#define REQHP_OFF		0x0 /* we read this */
#define REQTP_OFF		0x4 /* we write this */
#define RSPHP_OFF		0x8 /* we write this */
#define RSPTP_OFF		0xc /* we read this */

#define ENCODE_SEM(val, index, sync, cmd, flags)			\
		({							\
			FIELD_PREP(GENMASK(11, 0), (val)) |		\
			FIELD_PREP(GENMASK(20, 16), (index)) |		\
			FIELD_PREP(BIT(22), (sync)) |			\
			FIELD_PREP(GENMASK(26, 24), (cmd)) |		\
			FIELD_PREP(GENMASK(30, 29), (flags)) |		\
			FIELD_PREP(BIT(31), (cmd) ? 1 : 0);		\
		})
#define NUM_EVENTS	128
#define NUM_DELAYS	10
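/*
 * Illustrative example only: a presync "wait until the semaphore is greater
 * than zero" on semaphore index 2 would be built as
 * ENCODE_SEM(0, 2, 1, QAIC_SEM_WAIT_GT_0, 0); the value lands in bits 11:0,
 * the index in bits 20:16, the sync flag in bit 22, the command in bits
 * 26:24, and bit 31 is set because any non-zero command enables the
 * semaphore operation.
 */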
static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");

static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
module_param(datapath_poll_interval_us, uint, 0600);
MODULE_PARM_DESC(datapath_poll_interval_us,
		 "Amount of time to sleep between activity when datapath polling is enabled");
struct dbc_req {
	/*
	 * A request ID is assigned to each memory handle going in DMA queue.
	 * As a single memory handle can enqueue multiple elements in DMA queue
	 * all of them will have the same request ID.
	 */
	__le16	req_id;
	/* Future use */
	__u8	seq_id;
	/*
	 * Special encoded variable
	 * 7	0 - Do not force to generate MSI after DMA is completed
	 *	1 - Force to generate MSI after DMA is completed
	 * 4	1 - Generate completion element in the response queue
	 *	0 - No Completion Code
	 * 3	0 - DMA request is a Link list transfer
	 *	1 - DMA request is a Bulk transfer
	 * 1:0	00 - No DMA transfer involved
	 *	01 - DMA transfer is part of inbound transfer
	 *	10 - DMA transfer has outbound transfer
	 */
	__u8	cmd;
	__le32	resv;
	/* Source address for the transfer */
	__le64	src_addr;
	/* Destination address for the transfer */
	__le64	dest_addr;
	/* Length of transfer request */
	__le32	len;
	__le32	resv_1;
	/* Doorbell address */
	__le64	db_addr;
	/*
	 * Special encoded variable
	 * 7	1 - Doorbell(db) write
	 *	0 - No doorbell write
	 * 1:0	00 - 32 bit access, db address must be aligned to 32bit-boundary
	 *	01 - 16 bit access, db address must be aligned to 16bit-boundary
	 *	10 - 8 bit access, db address must be aligned to 8bit-boundary
	 */
	__u8	db_len;
	__u8	resv_2;
	__le16	resv_3;
	/* 32 bit data written to doorbell address */
	__le32	db_data;
	/*
	 * Special encoded variable
	 * All the fields of sem_cmdX are passed from user and all are ORed
	 * together to form sem_cmd.
	 * 0:11		Semaphore value
	 * 20:16	Semaphore index
	 * 22		Semaphore Sync
	 * 26:24	Semaphore command
	 * 29		Semaphore DMA out bound sync fence
	 * 30		Semaphore DMA in bound sync fence
	 * 31		Enable semaphore command
	 */
	__le32	sem_cmd0;
	__le32	sem_cmd1;
	__le32	sem_cmd2;
	__le32	sem_cmd3;
} __packed;
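/*
 * For illustration: encode_reqs() below builds cmd as BULK_XFER plus
 * INBOUND_XFER or OUTBOUND_XFER depending on the DMA direction, and ORs in
 * GEN_COMPLETION only on the last element of a slice, so exactly one
 * completion element is posted to the response queue per request ID.
 */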
struct dbc_rsp {
	/* Request ID of the memory handle whose DMA transaction is completed */
	__le16	req_id;
	/* Status of the DMA transaction. 0 : Success otherwise failure */
	__le16	status;
} __packed;
inline int get_dbc_req_elem_size(void)
{
	return sizeof(struct dbc_req);
}

inline int get_dbc_rsp_elem_size(void)
{
	return sizeof(struct dbc_rsp);
}
static void free_slice(struct kref *kref)
{
	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);

	list_del(&slice->slice);
	drm_gem_object_put(&slice->bo->base);
	sg_free_table(slice->sgt);
	kfree(slice->sgt);
	kfree(slice->reqs);
	kfree(slice);
}
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
					struct sg_table *sgt_in, u64 size, u64 offset)
{
	int total_len, len, nents, offf = 0, offl = 0;
	struct scatterlist *sg, *sgn, *sgf, *sgl;
	struct sg_table *sgt;
	int ret, j;

	/* find out number of relevant nents needed for this mem */
	total_len = 0;
	sgf = NULL;
	sgl = NULL;
	nents = 0;

	size = size ? size : PAGE_SIZE;
	for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
		len = sg_dma_len(sg);

		if (!len)
			continue;
		if (offset >= total_len && offset < total_len + len) {
			sgf = sg;
			offf = offset - total_len;
		}

		if (sgf)
			nents++;

		if (offset + size >= total_len &&
		    offset + size <= total_len + len) {
			sgl = sg;
			offl = offset + size - total_len;
			break;
		}
		total_len += len;
	}

	if (!sgf || !sgl) {
		ret = -EINVAL;
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret)
		goto free_sgt;

	/* copy relevant sg node and fix page and length */
	sgn = sgf;
	for_each_sgtable_sg(sgt, sg, j) {
		memcpy(sg, sgn, sizeof(*sg));
		if (sgn == sgf) {
			sg_dma_address(sg) += offf;
			sg_dma_len(sg) -= offf;
			sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
		} else {
			offf = 0;
		}
		if (sgn == sgl) {
			sg_dma_len(sg) = offl - offf;
			sg_set_page(sg, sg_page(sgn), offl - offf, offf);
		}
		sgn = sg_next(sgn);
	}

	*sgt_out = sgt;
	return 0;

free_sgt:
	kfree(sgt);
out:
	*sgt_out = NULL;
	return ret;
}
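/*
 * Worked example (illustrative only): with three 4K segments in sgt_in, a
 * slice of size 0x1000 at offset 0x1800 starts in segment 1 (offf = 0x800)
 * and ends in segment 2 (offl = 0x800), so the cloned table gets nents = 2;
 * the first entry is segment 1 advanced by 0x800 with length 0x800, and the
 * last entry is segment 2 trimmed to length 0x800.
 */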
static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
		       struct qaic_attach_slice_entry *req)
{
	__le64 db_addr = cpu_to_le64(req->db_addr);
	__le32 db_data = cpu_to_le32(req->db_data);
	struct scatterlist *sg;
	__u8 cmd = BULK_XFER;
	int presync_sem;
	u64 dev_addr;
	__u8 db_len;
	int i;

	if (!slice->no_xfer)
		cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);

	if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
		return -EINVAL;

	presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
	if (presync_sem > 1)
		return -EINVAL;

	presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
		      req->sem2.presync << 2 | req->sem3.presync << 3;

	switch (req->db_len) {
	case 32:
		db_len = BIT(7);
		break;
	case 16:
		db_len = BIT(7) | 1;
		break;
	case 8:
		db_len = BIT(7) | 2;
		break;
	case 0:
		db_len = 0; /* doorbell is not active for this command */
		break;
	default:
		return -EINVAL; /* should never hit this */
	}

	/*
	 * When we end up splitting up a single request (ie a buf slice) into
	 * multiple DMA requests, we have to manage the sync data carefully.
	 * There can only be one presync sem. That needs to be on every xfer
	 * so that the DMA engine doesn't transfer data before the receiver is
	 * ready. We only do the doorbell and postsync sems after the xfer.
	 * To guarantee previous xfers for the request are complete, we use a
	 * fence.
	 */
	dev_addr = req->dev_addr;
	for_each_sgtable_sg(slice->sgt, sg, i) {
		slice->reqs[i].cmd = cmd;
		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						      sg_dma_address(sg) : dev_addr);
		slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						       dev_addr : sg_dma_address(sg));
		/*
		 * sg_dma_len(sg) returns size of a DMA segment, maximum DMA
		 * segment size is set to UINT_MAX by qaic and hence return
		 * values of sg_dma_len(sg) can never exceed u32 range. So,
		 * by down sizing we are not corrupting the value.
		 */
		slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
		switch (presync_sem) {
		case BIT(0):
			slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
									 req->sem0.index,
									 req->sem0.presync,
									 req->sem0.cmd,
									 req->sem0.flags));
			break;
		case BIT(1):
			slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
									 req->sem1.index,
									 req->sem1.presync,
									 req->sem1.cmd,
									 req->sem1.flags));
			break;
		case BIT(2):
			slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
									 req->sem2.index,
									 req->sem2.presync,
									 req->sem2.cmd,
									 req->sem2.flags));
			break;
		case BIT(3):
			slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
									 req->sem3.index,
									 req->sem3.presync,
									 req->sem3.cmd,
									 req->sem3.flags));
			break;
		}
		dev_addr += sg_dma_len(sg);
	}
	/* add post transfer stuff to last segment */
	i--;
	slice->reqs[i].cmd |= GEN_COMPLETION;
	slice->reqs[i].db_addr = db_addr;
	slice->reqs[i].db_len = db_len;
	slice->reqs[i].db_data = db_data;
	/*
	 * Add a fence if we have more than one request going to the hardware
	 * representing the entirety of the user request, and the user request
	 * has no presync condition.
	 * Fences are expensive, so we try to avoid them. We rely on the
	 * hardware behavior to avoid needing one when there is a presync
	 * condition. When a presync exists, all requests for that same
	 * presync will be queued into a fifo. Thus, since we queue the
	 * post xfer activity only on the last request we queue, the hardware
	 * will ensure that the last queued request is processed last, thus
	 * making sure the post xfer activity happens at the right time without
	 * a fence.
	 */
	if (i && !presync_sem)
		req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
				    QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
	slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
							 req->sem0.presync, req->sem0.cmd,
							 req->sem0.flags));
	slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
							 req->sem1.presync, req->sem1.cmd,
							 req->sem1.flags));
	slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
							 req->sem2.presync, req->sem2.cmd,
							 req->sem2.flags));
	slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
							 req->sem3.presync, req->sem3.cmd,
							 req->sem3.flags));

	return 0;
}
static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
			      struct qaic_attach_slice_entry *slice_ent)
{
	struct sg_table *sgt = NULL;
	struct bo_slice *slice;
	int ret;

	ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
	if (ret)
		goto out;

	slice = kmalloc(sizeof(*slice), GFP_KERNEL);
	if (!slice) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
	if (!slice->reqs) {
		ret = -ENOMEM;
		goto free_slice;
	}

	slice->no_xfer = !slice_ent->size;
	slice->sgt = sgt;
	slice->nents = sgt->nents;
	slice->dir = bo->dir;
	slice->bo = bo;
	slice->size = slice_ent->size;
	slice->offset = slice_ent->offset;

	ret = encode_reqs(qdev, slice, slice_ent);
	if (ret)
		goto free_req;

	bo->total_slice_nents += sgt->nents;
	kref_init(&slice->ref_count);
	drm_gem_object_get(&bo->base);
	list_add_tail(&slice->slice, &bo->slices);

	return 0;

free_req:
	kfree(slice->reqs);
free_slice:
	kfree(slice);
free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
out:
	return ret;
}
static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
{
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct page **pages;
	int *pages_order;
	int buf_extra;
	int max_order;
	int nr_pages;
	int ret = 0;
	int i, j, k;
	int order;

	if (size) {
		nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
		/*
		 * calculate how much extra we are going to allocate, to remove
		 * later
		 */
		buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
		max_order = min(MAX_ORDER - 1, get_order(size));
	} else {
		/* allocate a single page for book keeping */
		nr_pages = 1;
		buf_extra = 0;
		max_order = 0;
	}

	pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}
	pages_order = (void *)pages + sizeof(*pages) * nr_pages;

	/*
	 * Allocate requested memory using alloc_pages. It is possible to allocate
	 * the requested memory in multiple chunks by calling alloc_pages
	 * multiple times. Use SG table to handle multiple allocated pages.
	 */
	i = 0;
	while (nr_pages > 0) {
		order = min(get_order(nr_pages * PAGE_SIZE), max_order);
		while (1) {
			pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
					       __GFP_NOWARN | __GFP_ZERO |
					       (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
					       order);
			if (pages[i])
				break;
			if (!order--) {
				ret = -ENOMEM;
				goto free_partial_alloc;
			}
		}

		max_order = order;
		pages_order[i] = order;

		nr_pages -= 1 << order;
		if (nr_pages <= 0)
			/* account for over allocation */
			buf_extra += abs(nr_pages) * PAGE_SIZE;
		i++;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto free_partial_alloc;
	}

	if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	/* Populate the SG table with the allocated memory pages */
	sg = sgt->sgl;
	for (k = 0; k < i; k++, sg = sg_next(sg)) {
		/* Last entry requires special handling */
		if (k < i - 1) {
			sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
		} else {
			sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
			sg_mark_end(sg);
		}
	}

	kvfree(pages);
	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
free_partial_alloc:
	for (j = 0; j < i; j++)
		__free_pages(pages[j], pages_order[j]);
	kvfree(pages);
out:
	*sgt_out = NULL;
	return ret;
}
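/*
 * Example (illustrative, assuming 4K pages): for a 1 MiB request, nr_pages
 * is 256 and max_order is capped at get_order(1 MiB) = 8, so ideally one
 * order-8 allocation covers the whole BO. If that fails, the order is
 * stepped down and the remainder is satisfied with smaller chunks; any
 * over-allocation from the final chunk is trimmed off the last scatterlist
 * entry via buf_extra.
 */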
static bool invalid_sem(struct qaic_sem *sem)
{
	if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
	    !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
	    sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
	    sem->cmd > QAIC_SEM_WAIT_GT_0)
		return true;
	return false;
}
static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
			     u32 count, u64 total_size)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
		      slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
		    invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
		    invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
			return -EINVAL;

		if (slice_ent[i].offset + slice_ent[i].size > total_size)
			return -EINVAL;
	}

	return 0;
}
static void qaic_free_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;

	for (sg = sgt->sgl; sg; sg = sg_next(sg))
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	sg_free_table(sgt);
	kfree(sgt);
}
static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
				const struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
}
static const struct vm_operations_struct drm_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct qaic_bo *bo = to_qaic_bo(obj);
	unsigned long offset = 0;
	struct scatterlist *sg;
	int ret = 0;

	if (obj->import_attach)
		return -EINVAL;

	for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
		if (sg_page(sg)) {
			ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
					      sg->length, vma->vm_page_prot);
			if (ret)
				goto out;
			offset += sg->length;
		}
	}

out:
	return ret;
}
static void qaic_free_object(struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	if (obj->import_attach) {
		/* DMABUF/PRIME Path */
		drm_prime_gem_destroy(obj, NULL);
	} else {
		/* Private buffer allocation path */
		qaic_free_sgt(bo->sgt);
	}

	drm_gem_object_release(obj);
	kfree(bo);
}
static const struct drm_gem_object_funcs qaic_gem_funcs = {
	.free = qaic_free_object,
	.print_info = qaic_gem_print_info,
	.mmap = qaic_gem_object_mmap,
	.vm_ops = &drm_vm_ops,
};
static struct qaic_bo *qaic_alloc_init_bo(void)
{
	struct qaic_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bo->slices);
	init_completion(&bo->xfer_done);
	complete_all(&bo->xfer_done);

	return bo;
}
int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_create_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	size_t size;
	int ret;

	if (args->pad)
		return -EINVAL;

	size = PAGE_ALIGN(args->size);
	if (size == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto unlock_dev_srcu;
	}
	obj = &bo->base;

	drm_gem_private_object_init(dev, obj, size);

	obj->funcs = &qaic_gem_funcs;
	ret = create_sgt(qdev, &bo->sgt, size);
	if (ret)
		goto free_bo;

	bo->size = args->size;

	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
	if (ret)
		goto free_sgt;

	bo->handle = args->handle;
	drm_gem_object_put(obj);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

free_sgt:
	qaic_free_sgt(bo->sgt);
free_bo:
	kfree(bo);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_mmap_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	int ret;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_dev_srcu;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);

unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int ret;

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto out;
	}

	obj = &bo->base;
	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto attach_fail;
	}

	if (!attach->dmabuf->size) {
		ret = -EINVAL;
		goto size_align_fail;
	}

	drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
	/*
	 * skipping dma_buf_map_attachment() as we do not know the direction
	 * just yet. Once the direction is known in the subsequent IOCTL to
	 * attach slicing, we can do it then.
	 */

	obj->funcs = &qaic_gem_funcs;
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

size_align_fail:
	dma_buf_detach(dma_buf, attach);
attach_fail:
	dma_buf_put(dma_buf);
	kfree(bo);
out:
	return ERR_PTR(ret);
}
static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
{
	struct drm_gem_object *obj = &bo->base;
	struct sg_table *sgt;
	int ret;

	if (obj->import_attach->dmabuf->size < hdr->size)
		return -EINVAL;

	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		return ret;
	}

	bo->sgt = sgt;
	bo->size = hdr->size;

	return 0;
}
static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	if (bo->size != hdr->size)
		return -EINVAL;

	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
	if (ret)
		return -EFAULT;

	return 0;
}
static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
			   struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	if (bo->base.import_attach)
		ret = qaic_prepare_import_bo(bo, hdr);
	else
		ret = qaic_prepare_export_bo(qdev, bo, hdr);

	if (!ret)
		bo->dir = hdr->dir;

	return ret;
}
static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
	bo->sgt = NULL;
	bo->size = 0;
}

static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
}

static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	if (bo->base.import_attach)
		qaic_unprepare_import_bo(bo);
	else
		qaic_unprepare_export_bo(qdev, bo);

	bo->dir = 0;
}
static void qaic_free_slices_bo(struct qaic_bo *bo)
{
	struct bo_slice *slice, *temp;

	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
		kref_put(&slice->ref_count, free_slice);
}
static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr,
				  struct qaic_attach_slice_entry *slice_ent)
{
	int ret, i;

	for (i = 0; i < hdr->count; i++) {
		ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
		if (ret) {
			qaic_free_slices_bo(bo);
			return ret;
		}
	}

	if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
		qaic_free_slices_bo(bo);
		return -ENOSPC;
	}

	bo->sliced = true;
	bo->nr_slice = hdr->count;
	list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);

	return 0;
}
int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_attach_slice_entry *slice_ent;
	struct qaic_attach_slice *args = data;
	int rcu_id, usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan	*dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long arg_size;
	struct qaic_user *usr;
	u8 __user *user_data;
	struct qaic_bo *bo;
	int ret;

	if (args->hdr.count == 0)
		return -EINVAL;

	arg_size = args->hdr.count * sizeof(*slice_ent);
	if (arg_size / args->hdr.count != sizeof(*slice_ent))
		return -EINVAL;

	if (args->hdr.size == 0)
		return -EINVAL;

	if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
		return -EINVAL;

	if (args->data == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	user_data = u64_to_user_ptr(args->data);

	slice_ent = kzalloc(arg_size, GFP_KERNEL);
	if (!slice_ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(slice_ent, user_data, arg_size);
	if (ret) {
		ret = -EFAULT;
		goto free_slice_ent;
	}

	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, args->hdr.size);
	if (ret)
		goto free_slice_ent;

	obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
	if (!obj) {
		ret = -ENOENT;
		goto free_slice_ent;
	}

	bo = to_qaic_bo(obj);

	if (bo->sliced) {
		ret = -EINVAL;
		goto put_bo;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];
	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EINVAL;
		goto unlock_ch_srcu;
	}

	ret = qaic_prepare_bo(qdev, bo, &args->hdr);
	if (ret)
		goto unlock_ch_srcu;

	ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
	if (ret)
		goto unprepare_bo;

	if (args->hdr.dir == DMA_TO_DEVICE)
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);

	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	drm_gem_object_put(obj);
	kfree(slice_ent);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

unprepare_bo:
	qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
put_bo:
	drm_gem_object_put(obj);
free_slice_ent:
	kfree(slice_ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
				 u32 head, u32 *ptail)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
	struct dbc_req *reqs = slice->reqs;
	u32 tail = *ptail;
	u32 avail;

	avail = head - tail;
	if (head <= tail)
		avail += dbc->nelem;

	--avail;

	if (avail < slice->nents)
		return -EAGAIN;

	if (tail + slice->nents > dbc->nelem) {
		avail = dbc->nelem - tail;
		avail = min_t(u32, avail, slice->nents);
		memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
		       sizeof(*reqs) * avail);
		reqs += avail;
		avail = slice->nents - avail;
		if (avail)
			memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
	} else {
		memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
		       sizeof(*reqs) * slice->nents);
	}

	*ptail = (tail + slice->nents) % dbc->nelem;

	return 0;
}
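/*
 * Illustrative example: with nelem = 8, head = 2 and tail = 6 there are
 * three usable slots (one slot is kept free so a full ring can be told
 * apart from an empty one). A 3-element slice wraps, so it is copied with
 * two memcpy() calls into slots 6, 7 and 0, and *ptail becomes 1.
 */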
/*
 * Based on the value of resize we may only need to transmit first_n
 * entries and the last entry, with last_bytes to send from the last entry.
 * Note that first_n could be 0.
 */
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
					 u64 resize, u32 dbc_id, u32 head, u32 *ptail)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
	struct dbc_req *reqs = slice->reqs;
	struct dbc_req *last_req;
	u32 tail = *ptail;
	u64 total_bytes;
	u64 last_bytes;
	u32 first_n;
	u32 avail;
	int i;

	avail = head - tail;
	if (head <= tail)
		avail += dbc->nelem;

	--avail;

	total_bytes = 0;
	for (i = 0; i < slice->nents; i++) {
		total_bytes += le32_to_cpu(reqs[i].len);
		if (total_bytes >= resize)
			break;
	}

	if (total_bytes < resize) {
		/* User space should have used the full buffer path. */
		return -EINVAL;
	}

	first_n = i;
	last_bytes = i ? resize + le32_to_cpu(reqs[i].len) - total_bytes : resize;

	if (avail < (first_n + 1))
		return -EAGAIN;

	if (first_n) {
		if (tail + first_n > dbc->nelem) {
			avail = dbc->nelem - tail;
			avail = min_t(u32, avail, first_n);
			memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
			       sizeof(*reqs) * avail);
			last_req = reqs + avail;
			avail = first_n - avail;
			if (avail)
				memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
		} else {
			memcpy(dbc->req_q_base + tail * get_dbc_req_elem_size(), reqs,
			       sizeof(*reqs) * first_n);
		}
	}

	/* Copy over the last entry. Here we need to adjust len to the left over
	 * size, and set src and dst to the entry it is copied to.
	 */
	last_req = dbc->req_q_base + (tail + first_n) % dbc->nelem * get_dbc_req_elem_size();
	memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));

	/*
	 * last_bytes holds size of a DMA segment, maximum DMA segment size is
	 * set to UINT_MAX by qaic and hence last_bytes can never exceed u32
	 * range. So, by down sizing we are not corrupting the value.
	 */
	last_req->len = cpu_to_le32((u32)last_bytes);
	last_req->src_addr = reqs[first_n].src_addr;
	last_req->dest_addr = reqs[first_n].dest_addr;

	*ptail = (tail + first_n + 1) % dbc->nelem;

	return 0;
}
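/*
 * Illustrative example: for a slice made of three 4K requests and a resize
 * of 6K, the loop above stops at i = 1, so first_n = 1 and
 * last_bytes = 6K + 4K - 8K = 2K. The first request is copied unchanged,
 * and the final queued element is a copy of the slice's last request (so it
 * keeps the completion, doorbell and semaphore fields) with its length set
 * to 2K and its addresses taken from request 1.
 */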
static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, struct dma_bridge_chan *dbc, u32 head,
				  u32 *tail)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct bo_slice *slice;
	unsigned long flags;
	struct qaic_bo *bo;
	bool queued;
	int i, j;
	int ret;

	for (i = 0; i < count; i++) {
		/*
		 * ref count will be decremented when the transfer of this
		 * buffer is complete. It is inside dbc_irq_threaded_fn().
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto failed_to_send_bo;
		}

		bo = to_qaic_bo(obj);

		if (!bo->sliced) {
			ret = -EINVAL;
			goto failed_to_send_bo;
		}

		if (is_partial && pexec[i].resize > bo->size) {
			ret = -EINVAL;
			goto failed_to_send_bo;
		}

		spin_lock_irqsave(&dbc->xfer_lock, flags);
		queued = bo->queued;
		bo->queued = true;
		if (queued) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			ret = -EINVAL;
			goto failed_to_send_bo;
		}

		bo->req_id = dbc->next_req_id++;

		list_for_each_entry(slice, &bo->slices, slice) {
			/*
			 * If this slice does not fall under the given
			 * resize then skip this slice and continue the loop
			 */
			if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset)
				continue;

			for (j = 0; j < slice->nents; j++)
				slice->reqs[j].req_id = cpu_to_le16(bo->req_id);

			/*
			 * If it is a partial execute ioctl call then check if
			 * resize has cut this slice short then do a partial copy
			 * else do complete copy
			 */
			if (is_partial && pexec[i].resize &&
			    pexec[i].resize < slice->offset + slice->size)
				ret = copy_partial_exec_reqs(qdev, slice,
							     pexec[i].resize - slice->offset,
							     dbc->id, head, tail);
			else
				ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
			if (ret) {
				bo->queued = false;
				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
				goto failed_to_send_bo;
			}
		}
		reinit_completion(&bo->xfer_done);
		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
	}

	return 0;

failed_to_send_bo:
	if (likely(obj))
		drm_gem_object_put(obj);
	for (j = 0; j < i; j++) {
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
		obj = &bo->base;
		bo->queued = false;
		list_del(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		drm_gem_object_put(obj);
	}
	return ret;
}
static void update_profiling_data(struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int i;

	for (i = 0; i < count; i++) {
		/*
		 * Since we already committed the BO to hardware, the only way
		 * this should fail is a pending signal. We can't cancel the
		 * submit to hardware, so we have to just skip the profiling
		 * data. In case the signal is not fatal to the process, we
		 * return success so that the user doesn't try to resubmit.
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj)
			continue;
		bo = to_qaic_bo(obj);
		bo->perf_stats.req_received_ts = received_ts;
		bo->perf_stats.req_submit_ts = submit_ts;
		bo->perf_stats.queue_level_before = queue_level;
		queue_level += bo->total_slice_nents;
		drm_gem_object_put(obj);
	}
}
static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
				   bool is_partial)
{
	struct qaic_execute *args = data;
	struct qaic_execute_entry *exec;
	struct dma_bridge_chan *dbc;
	int usr_rcu_id, qdev_rcu_id;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	u8 __user *user_data;
	unsigned long n;
	u64 received_ts;
	u32 queue_level;
	u64 submit_ts;
	int rcu_id;
	u32 head;
	u32 tail;
	u64 size;
	int ret;

	received_ts = ktime_get_ns();

	size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
	n = (unsigned long)size * args->hdr.count;
	if (args->hdr.count == 0 || n / args->hdr.count != size)
		return -EINVAL;

	user_data = u64_to_user_ptr(args->data);

	exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	if (copy_from_user(exec, user_data, n)) {
		ret = -EFAULT;
		goto free_exec;
	}

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (!dbc->usr || dbc->usr->handle != usr->handle) {
		ret = -EPERM;
		goto release_ch_rcu;
	}

	head = readl(dbc->dbc_base + REQHP_OFF);
	tail = readl(dbc->dbc_base + REQTP_OFF);

	if (head == U32_MAX || tail == U32_MAX) {
		/* PCI link error */
		ret = -ENODEV;
		goto release_ch_rcu;
	}

	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);

	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
				     head, &tail);
	if (ret)
		goto release_ch_rcu;

	/* Finalize commit to hardware */
	submit_ts = ktime_get_ns();
	writel(tail, dbc->dbc_base + REQTP_OFF);

	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
			      submit_ts, queue_level);

	if (datapath_polling)
		schedule_work(&dbc->poll_work);

release_ch_rcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
free_exec:
	kfree(exec);
	return ret;
}
int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
}

int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
}
/*
 * Our interrupt handling is a bit more complicated than a simple ideal, but
 * sadly necessary.
 *
 * Each dbc has a completion queue. Entries in the queue correspond to DMA
 * requests which the device has processed. The hardware already has a built
 * in irq mitigation. When the device puts an entry into the queue, it will
 * only trigger an interrupt if the queue was empty. Therefore, when adding
 * the Nth event to a non-empty queue, the hardware doesn't trigger an
 * interrupt. This means the host doesn't get additional interrupts signaling
 * the same thing - the queue has something to process.
 * This behavior can be overridden in the DMA request.
 * This means that when the host receives an interrupt, it is required to
 * drain the queue.
 *
 * This behavior is what NAPI attempts to accomplish, although we can't use
 * NAPI as we don't have a netdev. We use threaded irqs instead.
 *
 * However, there is a situation where the host drains the queue fast enough
 * that every event causes an interrupt. Typically this is not a problem as
 * the rate of events would be low. However, that is not the case with
 * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
 * lprnet, the host receives roughly 80k interrupts per second from the device
 * (per /proc/interrupts). While NAPI documentation indicates the host should
 * just chug along, sadly that behavior causes instability in some hosts.
 *
 * Therefore, we implement an interrupt disable scheme similar to NAPI. The
 * key difference is that we will delay after draining the queue for a small
 * time to allow additional events to come in via polling. Using the above
 * lprnet workload, this reduces the number of interrupts processed from
 * ~80k/sec to about 64 in 5 minutes and appears to solve the system
 * instability.
 */
irqreturn_t dbc_irq_handler(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	if (!dbc->usr) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_HANDLED;
	}

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (head == tail) { /* queue empty */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	disable_irq_nosync(irq);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_WAKE_THREAD;
}
void irq_polling_work(struct work_struct *work)
{
	struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
	unsigned long flags;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	while (1) {
		if (dbc->qdev->in_reset) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		if (!dbc->usr) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		if (list_empty(&dbc->xfer_list)) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);

		head = readl(dbc->dbc_base + RSPHP_OFF);
		if (head == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		tail = readl(dbc->dbc_base + RSPTP_OFF);
		if (tail == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		if (head != tail) {
			irq_wake_thread(dbc->irq, dbc);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		cond_resched();
		usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
	}
}
irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int event_count = NUM_EVENTS;
	int delay_count = NUM_DELAYS;
	struct qaic_device *qdev;
	struct qaic_bo *bo, *i;
	struct dbc_rsp *rsp;
	unsigned long flags;
	int rcu_id;
	u16 status;
	u16 req_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) /* PCI link error */
		goto error_out;

	qdev = dbc->qdev;
read_fifo:

	if (!event_count) {
		event_count = NUM_EVENTS;
		cond_resched();
	}

	/*
	 * if this channel isn't assigned or gets unassigned during processing
	 * we have nothing further to do
	 */
	if (!dbc->usr)
		goto error_out;

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) /* PCI link error */
		goto error_out;

	if (head == tail) { /* queue empty */
		if (delay_count--) {
			usleep_range(100, 200);
			goto read_fifo; /* check for a new event */
		}
		goto normal_out;
	}

	delay_count = NUM_DELAYS;
	while (head != tail) {
		if (!event_count--)
			break;

		rsp = dbc->rsp_q_base + head * sizeof(*rsp);
		req_id = le16_to_cpu(rsp->req_id);
		status = le16_to_cpu(rsp->status);
		if (status)
			pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		/*
		 * A BO can receive multiple interrupts, since a BO can be
		 * divided into multiple slices and a buffer receives as many
		 * interrupts as slices. So until it receives interrupts for
		 * all the slices we cannot mark that buffer complete.
		 */
		list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
			if (bo->req_id == req_id)
				bo->nr_slice_xfer_done++;
			else
				continue;

			if (bo->nr_slice_xfer_done < bo->nr_slice)
				break;

			/*
			 * At this point we have received all the interrupts for
			 * BO, which means BO execution is complete.
			 */
			dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
			bo->nr_slice_xfer_done = 0;
			bo->queued = false;
			list_del(&bo->xfer_list);
			bo->perf_stats.req_processed_ts = ktime_get_ns();
			complete_all(&bo->xfer_done);
			drm_gem_object_put(&bo->base);
			break;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		head = (head + 1) % dbc->nelem;
	}

	/*
	 * Update the head pointer of response queue and let the device know
	 * that we have consumed elements from the queue.
	 */
	writel(head, dbc->dbc_base + RSPHP_OFF);

	/* elements might have been put in the queue while we were processing */
	goto read_fifo;

normal_out:
	if (likely(!datapath_polling))
		enable_irq(irq);
	else
		schedule_work(&dbc->poll_work);
	/* checking the fifo and enabling irqs is a race, missed event check */
	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail != U32_MAX && head != tail) {
		if (likely(!datapath_polling))
			disable_irq_nosync(irq);
		goto read_fifo;
	}
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_HANDLED;

error_out:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	if (likely(!datapath_polling))
		enable_irq(irq);
	else
		schedule_work(&dbc->poll_work);

	return IRQ_HANDLED;
}
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_wait *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long timeout;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	int rcu_id;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EPERM;
		goto unlock_ch_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_ch_srcu;
	}

	bo = to_qaic_bo(obj);
	timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms;
	timeout = msecs_to_jiffies(timeout);
	ret = wait_for_completion_interruptible_timeout(&bo->xfer_done, timeout);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto put_obj;
	}
	if (ret > 0)
		ret = 0;

	if (!dbc->usr)
		ret = -EPERM;

put_obj:
	drm_gem_object_put(obj);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_perf_stats_entry *ent = NULL;
	struct qaic_perf_stats *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	int ret, i;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
	if (!ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
	if (ret) {
		ret = -EFAULT;
		goto free_ent;
	}

	for (i = 0; i < args->hdr.count; i++) {
		obj = drm_gem_object_lookup(file_priv, ent[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto free_ent;
		}
		bo = to_qaic_bo(obj);
		/*
		 * If the perf stats ioctl is called before the wait ioctl has
		 * completed, then the latency information is invalid.
		 */
		if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) {
			ent[i].device_latency_us = 0;
		} else {
			ent[i].device_latency_us = div_u64((bo->perf_stats.req_processed_ts -
							    bo->perf_stats.req_submit_ts), 1000);
		}
		ent[i].submit_latency_us = div_u64((bo->perf_stats.req_submit_ts -
						    bo->perf_stats.req_received_ts), 1000);
		ent[i].queue_level_before = bo->perf_stats.queue_level_before;
		ent[i].num_queue_element = bo->total_slice_nents;
		drm_gem_object_put(obj);
	}

	if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
		ret = -EFAULT;

free_ent:
	kfree(ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}
static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{
	unsigned long flags;
	struct qaic_bo *bo;

	spin_lock_irqsave(&dbc->xfer_lock, flags);
	while (!list_empty(&dbc->xfer_list)) {
		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
		bo->queued = false;
		list_del(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		complete_all(&bo->xfer_done);
		drm_gem_object_put(&bo->base);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
	}
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
	if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
		return -EPERM;

	qdev->dbc[dbc_id].usr = NULL;
	synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
	return 0;
}
/**
 * enable_dbc - Enable the DBC. DBCs are disabled by removing the context of
 * user. Add user context back to DBC to enable it. This function trusts the
 * DBC ID passed and expects the DBC to be disabled.
 * @qdev: Qranium device handle
 * @dbc_id: ID of the DBC
 * @usr: User context
 */
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
	qdev->dbc[dbc_id].usr = usr;
}
void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];

	dbc->usr = NULL;
	empty_xfer_list(qdev, dbc);
	synchronize_srcu(&dbc->ch_lock);
	/*
	 * Threads holding channel lock, may add more elements in the xfer_list.
	 * Flush out these elements from xfer_list.
	 */
	empty_xfer_list(qdev, dbc);
}
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{
	struct bo_slice *slice, *slice_temp;
	struct qaic_bo *bo, *bo_temp;
	struct dma_bridge_chan *dbc;

	dbc = &qdev->dbc[dbc_id];
	if (!dbc->in_use)
		return;

	wakeup_dbc(qdev, dbc_id);

	dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
	dbc->total_size = 0;
	dbc->req_q_base = NULL;
	dbc->dma_addr = 0;
	dbc->nelem = 0;
	dbc->usr = NULL;

	list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
		list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
			kref_put(&slice->ref_count, free_slice);
		bo->sliced = false;
		INIT_LIST_HEAD(&bo->slices);
		bo->total_slice_nents = 0;
		bo->dir = 0;
		bo->nr_slice = 0;
		bo->nr_slice_xfer_done = 0;
		bo->queued = false;
		bo->req_id = 0;
		init_completion(&bo->xfer_done);
		complete_all(&bo->xfer_done);
		list_del(&bo->bo_list);
		bo->perf_stats.req_received_ts = 0;
		bo->perf_stats.req_submit_ts = 0;
		bo->perf_stats.req_processed_ts = 0;
		bo->perf_stats.queue_level_before = 0;
	}

	dbc->in_use = false;
	wake_up(&dbc->dbc_release);
}