// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

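/*
 * __get_desc() - claim the descriptor slot at @idx that was just reserved in
 * the wq's sbitmap, clear its hardware and completion records, and program
 * the PASID (when enabled) plus the completion interrupt handle before
 * handing it back to the caller.
 */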
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
        struct idxd_desc *desc;
        struct idxd_device *idxd = wq->idxd;

        desc = wq->descs[idx];
        memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
        memset(desc->completion, 0, idxd->data->compl_size);
        desc->cpu = cpu;

        if (device_pasid_enabled(idxd))
                desc->hw->pasid = idxd->pasid;

        /*
         * Descriptor completion vectors are 1...N for MSIX. We will round
         * robin through the N vectors.
         */
        wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
        if (!idxd->int_handles) {
                desc->hw->int_handle = wq->vec_ptr;
        } else {
                desc->vector = wq->vec_ptr;
                /*
                 * int_handles are only for descriptor completion. However for
                 * device MSIX enumeration, vec 0 is used for misc interrupts.
                 * Therefore even though we are rotating through 1...N for
                 * descriptor interrupts, we need to acquire the int_handles
                 * from 0..N-1.
                 */
                desc->hw->int_handle = idxd->int_handles[desc->vector - 1];
        }

        return desc;
}

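/*
 * idxd_alloc_desc() - reserve a free descriptor slot from the wq's sbitmap
 * queue. IDXD_OP_NONBLOCK callers get ERR_PTR(-EAGAIN) when no slot is free;
 * blocking callers sleep on the sbitmap wait queue until a slot is released
 * or a signal is pending.
 */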
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
        int idx;
        unsigned int cpu;
        struct idxd_device *idxd = wq->idxd;
        DEFINE_SBQ_WAIT(wait);
        struct sbq_wait_state *ws;
        struct sbitmap_queue *sbq;

        if (idxd->state != IDXD_DEV_ENABLED)
                return ERR_PTR(-EIO);

        sbq = &wq->sbq;
        idx = sbitmap_queue_get(sbq, &cpu);
        if (idx < 0) {
                if (optype == IDXD_OP_NONBLOCK)
                        return ERR_PTR(-EAGAIN);
        } else {
                return __get_desc(wq, idx, cpu);
        }

        /* Block until a descriptor slot frees up or a signal arrives. */
        ws = &sbq->ws[0];
        for (;;) {
                sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
                if (signal_pending_state(TASK_INTERRUPTIBLE, current))
                        break;
                idx = sbitmap_queue_get(sbq, &cpu);
                if (idx >= 0)
                        break;
                schedule();
        }

        sbitmap_finish_wait(sbq, ws, &wait);
        if (idx < 0)
                return ERR_PTR(-EAGAIN);

        return __get_desc(wq, idx, cpu);
}

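/*
 * idxd_free_desc() - return a descriptor slot to the wq's sbitmap queue so
 * that another submitter can claim it.
 */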
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        int cpu = desc->cpu;

        desc->cpu = -1;
        sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
}

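/*
 * idxd_submit_desc() - write a prepared descriptor to the wq portal, using a
 * 512-bit MMIO write for dedicated wqs and ENQCMDS for shared wqs, then queue
 * it on the matching irq_entry's pending list when a completion interrupt
 * was requested.
 */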
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        struct idxd_device *idxd = wq->idxd;
        void __iomem *portal;
        int rc;

        if (idxd->state != IDXD_DEV_ENABLED)
                return -EIO;

        if (!percpu_ref_tryget_live(&wq->wq_active))
                return -ENXIO;

        portal = wq->portal;

        /*
         * The wmb() flushes writes to coherent DMA data before
         * possibly triggering a DMA read. The wmb() is necessary
         * even on UP because the recipient is a device.
         */
        wmb();
        if (wq_dedicated(wq)) {
                iosubmit_cmds512(portal, desc->hw, 1);
        } else {
                /*
                 * It's not likely that we would receive queue full rejection
                 * since the descriptor allocation gates at wq size. If we
                 * receive a -EAGAIN, that means something went wrong such as
                 * the device is not accepting descriptors at all.
                 */
                rc = enqcmds(portal, desc->hw);
                if (rc < 0) {
                        /* drop the wq_active reference taken above */
                        percpu_ref_put(&wq->wq_active);
                        return rc;
                }
        }

        percpu_ref_put(&wq->wq_active);

        /*
         * Queue the descriptor onto the lockless list of the irq_entry that
         * the descriptor was assigned to.
         */
        if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
                int vec;

                /*
                 * On the host kernel the interrupt handle is the MSIX vector
                 * index and can be used directly. In a guest the int_handle
                 * is an IMS index for the entire device and cannot be used
                 * here, so the guest device's local vector index is used
                 * instead.
                 */
                vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
                llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
        }

        return 0;
}
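
/*
 * Illustrative only, not part of the original file: a minimal sketch of how a
 * submission path (for example a dmaengine tx_submit hook) might use the
 * helpers above. The caller, its wq pointer, and the descriptor preparation
 * step are assumed context rather than APIs defined here.
 *
 *	struct idxd_desc *desc;
 *	int rc;
 *
 *	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *
 *	// fill in desc->hw (opcode, source, destination, flags, ...)
 *
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc < 0)
 *		idxd_free_desc(wq, desc);
 */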