// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

enum irq_work_type {
	IRQ_WORK_NORMAL = 0,
	IRQ_WORK_PROCESS_FAULT,
};

struct idxd_fault {
	struct work_struct work;
	u64 addr;
	struct idxd_device *idxd;
};

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data);
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data);
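
/*
 * Reset and reconfigure the device after a software-initiated halt, then
 * re-enable every workqueue that was enabled before the reset. On any
 * failure, all workqueue state is cleared instead.
 */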
static void idxd_device_reinit(struct work_struct *work)
{
	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	idxd_device_reset(idxd);
	rc = idxd_device_config(idxd);
	if (rc < 0)
		goto out;

	rc = idxd_device_enable(idxd);
	if (rc < 0)
		goto out;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_ENABLED) {
			rc = idxd_wq_enable(wq);
			if (rc < 0)
				dev_warn(dev, "Unable to re-enable wq %s\n",
					 dev_name(&wq->conf_dev));
		}
	}

	return;

 out:
	idxd_device_wqs_clear_state(idxd);
}
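
/*
 * Scan the descriptor lists of every I/O vector for the descriptor whose
 * address matches the recorded fault address and complete it with a
 * device-failure status. Vector 0 is the misc interrupt, so the scan
 * starts at vector 1.
 */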
static void idxd_device_fault_work(struct work_struct *work)
{
	struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
	struct idxd_irq_entry *ie;
	int i;
	int processed;
	int irqcnt = fault->idxd->num_wq_irqs + 1;

	for (i = 1; i < irqcnt; i++) {
		ie = &fault->idxd->irq_entries[i];
		irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
				      &processed, fault->addr);
		if (processed)
			break;

		irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
					  &processed, fault->addr);
		if (processed)
			break;
	}

	kfree(fault);
}
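
/*
 * Allocate a fault record (atomically, since the caller may not sleep)
 * and defer the actual fault processing to the device workqueue.
 */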
static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
					      u64 fault_addr)
{
	struct idxd_fault *fault;

	fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
	if (!fault)
		return -ENOMEM;

	fault->addr = fault_addr;
	fault->idxd = idxd;
	INIT_WORK(&fault->work, idxd_device_fault_work);
	queue_work(idxd->wq, &fault->work);
	return 0;
}
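
/*
 * Handle the non-completion interrupt causes: software errors, command
 * completions, and device halt. Returns a negative errno when the device
 * has halted and cannot be recovered by a software reset.
 */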
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 val = 0;
	int i;
	bool err = false;

	if (cause & IDXD_INTC_ERR) {
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));

		iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
			  idxd->reg_base + IDXD_SWERR_OFFSET);

		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->err_queue);
		} else {
			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	if (!err)
		return 0;

	/*
	 * This case should rarely happen and typically is due to software
	 * programming error by the driver.
	 */
	if (idxd->sw_err.valid &&
	    idxd->sw_err.desc_valid &&
	    idxd->sw_err.fault_addr)
		idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on a system workqueue in order to allow interrupts
			 * for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			spin_lock_bh(&idxd->dev_lock);
			idxd_wqs_quiesce(idxd);
			idxd_wqs_unmap_portal(idxd);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
			return -ENXIO;
		}
	}

	return 0;
}
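
/*
 * Threaded handler for the misc interrupt vector. The interrupt cause
 * register is read and acknowledged in a loop, since new cause bits may be
 * set while earlier ones are being processed.
 */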
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	int rc;
	u32 cause;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (cause)
		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	while (cause) {
		rc = process_misc_interrupts(idxd, cause);
		if (rc < 0)
			break;
		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
		if (cause)
			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	}

	return IRQ_HANDLED;
}

static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
	/*
	 * Completion address can be bad as well. Check fault address match for descriptor
	 * and completion address.
	 */
	if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
		struct idxd_device *idxd = desc->wq->idxd;
		struct device *dev = &idxd->pdev->dev;

		dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
		return true;
	}

	return false;
}
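
/* Report the result to the dmaengine client and return the descriptor to its wq. */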
static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
	idxd_dma_complete_txd(desc, reason);
	idxd_free_desc(desc->wq, desc);
}
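
/*
 * Drain the lockless pending_llist: retire descriptors the hardware has
 * completed and move the rest onto the spinlock-protected work_list.
 * Returns the number of descriptors re-queued; *processed reports how many
 * were completed.
 */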
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;
	unsigned long flags;
	enum idxd_complete_type reason;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		goto out;

	if (wtype == IRQ_WORK_NORMAL)
		reason = IDXD_COMPLETE_NORMAL;
	else
		reason = IDXD_COMPLETE_DEV_FAIL;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (desc->completion->status) {
			if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
				match_fault(desc, data);
			complete_desc(desc, reason);
			(*processed)++;
		} else {
			spin_lock_irqsave(&irq_entry->list_lock, flags);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
			queued++;
		}
	}

 out:
	return queued;
}
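
/*
 * Walk the work_list under the list lock, detaching completed descriptors
 * onto a private list so they can be retired after the lock is dropped.
 * Returns the number of descriptors left pending.
 */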
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data)
{
	int queued = 0;
	unsigned long flags;
	LIST_HEAD(flist);
	struct idxd_desc *desc, *n;
	enum idxd_complete_type reason;

	*processed = 0;
	if (wtype == IRQ_WORK_NORMAL)
		reason = IDXD_COMPLETE_NORMAL;
	else
		reason = IDXD_COMPLETE_DEV_FAIL;

	/*
	 * This lock protects the work_list from corruption by concurrent
	 * access outside of the irq handler thread.
	 */
	spin_lock_irqsave(&irq_entry->list_lock, flags);
	if (list_empty(&irq_entry->work_list)) {
		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
		if (desc->completion->status) {
			list_del(&desc->list);
			(*processed)++;
			list_add_tail(&desc->list, &flist);
		} else {
			queued++;
		}
	}

	spin_unlock_irqrestore(&irq_entry->list_lock, flags);

	list_for_each_entry(desc, &flist, list) {
		if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
			match_fault(desc, data);
		complete_desc(desc, reason);
	}

	return queued;
}

static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
{
	int rc, processed, total = 0;

	/*
	 * There are two lists we are processing. The pending_llist is where
	 * the submitter adds all the submitted descriptors after sending them
	 * to the workqueue. It's a lockless singly linked list. The work_list
	 * is the common linux double linked list. We are in a scenario of
	 * multiple producers and a single consumer. The producers are all
	 * the kernel submitters of descriptors, and the consumer is the
	 * kernel irq handler thread for the msix vector when using threaded
	 * irq. To work with the restrictions of llist to remain lockless,
	 * we are doing the following steps:
	 * 1. Iterate through the work_list and process any completed
	 *    descriptor. Delete the completed entries during iteration.
	 * 2. llist_del_all() from the pending list.
	 * 3. Iterate through the llist that was deleted from the pending list
	 *    and process the completed entries.
	 * 4. If the entry is still waiting on hardware, list_add_tail() to
	 *    the work_list.
	 * 5. Repeat until no more descriptors.
	 */
	do {
		rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
					   &processed, 0);
		total += processed;
		if (rc != 0)
			continue;

		rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
					       &processed, 0);
		total += processed;
	} while (rc != 0);

	return total;
}
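
/*
 * Threaded handler for a workqueue completion interrupt: drain both
 * descriptor lists for this vector and report whether any work was found.
 */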
irqreturn_t idxd_wq_thread(int irq, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	int processed;

	processed = idxd_desc_process(irq_entry);
	if (processed == 0)
		return IRQ_NONE;

	return IRQ_HANDLED;
}