dmaengine: idxd: remove interrupt disable for cmd_lock
drivers/dma/idxd/device.c
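
In this driver, cmd_lock only serializes command submission from process
context: submitters sleep on a completion (idxd->cmd_done) or on cmd_waitq,
and the lock is not taken from hard interrupt context. The
spin_lock_irqsave()/spin_unlock_irqrestore() pairs around cmd_lock can
therefore be replaced with plain spin_lock()/spin_unlock(). A minimal
before/after sketch of the change, assuming the earlier code used the
irqsave variants (only the "after" form appears in the file below, e.g. in
idxd_device_init_reset(), idxd_cmd_exec() and
idxd_device_release_int_handle()):

	/* before: interrupts disabled while cmd_lock is held */
	spin_lock_irqsave(&idxd->cmd_lock, flags);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
	spin_unlock_irqrestore(&idxd->cmd_lock, flags);

	/* after: plain spinlock, interrupts stay enabled */
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
	spin_unlock(&idxd->cmd_lock);

Note that idxd->dev_lock is unchanged and is still taken with
spin_lock_irqsave() in this file.
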
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/pci.h>
7 #include <linux/io-64-nonatomic-lo-hi.h>
8 #include <linux/dmaengine.h>
9 #include <linux/irq.h>
10 #include <linux/msi.h>
11 #include <uapi/linux/idxd.h>
12 #include "../dmaengine.h"
13 #include "idxd.h"
14 #include "registers.h"
15
16 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
17                           u32 *status);
18 static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
19 static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
20
21 /* Interrupt control bits */
22 void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
23 {
24         struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
25
26         pci_msi_mask_irq(data);
27 }
28
29 void idxd_mask_msix_vectors(struct idxd_device *idxd)
30 {
31         struct pci_dev *pdev = idxd->pdev;
32         int msixcnt = pci_msix_vec_count(pdev);
33         int i;
34
35         for (i = 0; i < msixcnt; i++)
36                 idxd_mask_msix_vector(idxd, i);
37 }
38
39 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
40 {
41         struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
42
43         pci_msi_unmask_irq(data);
44 }
45
46 void idxd_unmask_error_interrupts(struct idxd_device *idxd)
47 {
48         union genctrl_reg genctrl;
49
50         genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
51         genctrl.softerr_int_en = 1;
52         genctrl.halt_int_en = 1;
53         iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
54 }
55
56 void idxd_mask_error_interrupts(struct idxd_device *idxd)
57 {
58         union genctrl_reg genctrl;
59
60         genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
61         genctrl.softerr_int_en = 0;
62         genctrl.halt_int_en = 0;
63         iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
64 }
65
66 static void free_hw_descs(struct idxd_wq *wq)
67 {
68         int i;
69
70         for (i = 0; i < wq->num_descs; i++)
71                 kfree(wq->hw_descs[i]);
72
73         kfree(wq->hw_descs);
74 }
75
76 static int alloc_hw_descs(struct idxd_wq *wq, int num)
77 {
78         struct device *dev = &wq->idxd->pdev->dev;
79         int i;
80         int node = dev_to_node(dev);
81
82         wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
83                                     GFP_KERNEL, node);
84         if (!wq->hw_descs)
85                 return -ENOMEM;
86
87         for (i = 0; i < num; i++) {
88                 wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
89                                                GFP_KERNEL, node);
90                 if (!wq->hw_descs[i]) {
91                         free_hw_descs(wq);
92                         return -ENOMEM;
93                 }
94         }
95
96         return 0;
97 }
98
99 static void free_descs(struct idxd_wq *wq)
100 {
101         int i;
102
103         for (i = 0; i < wq->num_descs; i++)
104                 kfree(wq->descs[i]);
105
106         kfree(wq->descs);
107 }
108
109 static int alloc_descs(struct idxd_wq *wq, int num)
110 {
111         struct device *dev = &wq->idxd->pdev->dev;
112         int i;
113         int node = dev_to_node(dev);
114
115         wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
116                                  GFP_KERNEL, node);
117         if (!wq->descs)
118                 return -ENOMEM;
119
120         for (i = 0; i < num; i++) {
121                 wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
122                                             GFP_KERNEL, node);
123                 if (!wq->descs[i]) {
124                         free_descs(wq);
125                         return -ENOMEM;
126                 }
127         }
128
129         return 0;
130 }
131
132 /* WQ control bits */
133 int idxd_wq_alloc_resources(struct idxd_wq *wq)
134 {
135         struct idxd_device *idxd = wq->idxd;
136         struct device *dev = &idxd->pdev->dev;
137         int rc, num_descs, i;
138         int align;
139         u64 tmp;
140
141         if (wq->type != IDXD_WQT_KERNEL)
142                 return 0;
143
144         num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
145         wq->num_descs = num_descs;
146
147         rc = alloc_hw_descs(wq, num_descs);
148         if (rc < 0)
149                 return rc;
150
151         align = idxd->data->align;
152         wq->compls_size = num_descs * idxd->data->compl_size + align;
153         wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
154                                             &wq->compls_addr_raw, GFP_KERNEL);
155         if (!wq->compls_raw) {
156                 rc = -ENOMEM;
157                 goto fail_alloc_compls;
158         }
159
160         /* Adjust alignment */
161         wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
162         tmp = (u64)wq->compls_raw;
163         tmp = (tmp + (align - 1)) & ~(align - 1);
164         wq->compls = (struct dsa_completion_record *)tmp;
165
166         rc = alloc_descs(wq, num_descs);
167         if (rc < 0)
168                 goto fail_alloc_descs;
169
170         rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
171                                      dev_to_node(dev));
172         if (rc < 0)
173                 goto fail_sbitmap_init;
174
175         for (i = 0; i < num_descs; i++) {
176                 struct idxd_desc *desc = wq->descs[i];
177
178                 desc->hw = wq->hw_descs[i];
179                 if (idxd->data->type == IDXD_TYPE_DSA)
180                         desc->completion = &wq->compls[i];
181                 else if (idxd->data->type == IDXD_TYPE_IAX)
182                         desc->iax_completion = &wq->iax_compls[i];
183                 desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
184                 desc->id = i;
185                 desc->wq = wq;
186                 desc->cpu = -1;
187         }
188
189         return 0;
190
191  fail_sbitmap_init:
192         free_descs(wq);
193  fail_alloc_descs:
194         dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
195                           wq->compls_addr_raw);
196  fail_alloc_compls:
197         free_hw_descs(wq);
198         return rc;
199 }
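/*
 * Note: the completion-record area above is over-allocated by 'align' bytes
 * and then both the CPU pointer (wq->compls) and the DMA address
 * (wq->compls_addr) are rounded up to the same 'align' boundary, so the two
 * views stay in sync and every completion record is naturally aligned.
 */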
200
201 void idxd_wq_free_resources(struct idxd_wq *wq)
202 {
203         struct device *dev = &wq->idxd->pdev->dev;
204
205         if (wq->type != IDXD_WQT_KERNEL)
206                 return;
207
208         free_hw_descs(wq);
209         free_descs(wq);
210         dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
211                           wq->compls_addr_raw);
212         sbitmap_queue_free(&wq->sbq);
213 }
214
215 int idxd_wq_enable(struct idxd_wq *wq)
216 {
217         struct idxd_device *idxd = wq->idxd;
218         struct device *dev = &idxd->pdev->dev;
219         u32 status;
220
221         if (wq->state == IDXD_WQ_ENABLED) {
222                 dev_dbg(dev, "WQ %d already enabled\n", wq->id);
223                 return -ENXIO;
224         }
225
226         idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
227
228         if (status != IDXD_CMDSTS_SUCCESS &&
229             status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
230                 dev_dbg(dev, "WQ enable failed: %#x\n", status);
231                 return -ENXIO;
232         }
233
234         wq->state = IDXD_WQ_ENABLED;
235         dev_dbg(dev, "WQ %d enabled\n", wq->id);
236         return 0;
237 }
238
239 int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
240 {
241         struct idxd_device *idxd = wq->idxd;
242         struct device *dev = &idxd->pdev->dev;
243         u32 status, operand;
244
245         dev_dbg(dev, "Disabling WQ %d\n", wq->id);
246
247         if (wq->state != IDXD_WQ_ENABLED) {
248                 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
249                 return 0;
250         }
251
252         operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
253         idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
254
255         if (status != IDXD_CMDSTS_SUCCESS) {
256                 dev_dbg(dev, "WQ disable failed: %#x\n", status);
257                 return -ENXIO;
258         }
259
260         if (reset_config)
261                 idxd_wq_disable_cleanup(wq);
262         wq->state = IDXD_WQ_DISABLED;
263         dev_dbg(dev, "WQ %d disabled\n", wq->id);
264         return 0;
265 }
266
267 void idxd_wq_drain(struct idxd_wq *wq)
268 {
269         struct idxd_device *idxd = wq->idxd;
270         struct device *dev = &idxd->pdev->dev;
271         u32 operand;
272
273         if (wq->state != IDXD_WQ_ENABLED) {
274                 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
275                 return;
276         }
277
278         dev_dbg(dev, "Draining WQ %d\n", wq->id);
279         operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
280         idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
281 }
282
283 void idxd_wq_reset(struct idxd_wq *wq)
284 {
285         struct idxd_device *idxd = wq->idxd;
286         struct device *dev = &idxd->pdev->dev;
287         u32 operand;
288
289         if (wq->state != IDXD_WQ_ENABLED) {
290                 dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
291                 return;
292         }
293
294         operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
295         idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
296         idxd_wq_disable_cleanup(wq);
297         wq->state = IDXD_WQ_DISABLED;
298 }
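/*
 * Note: for the WQ disable/drain/reset commands above, the operand packs the
 * target WQ as BIT(wq->id % 16) in the low 16 bits together with wq->id / 16
 * in bits 31:16; all three helpers build it the same way.
 */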
299
300 int idxd_wq_map_portal(struct idxd_wq *wq)
301 {
302         struct idxd_device *idxd = wq->idxd;
303         struct pci_dev *pdev = idxd->pdev;
304         struct device *dev = &pdev->dev;
305         resource_size_t start;
306
307         start = pci_resource_start(pdev, IDXD_WQ_BAR);
308         start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
309
310         wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
311         if (!wq->portal)
312                 return -ENOMEM;
313
314         return 0;
315 }
316
317 void idxd_wq_unmap_portal(struct idxd_wq *wq)
318 {
319         struct device *dev = &wq->idxd->pdev->dev;
320
321         devm_iounmap(dev, wq->portal);
322         wq->portal = NULL;
323         wq->portal_offset = 0;
324 }
325
326 void idxd_wqs_unmap_portal(struct idxd_device *idxd)
327 {
328         int i;
329
330         for (i = 0; i < idxd->max_wqs; i++) {
331                 struct idxd_wq *wq = idxd->wqs[i];
332
333                 if (wq->portal)
334                         idxd_wq_unmap_portal(wq);
335         }
336 }
337
338 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
339 {
340         struct idxd_device *idxd = wq->idxd;
341         int rc;
342         union wqcfg wqcfg;
343         unsigned int offset;
344         unsigned long flags;
345
346         rc = idxd_wq_disable(wq, false);
347         if (rc < 0)
348                 return rc;
349
350         offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
351         spin_lock_irqsave(&idxd->dev_lock, flags);
352         wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
353         wqcfg.pasid_en = 1;
354         wqcfg.pasid = pasid;
355         iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
356         spin_unlock_irqrestore(&idxd->dev_lock, flags);
357
358         rc = idxd_wq_enable(wq);
359         if (rc < 0)
360                 return rc;
361
362         return 0;
363 }
364
365 int idxd_wq_disable_pasid(struct idxd_wq *wq)
366 {
367         struct idxd_device *idxd = wq->idxd;
368         int rc;
369         union wqcfg wqcfg;
370         unsigned int offset;
371         unsigned long flags;
372
373         rc = idxd_wq_disable(wq, false);
374         if (rc < 0)
375                 return rc;
376
377         offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
378         spin_lock_irqsave(&idxd->dev_lock, flags);
379         wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
380         wqcfg.pasid_en = 0;
381         wqcfg.pasid = 0;
382         iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
383         spin_unlock_irqrestore(&idxd->dev_lock, flags);
384
385         rc = idxd_wq_enable(wq);
386         if (rc < 0)
387                 return rc;
388
389         return 0;
390 }
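/*
 * Note: both PASID helpers above follow the same sequence: disable the WQ,
 * read-modify-write only the PASID dword of WQCFG under dev_lock, then
 * re-enable the WQ. Only the 32-bit word holding pasid_en/pasid is touched;
 * the rest of the WQ configuration is left as-is.
 */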
391
392 static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
393 {
394         struct idxd_device *idxd = wq->idxd;
395
396         lockdep_assert_held(&wq->wq_lock);
397         memset(wq->wqcfg, 0, idxd->wqcfg_size);
398         wq->type = IDXD_WQT_NONE;
399         wq->size = 0;
400         wq->group = NULL;
401         wq->threshold = 0;
402         wq->priority = 0;
403         wq->ats_dis = 0;
404         clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
405         clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
406         memset(wq->name, 0, WQ_NAME_SIZE);
407 }
408
409 static void idxd_wq_ref_release(struct percpu_ref *ref)
410 {
411         struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);
412
413         complete(&wq->wq_dead);
414 }
415
416 int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
417 {
418         int rc;
419
420         memset(&wq->wq_active, 0, sizeof(wq->wq_active));
421         rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
422         if (rc < 0)
423                 return rc;
424         reinit_completion(&wq->wq_dead);
425         return 0;
426 }
427
428 void idxd_wq_quiesce(struct idxd_wq *wq)
429 {
430         percpu_ref_kill(&wq->wq_active);
431         wait_for_completion(&wq->wq_dead);
432         percpu_ref_exit(&wq->wq_active);
433 }
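/*
 * Note: idxd_wq_quiesce() kills wq_active so no new references can be taken,
 * sleeps on wq_dead until idxd_wq_ref_release() signals that the last
 * outstanding reference has been dropped, and then tears the percpu_ref down
 * with percpu_ref_exit().
 */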
434
435 /* Device control bits */
436 static inline bool idxd_is_enabled(struct idxd_device *idxd)
437 {
438         union gensts_reg gensts;
439
440         gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
441
442         if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
443                 return true;
444         return false;
445 }
446
447 static inline bool idxd_device_is_halted(struct idxd_device *idxd)
448 {
449         union gensts_reg gensts;
450
451         gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
452
453         return (gensts.state == IDXD_DEVICE_STATE_HALT);
454 }
455
456 /*
457  * This function is only used for reset during probe and will
458  * poll for completion. Once the device is set up with interrupts,
459  * all commands will be done via interrupt completion.
460  */
461 int idxd_device_init_reset(struct idxd_device *idxd)
462 {
463         struct device *dev = &idxd->pdev->dev;
464         union idxd_command_reg cmd;
465
466         if (idxd_device_is_halted(idxd)) {
467                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
468                 return -ENXIO;
469         }
470
471         memset(&cmd, 0, sizeof(cmd));
472         cmd.cmd = IDXD_CMD_RESET_DEVICE;
473         dev_dbg(dev, "%s: sending reset for init.\n", __func__);
474         spin_lock(&idxd->cmd_lock);
475         iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
476
477         while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
478                IDXD_CMDSTS_ACTIVE)
479                 cpu_relax();
480         spin_unlock(&idxd->cmd_lock);
481         return 0;
482 }
483
484 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
485                           u32 *status)
486 {
487         union idxd_command_reg cmd;
488         DECLARE_COMPLETION_ONSTACK(done);
489         u32 stat;
490
491         if (idxd_device_is_halted(idxd)) {
492                 dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
493                 if (status)
494                         *status = IDXD_CMDSTS_HW_ERR;
495                 return;
496         }
497
498         memset(&cmd, 0, sizeof(cmd));
499         cmd.cmd = cmd_code;
500         cmd.operand = operand;
501         cmd.int_req = 1;
502
503         spin_lock(&idxd->cmd_lock);
504         wait_event_lock_irq(idxd->cmd_waitq,
505                             !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
506                             idxd->cmd_lock);
507
508         dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
509                 __func__, cmd_code, operand);
510
511         idxd->cmd_status = 0;
512         __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
513         idxd->cmd_done = &done;
514         iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
515
516         /*
517          * After command submitted, release lock and go to sleep until
518          * the command completes via interrupt.
519          */
520         spin_unlock(&idxd->cmd_lock);
521         wait_for_completion(&done);
522         stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
523         spin_lock(&idxd->cmd_lock);
524         if (status)
525                 *status = stat;
526         idxd->cmd_status = stat & GENMASK(7, 0);
527
528         __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
529         /* Wake up other pending commands */
530         wake_up(&idxd->cmd_waitq);
531         spin_unlock(&idxd->cmd_lock);
532 }
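/*
 * Typical caller pattern for idxd_cmd_exec(): pass a status pointer and check
 * it against the expected IDXD_CMDSTS_* values, as idxd_device_enable() below
 * does. For example:
 *
 *	u32 status;
 *
 *	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
 *	if (status != IDXD_CMDSTS_SUCCESS &&
 *	    status != IDXD_CMDSTS_ERR_DEV_ENABLED)
 *		return -ENXIO;
 */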
533
534 int idxd_device_enable(struct idxd_device *idxd)
535 {
536         struct device *dev = &idxd->pdev->dev;
537         u32 status;
538
539         if (idxd_is_enabled(idxd)) {
540                 dev_dbg(dev, "Device already enabled\n");
541                 return -ENXIO;
542         }
543
544         idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
545
546         /* If the command is successful or if the device was enabled */
547         if (status != IDXD_CMDSTS_SUCCESS &&
548             status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
549                 dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
550                 return -ENXIO;
551         }
552
553         idxd->state = IDXD_DEV_ENABLED;
554         return 0;
555 }
556
557 int idxd_device_disable(struct idxd_device *idxd)
558 {
559         struct device *dev = &idxd->pdev->dev;
560         u32 status;
561         unsigned long flags;
562
563         if (!idxd_is_enabled(idxd)) {
564                 dev_dbg(dev, "Device is not enabled\n");
565                 return 0;
566         }
567
568         idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);
569
570         /* If the command is successful or if the device was disabled */
571         if (status != IDXD_CMDSTS_SUCCESS &&
572             !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
573                 dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
574                 return -ENXIO;
575         }
576
577         spin_lock_irqsave(&idxd->dev_lock, flags);
578         idxd_device_clear_state(idxd);
579         idxd->state = IDXD_DEV_DISABLED;
580         spin_unlock_irqrestore(&idxd->dev_lock, flags);
581         return 0;
582 }
583
584 void idxd_device_reset(struct idxd_device *idxd)
585 {
586         unsigned long flags;
587
588         idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
589         spin_lock_irqsave(&idxd->dev_lock, flags);
590         idxd_device_clear_state(idxd);
591         idxd->state = IDXD_DEV_DISABLED;
592         spin_unlock_irqrestore(&idxd->dev_lock, flags);
593 }
594
595 void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
596 {
597         struct device *dev = &idxd->pdev->dev;
598         u32 operand;
599
600         operand = pasid;
601         dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
602         idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
603         dev_dbg(dev, "pasid %d drained\n", pasid);
604 }
605
606 int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
607                                    enum idxd_interrupt_type irq_type)
608 {
609         struct device *dev = &idxd->pdev->dev;
610         u32 operand, status;
611
612         if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
613                 return -EOPNOTSUPP;
614
615         dev_dbg(dev, "get int handle, idx %d\n", idx);
616
617         operand = idx & GENMASK(15, 0);
618         if (irq_type == IDXD_IRQ_IMS)
619                 operand |= CMD_INT_HANDLE_IMS;
620
621         dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);
622
623         idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);
624
625         if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
626                 dev_dbg(dev, "request int handle failed: %#x\n", status);
627                 return -ENXIO;
628         }
629
630         *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);
631
632         dev_dbg(dev, "int handle acquired: %u\n", *handle);
633         return 0;
634 }
635
636 int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
637                                    enum idxd_interrupt_type irq_type)
638 {
639         struct device *dev = &idxd->pdev->dev;
640         u32 operand, status;
641         union idxd_command_reg cmd;
642
643         if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
644                 return -EOPNOTSUPP;
645
646         dev_dbg(dev, "release int handle, handle %d\n", handle);
647
648         memset(&cmd, 0, sizeof(cmd));
649         operand = handle & GENMASK(15, 0);
650
651         if (irq_type == IDXD_IRQ_IMS)
652                 operand |= CMD_INT_HANDLE_IMS;
653
654         cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
655         cmd.operand = operand;
656
657         dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);
658
659         spin_lock(&idxd->cmd_lock);
660         iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);
661
662         while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
663                 cpu_relax();
664         status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
665         spin_unlock(&idxd->cmd_lock);
666
667         if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
668                 dev_dbg(dev, "release int handle failed: %#x\n", status);
669                 return -ENXIO;
670         }
671
672         dev_dbg(dev, "int handle released.\n");
673         return 0;
674 }
675
676 /* Device configuration bits */
677 static void idxd_engines_clear_state(struct idxd_device *idxd)
678 {
679         struct idxd_engine *engine;
680         int i;
681
682         lockdep_assert_held(&idxd->dev_lock);
683         for (i = 0; i < idxd->max_engines; i++) {
684                 engine = idxd->engines[i];
685                 engine->group = NULL;
686         }
687 }
688
689 static void idxd_groups_clear_state(struct idxd_device *idxd)
690 {
691         struct idxd_group *group;
692         int i;
693
694         lockdep_assert_held(&idxd->dev_lock);
695         for (i = 0; i < idxd->max_groups; i++) {
696                 group = idxd->groups[i];
697                 memset(&group->grpcfg, 0, sizeof(group->grpcfg));
698                 group->num_engines = 0;
699                 group->num_wqs = 0;
700                 group->use_token_limit = false;
701                 group->tokens_allowed = 0;
702                 group->tokens_reserved = 0;
703                 group->tc_a = -1;
704                 group->tc_b = -1;
705         }
706 }
707
708 static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
709 {
710         int i;
711
712         lockdep_assert_held(&idxd->dev_lock);
713         for (i = 0; i < idxd->max_wqs; i++) {
714                 struct idxd_wq *wq = idxd->wqs[i];
715
716                 if (wq->state == IDXD_WQ_ENABLED) {
717                         idxd_wq_disable_cleanup(wq);
718                         wq->state = IDXD_WQ_DISABLED;
719                 }
720         }
721 }
722
723 void idxd_device_clear_state(struct idxd_device *idxd)
724 {
725         idxd_groups_clear_state(idxd);
726         idxd_engines_clear_state(idxd);
727         idxd_device_wqs_clear_state(idxd);
728 }
729
730 void idxd_msix_perm_setup(struct idxd_device *idxd)
731 {
732         union msix_perm mperm;
733         int i, msixcnt;
734
735         msixcnt = pci_msix_vec_count(idxd->pdev);
736         if (msixcnt < 0)
737                 return;
738
739         mperm.bits = 0;
740         mperm.pasid = idxd->pasid;
741         mperm.pasid_en = device_pasid_enabled(idxd);
742         for (i = 1; i < msixcnt; i++)
743                 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
744 }
745
746 void idxd_msix_perm_clear(struct idxd_device *idxd)
747 {
748         union msix_perm mperm;
749         int i, msixcnt;
750
751         msixcnt = pci_msix_vec_count(idxd->pdev);
752         if (msixcnt < 0)
753                 return;
754
755         mperm.bits = 0;
756         for (i = 1; i < msixcnt; i++)
757                 iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
758 }
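/*
 * Note: both MSI-X permission helpers above walk one 8-byte entry per vector
 * at msix_perm_offset, starting at entry 1. Setup programs the device PASID
 * with pasid_en reflecting device_pasid_enabled(); clear zeroes the entries.
 * Entry 0 is skipped, presumably because vector 0 carries the misc/command
 * interrupt rather than work-queue completions (an assumption; the loops
 * simply start at 1).
 */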
759
760 static void idxd_group_config_write(struct idxd_group *group)
761 {
762         struct idxd_device *idxd = group->idxd;
763         struct device *dev = &idxd->pdev->dev;
764         int i;
765         u32 grpcfg_offset;
766
767         dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
768
769         /* setup GRPWQCFG */
770         for (i = 0; i < GRPWQCFG_STRIDES; i++) {
771                 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
772                 iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
773                 dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
774                         group->id, i, grpcfg_offset,
775                         ioread64(idxd->reg_base + grpcfg_offset));
776         }
777
778         /* setup GRPENGCFG */
779         grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
780         iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
781         dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
782                 grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
783
784         /* setup GRPFLAGS */
785         grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
786         iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
787         dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
788                 group->id, grpcfg_offset,
789                 ioread32(idxd->reg_base + grpcfg_offset));
790 }
791
792 static int idxd_groups_config_write(struct idxd_device *idxd)
793
794 {
795         union gencfg_reg reg;
796         int i;
797         struct device *dev = &idxd->pdev->dev;
798
799         /* Setup bandwidth token limit */
800         if (idxd->token_limit) {
801                 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
802                 reg.token_limit = idxd->token_limit;
803                 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
804         }
805
806         dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
807                 ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));
808
809         for (i = 0; i < idxd->max_groups; i++) {
810                 struct idxd_group *group = idxd->groups[i];
811
812                 idxd_group_config_write(group);
813         }
814
815         return 0;
816 }
817
818 static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
819 {
820         struct pci_dev *pdev = idxd->pdev;
821
822         if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
823                 return true;
824         return false;
825 }
826
827 static int idxd_wq_config_write(struct idxd_wq *wq)
828 {
829         struct idxd_device *idxd = wq->idxd;
830         struct device *dev = &idxd->pdev->dev;
831         u32 wq_offset;
832         int i;
833
834         if (!wq->group)
835                 return 0;
836
837         /*
838          * Instead of clearing the entire shadow copy of WQCFG with memset, copy it from the
839          * hardware after wq reset. This copies back the sticky values that are present on some devices.
840          */
841         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
842                 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
843                 wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
844         }
845
846         /* byte 0-3 */
847         wq->wqcfg->wq_size = wq->size;
848
849         if (wq->size == 0) {
850                 idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
851                 dev_warn(dev, "Incorrect work queue size: 0\n");
852                 return -EINVAL;
853         }
854
855         /* bytes 4-7 */
856         wq->wqcfg->wq_thresh = wq->threshold;
857
858         /* byte 8-11 */
859         if (wq_dedicated(wq))
860                 wq->wqcfg->mode = 1;
861
862         if (device_pasid_enabled(idxd)) {
863                 wq->wqcfg->pasid_en = 1;
864                 if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
865                         wq->wqcfg->pasid = idxd->pasid;
866         }
867
868         /*
869          * Here the priv bit is set depending on the WQ type. priv = 1 if the
870          * WQ type is kernel to indicate privileged access. This setting only
871          * matters for dedicated WQ. According to the DSA spec:
872          * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
873          * Privileged Mode Enable field of the PCI Express PASID capability
874          * is 0, this field must be 0.
875          *
876          * In the case of a dedicated kernel WQ that is not able to support
877          * the PASID cap, the configuration will be rejected.
878          */
879         wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
880         if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
881             !idxd_device_pasid_priv_enabled(idxd) &&
882             wq->type == IDXD_WQT_KERNEL) {
883                 idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
884                 return -EOPNOTSUPP;
885         }
886
887         wq->wqcfg->priority = wq->priority;
888
889         if (idxd->hw.gen_cap.block_on_fault &&
890             test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
891                 wq->wqcfg->bof = 1;
892
893         if (idxd->hw.wq_cap.wq_ats_support)
894                 wq->wqcfg->wq_ats_disable = wq->ats_dis;
895
896         /* bytes 12-15 */
897         wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
898         wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
899
900         dev_dbg(dev, "WQ %d CFGs\n", wq->id);
901         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
902                 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
903                 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
904                 dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
905                         wq->id, i, wq_offset,
906                         ioread32(idxd->reg_base + wq_offset));
907         }
908
909         return 0;
910 }
911
912 static int idxd_wqs_config_write(struct idxd_device *idxd)
913 {
914         int i, rc;
915
916         for (i = 0; i < idxd->max_wqs; i++) {
917                 struct idxd_wq *wq = idxd->wqs[i];
918
919                 rc = idxd_wq_config_write(wq);
920                 if (rc < 0)
921                         return rc;
922         }
923
924         return 0;
925 }
926
927 static void idxd_group_flags_setup(struct idxd_device *idxd)
928 {
929         int i;
930
931         /* TC-A 0 and TC-B 1 should be defaults */
932         for (i = 0; i < idxd->max_groups; i++) {
933                 struct idxd_group *group = idxd->groups[i];
934
935                 if (group->tc_a == -1)
936                         group->tc_a = group->grpcfg.flags.tc_a = 0;
937                 else
938                         group->grpcfg.flags.tc_a = group->tc_a;
939                 if (group->tc_b == -1)
940                         group->tc_b = group->grpcfg.flags.tc_b = 1;
941                 else
942                         group->grpcfg.flags.tc_b = group->tc_b;
943                 group->grpcfg.flags.use_token_limit = group->use_token_limit;
944                 group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
945                 if (group->tokens_allowed)
946                         group->grpcfg.flags.tokens_allowed =
947                                 group->tokens_allowed;
948                 else
949                         group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
950         }
951 }
952
953 static int idxd_engines_setup(struct idxd_device *idxd)
954 {
955         int i, engines = 0;
956         struct idxd_engine *eng;
957         struct idxd_group *group;
958
959         for (i = 0; i < idxd->max_groups; i++) {
960                 group = idxd->groups[i];
961                 group->grpcfg.engines = 0;
962         }
963
964         for (i = 0; i < idxd->max_engines; i++) {
965                 eng = idxd->engines[i];
966                 group = eng->group;
967
968                 if (!group)
969                         continue;
970
971                 group->grpcfg.engines |= BIT(eng->id);
972                 engines++;
973         }
974
975         if (!engines)
976                 return -EINVAL;
977
978         return 0;
979 }
980
981 static int idxd_wqs_setup(struct idxd_device *idxd)
982 {
983         struct idxd_wq *wq;
984         struct idxd_group *group;
985         int i, j, configured = 0;
986         struct device *dev = &idxd->pdev->dev;
987
988         for (i = 0; i < idxd->max_groups; i++) {
989                 group = idxd->groups[i];
990                 for (j = 0; j < 4; j++)
991                         group->grpcfg.wqs[j] = 0;
992         }
993
994         for (i = 0; i < idxd->max_wqs; i++) {
995                 wq = idxd->wqs[i];
996                 group = wq->group;
997
998                 if (!wq->group)
999                         continue;
1000                 if (!wq->size)
1001                         continue;
1002
1003                 if (wq_shared(wq) && !device_swq_supported(idxd)) {
1004                         idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
1005                         dev_warn(dev, "No shared wq support but configured.\n");
1006                         return -EINVAL;
1007                 }
1008
1009                 group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
1010                 configured++;
1011         }
1012
1013         if (configured == 0) {
1014                 idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
1015                 return -EINVAL;
1016         }
1017
1018         return 0;
1019 }
1020
1021 int idxd_device_config(struct idxd_device *idxd)
1022 {
1023         int rc;
1024
1025         lockdep_assert_held(&idxd->dev_lock);
1026         rc = idxd_wqs_setup(idxd);
1027         if (rc < 0)
1028                 return rc;
1029
1030         rc = idxd_engines_setup(idxd);
1031         if (rc < 0)
1032                 return rc;
1033
1034         idxd_group_flags_setup(idxd);
1035
1036         rc = idxd_wqs_config_write(idxd);
1037         if (rc < 0)
1038                 return rc;
1039
1040         rc = idxd_groups_config_write(idxd);
1041         if (rc < 0)
1042                 return rc;
1043
1044         return 0;
1045 }
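/*
 * Note: idxd_device_config() first builds the software shadow (which WQs and
 * engines belong to each group via idxd_wqs_setup()/idxd_engines_setup(),
 * plus the group flag defaults in idxd_group_flags_setup()) and only then
 * pushes WQCFG and GRPCFG to the hardware. Callers must hold dev_lock, as the
 * lockdep assertion documents.
 */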
1046
1047 static int idxd_wq_load_config(struct idxd_wq *wq)
1048 {
1049         struct idxd_device *idxd = wq->idxd;
1050         struct device *dev = &idxd->pdev->dev;
1051         int wqcfg_offset;
1052         int i;
1053
1054         wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
1055         memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);
1056
1057         wq->size = wq->wqcfg->wq_size;
1058         wq->threshold = wq->wqcfg->wq_thresh;
1059         if (wq->wqcfg->priv)
1060                 wq->type = IDXD_WQT_KERNEL;
1061
1062         /* The driver does not support shared WQ mode in read-only config yet */
1063         if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
1064                 return -EOPNOTSUPP;
1065
1066         set_bit(WQ_FLAG_DEDICATED, &wq->flags);
1067
1068         wq->priority = wq->wqcfg->priority;
1069
1070         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
1071                 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
1072                 dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
1073         }
1074
1075         return 0;
1076 }
1077
1078 static void idxd_group_load_config(struct idxd_group *group)
1079 {
1080         struct idxd_device *idxd = group->idxd;
1081         struct device *dev = &idxd->pdev->dev;
1082         int i, j, grpcfg_offset;
1083
1084         /*
1085          * Load WQS bit fields
1086          * Iterate through all 256 bits 64 bits at a time
1087          */
1088         for (i = 0; i < GRPWQCFG_STRIDES; i++) {
1089                 struct idxd_wq *wq;
1090
1091                 grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
1092                 group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
1093                 dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
1094                         group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);
1095
1096                 if (i * 64 >= idxd->max_wqs)
1097                         break;
1098
1099                 /* Iterate through all 64 bits and check for wq set */
1100                 for (j = 0; j < 64; j++) {
1101                         int id = i * 64 + j;
1102
1103                         /* No need to check beyond max wqs */
1104                         if (id >= idxd->max_wqs)
1105                                 break;
1106
1107                         /* Set group assignment for wq if wq bit is set */
1108                         if (group->grpcfg.wqs[i] & BIT(j)) {
1109                                 wq = idxd->wqs[id];
1110                                 wq->group = group;
1111                         }
1112                 }
1113         }
1114
1115         grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
1116         group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
1117         dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
1118                 grpcfg_offset, group->grpcfg.engines);
1119
1120         /* Iterate through all 64 bits to check engines set */
1121         for (i = 0; i < 64; i++) {
1122                 if (i >= idxd->max_engines)
1123                         break;
1124
1125                 if (group->grpcfg.engines & BIT(i)) {
1126                         struct idxd_engine *engine = idxd->engines[i];
1127
1128                         engine->group = group;
1129                 }
1130         }
1131
1132         grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
1133         group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
1134         dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
1135                 group->id, grpcfg_offset, group->grpcfg.flags.bits);
1136 }
1137
1138 int idxd_device_load_config(struct idxd_device *idxd)
1139 {
1140         union gencfg_reg reg;
1141         int i, rc;
1142
1143         reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
1144         idxd->token_limit = reg.token_limit;
1145
1146         for (i = 0; i < idxd->max_groups; i++) {
1147                 struct idxd_group *group = idxd->groups[i];
1148
1149                 idxd_group_load_config(group);
1150         }
1151
1152         for (i = 0; i < idxd->max_wqs; i++) {
1153                 struct idxd_wq *wq = idxd->wqs[i];
1154
1155                 rc = idxd_wq_load_config(wq);
1156                 if (rc < 0)
1157                         return rc;
1158         }
1159
1160         return 0;
1161 }
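/*
 * Note: idxd_device_load_config() is the read-only configuration path: the
 * token limit, GRPCFG and WQCFG registers are read back from the hardware and
 * used to populate the software state (group membership for WQs and engines,
 * WQ size/threshold/priority). idxd_wq_load_config() rejects shared or
 * pasid-enabled WQs since that mode is not supported with read-only config
 * yet.
 */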
1162
1163 int __drv_enable_wq(struct idxd_wq *wq)
1164 {
1165         struct idxd_device *idxd = wq->idxd;
1166         struct device *dev = &idxd->pdev->dev;
1167         unsigned long flags;
1168         int rc = -ENXIO;
1169
1170         lockdep_assert_held(&wq->wq_lock);
1171
1172         if (idxd->state != IDXD_DEV_ENABLED) {
1173                 idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
1174                 goto err;
1175         }
1176
1177         if (wq->state != IDXD_WQ_DISABLED) {
1178                 dev_dbg(dev, "wq %d already enabled.\n", wq->id);
1179                 idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
1180                 rc = -EBUSY;
1181                 goto err;
1182         }
1183
1184         if (!wq->group) {
1185                 dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
1186                 idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
1187                 goto err;
1188         }
1189
1190         if (strlen(wq->name) == 0) {
1191                 idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
1192                 dev_dbg(dev, "wq %d name not set.\n", wq->id);
1193                 goto err;
1194         }
1195
1196         /* Shared WQ checks */
1197         if (wq_shared(wq)) {
1198                 if (!device_swq_supported(idxd)) {
1199                         idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
1200                         dev_dbg(dev, "PASID not enabled and shared wq.\n");
1201                         goto err;
1202                 }
1203                 /*
1204                  * Shared wq with the threshold set to 0 means the user
1205                  * did not set the threshold or transitioned from a
1206                  * dedicated wq but did not set threshold. A value
1207                  * of 0 would effectively disable the shared wq. The
1208                  * driver does not allow a value of 0 to be set for
1209                  * threshold via sysfs.
1210                  */
1211                 if (wq->threshold == 0) {
1212                         idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
1213                         dev_dbg(dev, "Shared wq and threshold 0.\n");
1214                         goto err;
1215                 }
1216         }
1217
1218         rc = 0;
1219         spin_lock_irqsave(&idxd->dev_lock, flags);
1220         if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1221                 rc = idxd_device_config(idxd);
1222         spin_unlock_irqrestore(&idxd->dev_lock, flags);
1223         if (rc < 0) {
1224                 dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
1225                 goto err;
1226         }
1227
1228         rc = idxd_wq_enable(wq);
1229         if (rc < 0) {
1230                 dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
1231                 goto err;
1232         }
1233
1234         rc = idxd_wq_map_portal(wq);
1235         if (rc < 0) {
1236                 idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
1237                 dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
1238                 goto err_map_portal;
1239         }
1240
1241         wq->client_count = 0;
1242         return 0;
1243
1244 err_map_portal:
1245         rc = idxd_wq_disable(wq, false);
1246         if (rc < 0)
1247                 dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
1248 err:
1249         return rc;
1250 }
1251
1252 int drv_enable_wq(struct idxd_wq *wq)
1253 {
1254         int rc;
1255
1256         mutex_lock(&wq->wq_lock);
1257         rc = __drv_enable_wq(wq);
1258         mutex_unlock(&wq->wq_lock);
1259         return rc;
1260 }
1261
1262 void __drv_disable_wq(struct idxd_wq *wq)
1263 {
1264         struct idxd_device *idxd = wq->idxd;
1265         struct device *dev = &idxd->pdev->dev;
1266
1267         lockdep_assert_held(&wq->wq_lock);
1268
1269         if (idxd_wq_refcount(wq))
1270                 dev_warn(dev, "Clients have a claim on wq %d: %d\n",
1271                          wq->id, idxd_wq_refcount(wq));
1272
1273         idxd_wq_unmap_portal(wq);
1274
1275         idxd_wq_drain(wq);
1276         idxd_wq_reset(wq);
1277
1278         wq->client_count = 0;
1279 }
1280
1281 void drv_disable_wq(struct idxd_wq *wq)
1282 {
1283         mutex_lock(&wq->wq_lock);
1284         __drv_disable_wq(wq);
1285         mutex_unlock(&wq->wq_lock);
1286 }
1287
1288 int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
1289 {
1290         struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1291         unsigned long flags;
1292         int rc = 0;
1293
1294         /*
1295          * Device should be in disabled state for the idxd_drv to load. If it's in
1296          * enabled state, then the device was altered outside of driver's control.
1297          * If the device is in the halted state, then we don't want to proceed.
1298          */
1299         if (idxd->state != IDXD_DEV_DISABLED) {
1300                 idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
1301                 return -ENXIO;
1302         }
1303
1304         /* Device configuration */
1305         spin_lock_irqsave(&idxd->dev_lock, flags);
1306         if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1307                 rc = idxd_device_config(idxd);
1308         spin_unlock_irqrestore(&idxd->dev_lock, flags);
1309         if (rc < 0)
1310                 return -ENXIO;
1311
1312         /* Start device */
1313         rc = idxd_device_enable(idxd);
1314         if (rc < 0)
1315                 return rc;
1316
1317         /* Setup DMA device without channels */
1318         rc = idxd_register_dma_device(idxd);
1319         if (rc < 0) {
1320                 idxd_device_disable(idxd);
1321                 idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
1322                 return rc;
1323         }
1324
1325         idxd->cmd_status = 0;
1326         return 0;
1327 }
1328
1329 void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
1330 {
1331         struct device *dev = &idxd_dev->conf_dev;
1332         struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
1333         int i;
1334
1335         for (i = 0; i < idxd->max_wqs; i++) {
1336                 struct idxd_wq *wq = idxd->wqs[i];
1337                 struct device *wq_dev = wq_confdev(wq);
1338
1339                 if (wq->state == IDXD_WQ_DISABLED)
1340                         continue;
1341                 dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
1342                 device_release_driver(wq_dev);
1343         }
1344
1345         idxd_unregister_dma_device(idxd);
1346         idxd_device_disable(idxd);
1347         if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
1348                 idxd_device_reset(idxd);
1349 }
1350
1351 static enum idxd_dev_type dev_types[] = {
1352         IDXD_DEV_DSA,
1353         IDXD_DEV_IAX,
1354         IDXD_DEV_NONE,
1355 };
1356
1357 struct idxd_device_driver idxd_drv = {
1358         .type = dev_types,
1359         .probe = idxd_device_drv_probe,
1360         .remove = idxd_device_drv_remove,
1361         .name = "idxd",
1362 };
1363 EXPORT_SYMBOL_GPL(idxd_drv);