// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 1;
	genctrl.halt_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
	union genctrl_reg genctrl;

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.softerr_int_en = 0;
	genctrl.halt_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);
}
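
/*
 * Each WQ keeps two parallel arrays: hw_descs[] holds the raw hardware
 * descriptors handed to the device, while descs[] holds the driver-side
 * bookkeeping (completion record pointer, DMA address, dmaengine txd).
 * Both are allocated on the device's NUMA node.
 */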
static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, node);
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, node);
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

static void free_descs(struct idxd_wq *wq)
{
	int i;

	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int i;
	int node = dev_to_node(dev);

	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, node);
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, node);
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}

	return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc, num_descs, i;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;

	rc = alloc_hw_descs(wq, num_descs);
	if (rc < 0)
		return rc;

	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		goto fail_alloc_compls;
	}

	rc = alloc_descs(wq, num_descs);
	if (rc < 0)
		goto fail_alloc_descs;

	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	if (rc < 0)
		goto fail_sbitmap_init;

	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}

	return 0;

 fail_sbitmap_init:
	free_descs(wq);
 fail_alloc_descs:
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
	free_hw_descs(wq);
	return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_hw_descs(wq);
	free_descs(wq);
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
		dev_dbg(dev, "WQ enable failed: %#x\n", status);
		return -ENXIO;
	}

	wq->state = IDXD_WQ_ENABLED;
	set_bit(wq->id, idxd->wq_enable_map);
	dev_dbg(dev, "WQ %d enabled\n", wq->id);
	return 0;
}
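
/*
 * For the WQ disable/drain/reset commands, the operand encodes the target
 * WQ as a one-hot bit within a 16-WQ bank (bits 15:0) plus the bank index
 * (bits 31:16): hence the BIT(wq->id % 16) | ((wq->id / 16) << 16)
 * computation below.
 */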
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 status, operand;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

	if (status != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "WQ disable failed: %#x\n", status);
		return -ENXIO;
	}

	if (reset_config)
		idxd_wq_disable_cleanup(wq);
	clear_bit(wq->id, idxd->wq_enable_map);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);
	return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
	idxd_wq_disable_cleanup(wq);
}
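
/*
 * The portal is the MMIO window that descriptors are submitted through
 * (MOVDIR64B for a dedicated WQ, ENQCMDS for a shared WQ). Only the
 * limited portal is mapped here.
 */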
int idxd_wq_map_portal(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	resource_size_t start;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

	return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}
}

static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned int offset;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.priv = priv;
	wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
	iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);
}

static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
	struct idxd_device *idxd = wq->idxd;
	union wqcfg wqcfg;
	unsigned int offset;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
	int rc;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	__idxd_wq_set_pasid_locked(wq, pasid);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	int rc;
	union wqcfg wqcfg;
	unsigned int offset;

	rc = idxd_wq_disable(wq, false);
	if (rc < 0)
		return rc;

	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

	rc = idxd_wq_enable(wq);
	if (rc < 0)
		return rc;

	return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	wq->state = IDXD_WQ_DISABLED;
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
	clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
	clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
	memset(wq->name, 0, WQ_NAME_SIZE);
	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
	if (wq->opcap_bmap)
		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);

	wq->size = 0;
	wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
	struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

	complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
	int rc;

	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	reinit_completion(&wq->wq_resurrect);
	return 0;
}
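
/*
 * Quiescing kills the wq_active percpu ref, wakes any submitters waiting
 * on wq_resurrect, then blocks until the last in-flight reference drops
 * and idxd_wq_ref_release() completes wq_dead.
 */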
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
	lockdep_assert_held(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);
	percpu_ref_kill(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
		return true;
	return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
	union gensts_reg gensts;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

	return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	union idxd_command_reg cmd;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = IDXD_CMD_RESET_DEVICE;
	dev_dbg(dev, "%s: sending reset for init.\n", __func__);
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);
	return 0;
}
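
/*
 * Post-probe command path: commands are serialized by IDXD_FLAG_CMD_RUNNING
 * under cmd_lock, and completion is signalled from the misc interrupt
 * handler via idxd->cmd_done instead of polling.
 */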
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
			  u32 *status)
{
	union idxd_command_reg cmd;
	DECLARE_COMPLETION_ONSTACK(done);
	u32 stat;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		if (status)
			*status = IDXD_CMDSTS_HW_ERR;
		return;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = cmd_code;
	cmd.operand = operand;
	cmd.int_req = 1;

	spin_lock(&idxd->cmd_lock);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/*
	 * After command submitted, release lock and go to sleep until
	 * the command completes via interrupt.
	 */
	spin_unlock(&idxd->cmd_lock);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	/* Wake up other pending commands */
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was enabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd->state = IDXD_DEV_ENABLED;
	return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	u32 status;

	if (!idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device is not enabled\n");
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

	/* If the command is successful or if the device was disabled */
	if (status != IDXD_CMDSTS_SUCCESS &&
	    !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
		dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
		return -ENXIO;
	}

	idxd_device_clear_state(idxd);
	return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
	idxd_device_clear_state(idxd);
	spin_lock(&idxd->dev_lock);
	idxd_unmask_error_interrupts(idxd);
	spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand;

	operand = pasid;
	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
	dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "get int handle, idx %d\n", idx);

	operand = idx & GENMASK(15, 0);
	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

	idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "request int handle failed: %#x\n", status);
		return -ENXIO;
	}

	*handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

	dev_dbg(dev, "int handle acquired: %u\n", *handle);
	return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type)
{
	struct device *dev = &idxd->pdev->dev;
	u32 operand, status;
	union idxd_command_reg cmd;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;

	dev_dbg(dev, "release int handle, handle %d\n", handle);

	memset(&cmd, 0, sizeof(cmd));
	operand = handle & GENMASK(15, 0);

	if (irq_type == IDXD_IRQ_IMS)
		operand |= CMD_INT_HANDLE_IMS;

	cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
	cmd.operand = operand;

	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);

	if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
		dev_dbg(dev, "release int handle failed: %#x\n", status);
		return -ENXIO;
	}

	dev_dbg(dev, "int handle released.\n");
	return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		group->desc_progress_limit = 0;
		group->batch_progress_limit = 0;
	}
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);
		idxd_wq_disable_cleanup(wq);
		idxd_wq_device_reset_cleanup(wq);
		mutex_unlock(&wq->wq_lock);
	}
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
	/* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		/*
		 * Clearing wq state is protected by wq lock.
		 * So no need to be protected by device lock.
		 */
		idxd_device_wqs_clear_state(idxd);

		spin_lock(&idxd->dev_lock);
		idxd_groups_clear_state(idxd);
		idxd_engines_clear_state(idxd);
	} else {
		spin_lock(&idxd->dev_lock);
	}

	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);
}
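
/*
 * The event log (EVL) is a DMA-coherent buffer that the device appends
 * fault and error records to. The bitmap allocated below is used by the
 * driver's event log processing to track outstanding entries.
 */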
static int idxd_device_evl_setup(struct idxd_device *idxd)
{
	union gencfg_reg gencfg;
	union evlcfg_reg evlcfg;
	union genctrl_reg genctrl;
	struct device *dev = &idxd->pdev->dev;
	void *addr;
	dma_addr_t dma_addr;
	int size;
	struct idxd_evl *evl = idxd->evl;
	unsigned long *bmap;
	int rc;

	if (!evl)
		return 0;

	size = evl_size(idxd);

	bmap = bitmap_zalloc(size, GFP_KERNEL);
	if (!bmap) {
		rc = -ENOMEM;
		goto err_bmap;
	}

	/*
	 * Address needs to be page aligned. However, dma_alloc_coherent() provides
	 * an address aligned to at least the page size. No manual alignment required.
	 */
	addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (!addr) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	spin_lock(&evl->lock);
	evl->log = addr;
	evl->dma = dma_addr;
	evl->log_size = size;
	evl->bmap = bmap;

	memset(&evlcfg, 0, sizeof(evlcfg));
	evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
	evlcfg.size = evl->size;

	iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	gencfg.evl_en = 1;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	spin_unlock(&evl->lock);
	return 0;

err_alloc:
	bitmap_free(bmap);
err_bmap:
	return rc;
}

static void idxd_device_evl_free(struct idxd_device *idxd)
{
	union gencfg_reg gencfg;
	union genctrl_reg genctrl;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	if (!gencfg.evl_en)
		return;

	spin_lock(&evl->lock);
	gencfg.evl_en = 0;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
	bitmap_free(evl->bmap);
	evl->log = NULL;
	evl->size = IDXD_EVL_SIZE_MIN;
	spin_unlock(&evl->lock);
}

static void idxd_group_config_write(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i;
	u32 grpcfg_offset;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset,
		ioread64(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i;
	struct device *dev = &idxd->pdev->dev;

	/* Setup bandwidth rdbuf limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

	return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i, n;

	if (!wq->group)
		return 0;

	/*
	 * Instead of memset()ing the entire shadow copy of WQCFG, copy back from
	 * the hardware after wq reset. This preserves the sticky values that are
	 * present on some devices.
	 */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
	}

	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
		wq->size = WQ_DEFAULT_QUEUE_DEPTH;

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;

	/*
	 * The WQ priv bit is set depending on the WQ type. priv = 1 if the
	 * WQ type is kernel, to indicate privileged access. This setting only
	 * matters for a dedicated WQ. According to the DSA spec:
	 * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
	 * Privileged Mode Enable field of the PCI Express PASID capability
	 * is 0, this field must be 0.
	 *
	 * If a dedicated kernel WQ cannot support the PASID cap, the
	 * configuration is rejected.
	 */
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
	    !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	if (idxd->hw.wq_cap.wq_prs_support)
		wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

	/* bytes 32-63 */
	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
		memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
		for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
			int pos = n % BITS_PER_LONG_LONG;
			int idx = n / BITS_PER_LONG_LONG;

			wq->wqcfg->op_config[idx] |= BIT(pos);
		}
	}

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}

	return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
	int i;

	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
		group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
	}
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
	int i, engines = 0;
	struct idxd_engine *eng;
	struct idxd_group *group;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;

		if (!group)
			continue;

		group->grpcfg.engines |= BIT(eng->id);
		engines++;
	}

	if (!engines)
		return -EINVAL;

	return 0;
}
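
/*
 * group->grpcfg.wqs[] is a 256-bit WQ membership bitmap stored as four
 * 64-bit words, hence the wqs[wq->id / 64] | BIT(wq->id % 64) indexing
 * used below.
 */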
static int idxd_wqs_setup(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct idxd_group *group;
	int i, j, configured = 0;
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;

		if (wq_shared(wq) && !wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			dev_warn(dev, "No shared wq support but configured.\n");
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}

	return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
	int rc;

	lockdep_assert_held(&idxd->dev_lock);
	rc = idxd_wqs_setup(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_engines_setup(idxd);
	if (rc < 0)
		return rc;

	idxd_group_flags_setup(idxd);

	rc = idxd_wqs_config_write(idxd);
	if (rc < 0)
		return rc;

	rc = idxd_groups_config_write(idxd);
	if (rc < 0)
		return rc;

	return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int wqcfg_offset;
	int i;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

	return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;
	int i, j, grpcfg_offset;

	/*
	 * Load WQS bit fields
	 * Iterate through all 256 bits 64 bits at a time
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		struct idxd_wq *wq;

		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			/* No need to check beyond max wqs */
			if (id >= idxd->max_wqs)
				break;

			/* Set group assignment for wq if wq bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;

		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);
}
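
/*
 * Read-only configuration path: when the device configuration is locked
 * (e.g. preprogrammed by firmware), the driver imports the existing group
 * and WQ programming from MMIO instead of writing its own.
 */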
int idxd_device_load_config(struct idxd_device *idxd)
{
	union gencfg_reg reg;
	int i, rc;

	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

	return 0;
}
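
/*
 * Descriptors in flight sit on two per-vector lists: pending_llist (a
 * lock-less list populated at submission time) and work_list (entries the
 * interrupt thread is still tracking). Flushing migrates both onto a
 * private list so the abort completions can run without holding list_lock.
 */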
static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;
	LIST_HEAD(flist);
	enum idxd_complete_type ctype;

	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(desc, itr, head, llnode)
			list_add_tail(&desc->list, &ie->work_list);
	}

	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
		list_move_tail(&desc->list, &flist);
	spin_unlock(&ie->list_lock);

	list_for_each_entry_safe(desc, itr, &flist, list) {
		struct dma_async_tx_descriptor *tx;

		list_del(&desc->list);
		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
		/*
		 * wq is being disabled. Any remaining descriptors are
		 * likely to be stuck and can be dropped. The callback could
		 * point to code that is no longer accessible, for example
		 * if the dmatest module has been unloaded.
		 */
		tx = &desc->txd;
		tx->callback = NULL;
		tx->callback_result = NULL;
		idxd_dma_complete_txd(desc, ctype, true);
	}
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
				       struct idxd_irq_entry *ie)
{
	union msix_perm mperm;

	if (ie->pasid == IOMMU_PASID_INVALID)
		return;

	mperm.bits = 0;
	mperm.pasid = ie->pasid;
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
					 struct idxd_irq_entry *ie)
{
	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = &wq->ie;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_irq(ie->vector, ie);
	idxd_flush_pending_descs(ie);
	if (idxd->request_int_handles)
		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
	idxd_device_clear_perm_entry(idxd, ie);
	ie->vector = -1;
	ie->int_handle = INVALID_INT_HANDLE;
	ie->pasid = IOMMU_PASID_INVALID;
}

int idxd_wq_request_irq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int rc;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	ie = &wq->ie;
	ie->vector = pci_irq_vector(pdev, ie->id);
	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
	idxd_device_set_perm_entry(idxd, ie);

	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
		goto err_irq;
	}

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		if (rc < 0)
			goto err_int_handle;
	} else {
		ie->int_handle = ie->id;
	}

	return 0;

err_int_handle:
	ie->int_handle = INVALID_INT_HANDLE;
	free_irq(ie->vector, ie);
err_irq:
	idxd_device_clear_perm_entry(idxd, ie);
	ie->pasid = IOMMU_PASID_INVALID;
	return rc;
}
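
/*
 * Common WQ enable path for the wq drivers: validate the sysfs-provided
 * configuration, program the PASID/priv bits if the device is configurable,
 * write the device config, issue the Enable WQ command, and map the
 * submission portal. The IRQ, descriptor resources, and wq_active percpu
 * ref set up at the end only apply to kernel-type WQs; failures unwind in
 * reverse through the err_* labels.
 */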
int drv_enable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}

	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}

	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}

	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}

	/* Shared WQ checks */
	if (wq_shared(wq)) {
		if (!wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			dev_dbg(dev, "PASID not enabled and shared wq.\n");
			goto err;
		}
		/*
		 * Shared wq with the threshold set to 0 means the user
		 * did not set the threshold or transitioned from a
		 * dedicated wq but did not set the threshold. A value
		 * of 0 would effectively disable the shared wq. The
		 * driver does not allow a value of 0 to be set for
		 * threshold via sysfs.
		 */
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			dev_dbg(dev, "Shared wq and threshold 0.\n");
			goto err;
		}
	}

	/*
	 * The WQ may be configurable for the pasid and priv bits. For a kernel
	 * wq, the driver should set up the pasid, pasid_en, and priv bit. For
	 * a non-kernel wq, the driver should only set the pasid_en bit for a
	 * shared wq. A dedicated wq that is not 'kernel' type will configure
	 * pasid and pasid_en later on, so there is no need to set them up here.
	 */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		int priv = 0;

		if (wq_pasid_enabled(wq)) {
			if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
				u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

				__idxd_wq_set_pasid_locked(wq, pasid);
			}
		}

		if (is_idxd_wq_kernel(wq))
			priv = 1;
		__idxd_wq_set_priv_locked(wq, priv);
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		goto err_map_portal;
	}

	wq->client_count = 0;

	rc = idxd_wq_request_irq(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
		goto err_irq;
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		dev_dbg(dev, "WQ resource alloc failed\n");
		goto err_res_alloc;
	}

	rc = idxd_wq_init_percpu_ref(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		dev_dbg(dev, "percpu_ref setup failed\n");
		goto err_ref;
	}

	return 0;

err_ref:
	idxd_wq_free_resources(wq);
err_res_alloc:
	idxd_wq_free_irq(wq);
err_irq:
	idxd_wq_unmap_portal(wq);
err_map_portal:
	if (idxd_wq_disable(wq, false))
		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
	return rc;
}

void drv_disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);
	idxd_wq_drain(wq);
	idxd_wq_free_irq(wq);
	idxd_wq_reset(wq);
	idxd_wq_free_resources(wq);
	percpu_ref_exit(&wq->wq_active);
	wq->type = IDXD_WQT_NONE;
	wq->client_count = 0;
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int rc = 0;

	/*
	 * Device should be in disabled state for the idxd_drv to load. If it's in
	 * enabled state, then the device was altered outside of driver's control.
	 * If the device is in a halted state, we don't want to proceed.
	 */
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	/* Device configuration */
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	rc = idxd_device_evl_setup(idxd);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
		return rc;
	}

	/* Start device */
	rc = idxd_device_enable(idxd);
	if (rc < 0) {
		idxd_device_evl_free(idxd);
		return rc;
	}

	/* Setup DMA device without channels */
	rc = idxd_register_dma_device(idxd);
	if (rc < 0) {
		idxd_device_disable(idxd);
		idxd_device_evl_free(idxd);
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		return rc;
	}

	idxd->cmd_status = 0;
	return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];
		struct device *wq_dev = wq_confdev(wq);

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
		device_release_driver(wq_dev);
	}

	idxd_unregister_dma_device(idxd);
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
	idxd_device_evl_free(idxd);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_DSA,
	IDXD_DEV_IAX,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
	.type = dev_types,
	.probe = idxd_device_drv_probe,
	.remove = idxd_device_drv_remove,
	.name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);