// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
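
/*
 * This file implements the device command plumbing (idxd_cmd_exec() and the
 * polled probe-time reset) together with the WQ, group, and engine control
 * and configuration paths built on top of it. The forward declarations above
 * let the WQ helpers, which come first, call into the command code defined
 * further down.
 */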

/* Interrupt control bits */
void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
{
        struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);

        pci_msi_mask_irq(data);
}

void idxd_mask_msix_vectors(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        int msixcnt = pci_msix_vec_count(pdev);
        int i;

        for (i = 0; i < msixcnt; i++)
                idxd_mask_msix_vector(idxd, i);
}

void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
{
        struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);

        pci_msi_unmask_irq(data);
}

void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        genctrl.halt_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        genctrl.halt_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}
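
/*
 * Descriptor resources: each kernel-owned WQ keeps parallel arrays of
 * hardware descriptors (struct dsa_hw_desc) and software descriptors
 * (struct idxd_desc), plus a single DMA-coherent region of completion
 * records. The helpers below allocate and free those per-WQ arrays.
 */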

static void free_hw_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->hw_descs[i]);
        kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                    GFP_KERNEL, node);
        if (!wq->hw_descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
                                               GFP_KERNEL, node);
                if (!wq->hw_descs[i]) {
                        free_hw_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void free_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->descs[i]);
        kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
                                 GFP_KERNEL, node);
        if (!wq->descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
                                            GFP_KERNEL, node);
                if (!wq->descs[i]) {
                        free_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc, num_descs, i;
        int align;
        u64 tmp;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
        wq->num_descs = num_descs;

        rc = alloc_hw_descs(wq, num_descs);
        if (rc < 0)
                return rc;

        align = idxd->data->align;
        wq->compls_size = num_descs * idxd->data->compl_size + align;
        wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
                                            &wq->compls_addr_raw, GFP_KERNEL);
        if (!wq->compls_raw) {
                rc = -ENOMEM;
                goto fail_alloc_compls;
        }

        /* Adjust alignment */
        wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
        tmp = (u64)wq->compls_raw;
        tmp = (tmp + (align - 1)) & ~(align - 1);
        wq->compls = (struct dsa_completion_record *)tmp;

        rc = alloc_descs(wq, num_descs);
        if (rc < 0)
                goto fail_alloc_descs;

        rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
                                     dev_to_node(dev));
        if (rc < 0)
                goto fail_sbitmap_init;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                desc->hw = wq->hw_descs[i];
                if (idxd->data->type == IDXD_TYPE_DSA)
                        desc->completion = &wq->compls[i];
                else if (idxd->data->type == IDXD_TYPE_IAX)
                        desc->iax_completion = &wq->iax_compls[i];
                desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
                desc->id = i;
                desc->wq = wq;
                desc->cpu = -1;
        }

        return 0;

 fail_sbitmap_init:
        free_descs(wq);
 fail_alloc_descs:
        dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
                          wq->compls_addr_raw);
 fail_alloc_compls:
        free_hw_descs(wq);
        return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_hw_descs(wq);
        free_descs(wq);
        dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
                          wq->compls_addr_raw);
        sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (wq->state == IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d already enabled\n", wq->id);
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
                dev_dbg(dev, "WQ enable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_ENABLED;
        dev_dbg(dev, "WQ %d enabled\n", wq->id);
        return 0;
}
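
/*
 * For the disable, drain, and reset WQ commands below, the command operand
 * selects the target WQ: the low 16 bits are a one-hot mask of the WQ within
 * its block of sixteen and the upper bits carry the block index, i.e.
 * BIT(wq->id % 16) | ((wq->id / 16) << 16).
 */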

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status, operand;

        dev_dbg(dev, "Disabling WQ %d\n", wq->id);

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return 0;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

        if (status != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "WQ disable failed: %#x\n", status);
                return -ENXIO;
        }

        if (reset_config)
                idxd_wq_disable_cleanup(wq);
        wq->state = IDXD_WQ_DISABLED;
        dev_dbg(dev, "WQ %d disabled\n", wq->id);
        return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        dev_dbg(dev, "Draining WQ %d\n", wq->id);
        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
        idxd_wq_disable_cleanup(wq);
        wq->state = IDXD_WQ_DISABLED;
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        resource_size_t start;

        start = pci_resource_start(pdev, IDXD_WQ_BAR);
        start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

        wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
        if (!wq->portal)
                return -ENOMEM;

        return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        devm_iounmap(dev, wq->portal);
        wq->portal = NULL;
        wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                if (wq->portal)
                        idxd_wq_unmap_portal(wq);
        }
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;
        int rc;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 1;
        wqcfg.pasid = pasid;
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;
        int rc;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 0;
        wqcfg.pasid = 0;
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;

        lockdep_assert_held(&wq->wq_lock);
        memset(wq->wqcfg, 0, idxd->wqcfg_size);
        wq->type = IDXD_WQT_NONE;
        wq->threshold = 0;
        wq->priority = 0;
        wq->ats_dis = 0;
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
        clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
        memset(wq->name, 0, WQ_NAME_SIZE);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);

        wq->size = 0;
        wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
        struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

        complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
        int rc;

        memset(&wq->wq_active, 0, sizeof(wq->wq_active));
        rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
        if (rc < 0)
                return rc;
        reinit_completion(&wq->wq_dead);
        return 0;
}
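
/*
 * Quiescing a WQ: wq_active is the percpu_ref initialized above (the
 * submission path elsewhere in the driver is expected to hold a reference per
 * in-flight descriptor). Killing it and then waiting on wq_dead, which
 * idxd_wq_ref_release() completes once the last reference drops, ensures no
 * work remains outstanding before the WQ is torn down.
 */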

void idxd_wq_quiesce(struct idxd_wq *wq)
{
        percpu_ref_kill(&wq->wq_active);
        wait_for_completion(&wq->wq_dead);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
                return true;
        return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        union idxd_command_reg cmd;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                return -ENXIO;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = IDXD_CMD_RESET_DEVICE;
        dev_dbg(dev, "%s: sending reset for init.\n", __func__);
        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
               IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        spin_unlock(&idxd->cmd_lock);
        return 0;
}
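
/*
 * idxd_cmd_exec() is the interrupt-driven counterpart to the polled reset
 * above: commands are serialized via cmd_lock and the IDXD_FLAG_CMD_RUNNING
 * flag, the caller sleeps on an on-stack completion that the command
 * interrupt handler signals through idxd->cmd_done, and the low byte of the
 * command status register is cached in idxd->cmd_status for later reporting.
 */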

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status)
{
        union idxd_command_reg cmd;
        DECLARE_COMPLETION_ONSTACK(done);
        u32 stat;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                if (status)
                        *status = IDXD_CMDSTS_HW_ERR;
                return;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        cmd.int_req = 1;

        spin_lock(&idxd->cmd_lock);
        wait_event_lock_irq(idxd->cmd_waitq,
                            !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
                            idxd->cmd_lock);

        dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
                __func__, cmd_code, operand);

        idxd->cmd_status = 0;
        __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        idxd->cmd_done = &done;
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        /*
         * After command submitted, release lock and go to sleep until
         * the command completes via interrupt.
         */
        spin_unlock(&idxd->cmd_lock);
        wait_for_completion(&done);
        stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_lock(&idxd->cmd_lock);
        if (status)
                *status = stat;
        idxd->cmd_status = stat & GENMASK(7, 0);

        __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        /* Wake up other pending commands */
        wake_up(&idxd->cmd_waitq);
        spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device already enabled\n");
                return -ENXIO;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

        /* If the command is successful or if the device was enabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_ENABLED;
        return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (!idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device is not enabled\n");
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

        /* If the command is successful or if the device was disabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        spin_lock(&idxd->dev_lock);
        idxd_device_clear_state(idxd);
        idxd->state = IDXD_DEV_DISABLED;
        spin_unlock(&idxd->dev_lock);
        return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
        spin_lock(&idxd->dev_lock);
        idxd_device_clear_state(idxd);
        idxd->state = IDXD_DEV_DISABLED;
        idxd_unmask_error_interrupts(idxd);
        idxd_msix_perm_setup(idxd);
        spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        operand = pasid;
        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
        dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "get int handle, idx %d\n", idx);

        operand = idx & GENMASK(15, 0);
        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

        idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "request int handle failed: %#x\n", status);
                return -ENXIO;
        }

        *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

        dev_dbg(dev, "int handle acquired: %u\n", *handle);
        return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;
        union idxd_command_reg cmd;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "release int handle, handle %d\n", handle);

        memset(&cmd, 0, sizeof(cmd));
        operand = handle & GENMASK(15, 0);

        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
        cmd.operand = operand;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_unlock(&idxd->cmd_lock);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "release int handle failed: %#x\n", status);
                return -ENXIO;
        }

        dev_dbg(dev, "int handle released.\n");
        return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
        struct idxd_engine *engine;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_engines; i++) {
                engine = idxd->engines[i];
                engine->group = NULL;
        }
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
        struct idxd_group *group;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                memset(&group->grpcfg, 0, sizeof(group->grpcfg));
                group->num_engines = 0;
                group->num_wqs = 0;
                group->use_rdbuf_limit = false;
                group->rdbufs_allowed = 0;
                group->rdbufs_reserved = 0;
                if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
                        group->tc_a = 1;
                        group->tc_b = 1;
                } else {
                        group->tc_a = -1;
                        group->tc_b = -1;
                }
        }
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                if (wq->state == IDXD_WQ_ENABLED) {
                        idxd_wq_disable_cleanup(wq);
                        wq->state = IDXD_WQ_DISABLED;
                }
                idxd_wq_device_reset_cleanup(wq);
        }
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                return;

        idxd_groups_clear_state(idxd);
        idxd_engines_clear_state(idxd);
        idxd_device_wqs_clear_state(idxd);
}
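
/*
 * MSI-X permission table: each vector has an 8-byte entry that controls
 * whether PASID is used for that vector. Entry 0 (the misc/command interrupt
 * vector) is left alone, so the loops below start at index 1 when programming
 * or clearing the remaining entries.
 */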

void idxd_msix_perm_setup(struct idxd_device *idxd)
{
        union msix_perm mperm;
        int i, msixcnt;

        msixcnt = pci_msix_vec_count(idxd->pdev);
        if (msixcnt < 0)
                return;

        mperm.bits = 0;
        mperm.pasid = idxd->pasid;
        mperm.pasid_en = device_pasid_enabled(idxd);
        for (i = 1; i < msixcnt; i++)
                iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

void idxd_msix_perm_clear(struct idxd_device *idxd)
{
        union msix_perm mperm;
        int i, msixcnt;

        msixcnt = pci_msix_vec_count(idxd->pdev);
        if (msixcnt < 0)
                return;

        mperm.bits = 0;
        for (i = 1; i < msixcnt; i++)
                iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
}

static void idxd_group_config_write(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i;
        u32 grpcfg_offset;

        dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

        /* setup GRPWQCFG */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset,
                        ioread64(idxd->reg_base + grpcfg_offset));
        }

        /* setup GRPENGCFG */
        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

        /* setup GRPFLAGS */
        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
                group->id, grpcfg_offset,
                ioread32(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i;
        struct device *dev = &idxd->pdev->dev;

        /* Setup bandwidth rdbuf limit */
        if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
                reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
                reg.rdbuf_limit = idxd->rdbuf_limit;
                iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
        }

        dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
                ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_config_write(group);
        }

        return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;

        if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
                return true;
        return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 wq_offset;
        int i;

        if (!wq->group)
                return 0;

        /*
         * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
         * wq reset. This will copy back the sticky values that are present on some devices.
         */
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
        }

        /* byte 0-3 */
        wq->wqcfg->wq_size = wq->size;

        if (wq->size == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
                dev_warn(dev, "Incorrect work queue size: 0\n");
                return -EINVAL;
        }

        /* bytes 4-7 */
        wq->wqcfg->wq_thresh = wq->threshold;

        /* byte 8-11 */
        if (wq_dedicated(wq))
                wq->wqcfg->mode = 1;

        if (device_pasid_enabled(idxd)) {
                wq->wqcfg->pasid_en = 1;
                if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
                        wq->wqcfg->pasid = idxd->pasid;
        }

        /*
         * Here the priv bit is set depending on the WQ type. priv = 1 if the
         * WQ type is kernel to indicate privileged access. This setting only
         * matters for dedicated WQ. According to the DSA spec:
         * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
         * Privileged Mode Enable field of the PCI Express PASID capability
         * is 0, this field must be 0.
         *
         * In the case of a dedicated kernel WQ that is not able to support
         * the PASID cap, then the configuration will be rejected.
         */
        wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
        if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
            !idxd_device_pasid_priv_enabled(idxd) &&
            wq->type == IDXD_WQT_KERNEL) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
                return -EOPNOTSUPP;
        }

        wq->wqcfg->priority = wq->priority;

        if (idxd->hw.gen_cap.block_on_fault &&
            test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
                wq->wqcfg->bof = 1;

        if (idxd->hw.wq_cap.wq_ats_support)
                wq->wqcfg->wq_ats_disable = wq->ats_dis;

        /* bytes 12-15 */
        wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
        wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);

        dev_dbg(dev, "WQ %d CFGs\n", wq->id);
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
                        wq->id, i, wq_offset,
                        ioread32(idxd->reg_base + wq_offset));
        }

        return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
        int i, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_config_write(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
        int i;

        /* TC-A 0 and TC-B 1 should be defaults */
        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                if (group->tc_a == -1)
                        group->tc_a = group->grpcfg.flags.tc_a = 0;
                else
                        group->grpcfg.flags.tc_a = group->tc_a;
                if (group->tc_b == -1)
                        group->tc_b = group->grpcfg.flags.tc_b = 1;
                else
                        group->grpcfg.flags.tc_b = group->tc_b;
                group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
                group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
                if (group->rdbufs_allowed)
                        group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
                else
                        group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
        }
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
        int i, engines = 0;
        struct idxd_engine *eng;
        struct idxd_group *group;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                group->grpcfg.engines = 0;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                eng = idxd->engines[i];
                group = eng->group;

                if (!group)
                        continue;

                group->grpcfg.engines |= BIT(eng->id);
                engines++;
        }

        if (!engines)
                return -EINVAL;

        return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        struct idxd_group *group;
        int i, j, configured = 0;
        struct device *dev = &idxd->pdev->dev;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                for (j = 0; j < 4; j++)
                        group->grpcfg.wqs[j] = 0;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = idxd->wqs[i];
                group = wq->group;

                if (!wq->group)
                        continue;

                if (wq_shared(wq) && !device_swq_supported(idxd)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
                        dev_warn(dev, "No shared wq support but configured.\n");
                        return -EINVAL;
                }

                group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
                configured++;
        }

        if (configured == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
                return -EINVAL;
        }

        return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_wqs_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_engines_setup(idxd);
        if (rc < 0)
                return rc;

        idxd_group_flags_setup(idxd);

        rc = idxd_wqs_config_write(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_groups_config_write(idxd);
        if (rc < 0)
                return rc;

        return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int wqcfg_offset;
        int i;

        wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
        memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

        wq->size = wq->wqcfg->wq_size;
        wq->threshold = wq->wqcfg->wq_thresh;
        if (wq->wqcfg->priv)
                wq->type = IDXD_WQT_KERNEL;

        /* The driver does not support shared WQ mode in read-only config yet */
        if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
                return -EOPNOTSUPP;

        set_bit(WQ_FLAG_DEDICATED, &wq->flags);

        wq->priority = wq->wqcfg->priority;

        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
        }

        return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i, j, grpcfg_offset;

        /*
         * Load WQS bit fields
         * Iterate through all 256 bits 64 bits at a time
         */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                struct idxd_wq *wq;

                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

                if (i * 64 >= idxd->max_wqs)
                        break;

                /* Iterate through all 64 bits and check for wq set */
                for (j = 0; j < 64; j++) {
                        int id = i * 64 + j;

                        /* No need to check beyond max wqs */
                        if (id >= idxd->max_wqs)
                                break;

                        /* Set group assignment for wq if wq bit is set */
                        if (group->grpcfg.wqs[i] & BIT(j)) {
                                wq = idxd->wqs[id];
                                wq->group = group;
                        }
                }
        }

        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, group->grpcfg.engines);

        /* Iterate through all 64 bits to check engines set */
        for (i = 0; i < 64; i++) {
                if (i >= idxd->max_engines)
                        break;

                if (group->grpcfg.engines & BIT(i)) {
                        struct idxd_engine *engine = idxd->engines[i];

                        engine->group = group;
                }
        }

        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        group->grpcfg.flags.bits = ioread32(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
                group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i, rc;

        reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        idxd->rdbuf_limit = reg.rdbuf_limit;

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_load_config(group);
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_load_config(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}
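
/*
 * __drv_enable_wq() is the common enable path taken when a wq driver binds:
 * it validates device state, WQ state, group attachment, name, and the
 * shared-WQ constraints, writes the device configuration when the device is
 * configurable, enables the WQ, and finally maps its portal. A portal
 * mapping failure unwinds by disabling the WQ again.
 */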

int __drv_enable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc = -ENXIO;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd->state != IDXD_DEV_ENABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
                goto err;
        }

        if (wq->state != IDXD_WQ_DISABLED) {
                dev_dbg(dev, "wq %d already enabled.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
                rc = -EBUSY;
                goto err;
        }

        if (!wq->group) {
                dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
                goto err;
        }

        if (strlen(wq->name) == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
                dev_dbg(dev, "wq %d name not set.\n", wq->id);
                goto err;
        }

        /* Shared WQ checks */
        if (wq_shared(wq)) {
                if (!device_swq_supported(idxd)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
                        dev_dbg(dev, "PASID not enabled and shared wq.\n");
                        goto err;
                }
                /*
                 * Shared wq with the threshold set to 0 means the user
                 * did not set the threshold or transitioned from a
                 * dedicated wq but did not set threshold. A value
                 * of 0 would effectively disable the shared wq. The
                 * driver does not allow a value of 0 to be set for
                 * threshold via sysfs.
                 */
                if (wq->threshold == 0) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
                        dev_dbg(dev, "Shared wq and threshold 0.\n");
                        goto err;
                }
        }

        rc = 0;
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0) {
                dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_enable(wq);
        if (rc < 0) {
                dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_map_portal(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
                dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
                goto err_map_portal;
        }

        wq->client_count = 0;
        return 0;

err_map_portal:
        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
        return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
        int rc;

        mutex_lock(&wq->wq_lock);
        rc = __drv_enable_wq(wq);
        mutex_unlock(&wq->wq_lock);
        return rc;
}

void __drv_disable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd_wq_refcount(wq))
                dev_warn(dev, "Clients have claim on wq %d: %d\n",
                         wq->id, idxd_wq_refcount(wq));

        idxd_wq_unmap_portal(wq);

        idxd_wq_drain(wq);
        idxd_wq_reset(wq);

        wq->client_count = 0;
}

void drv_disable_wq(struct idxd_wq *wq)
{
        mutex_lock(&wq->wq_lock);
        __drv_disable_wq(wq);
        mutex_unlock(&wq->wq_lock);
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int rc = 0;

        /*
         * Device should be in disabled state for the idxd_drv to load. If it's in
         * enabled state, then the device was altered outside of driver's control.
         * If the state is in halted state, then we don't want to proceed.
         */
        if (idxd->state != IDXD_DEV_DISABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
                return -ENXIO;
        }

        /* Device configuration */
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0)
                return -ENXIO;

        /* Start device */
        rc = idxd_device_enable(idxd);
        if (rc < 0)
                return rc;

        /* Setup DMA device without channels */
        rc = idxd_register_dma_device(idxd);
        if (rc < 0) {
                idxd_device_disable(idxd);
                idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
                return rc;
        }

        idxd->cmd_status = 0;
        return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
        struct device *dev = &idxd_dev->conf_dev;
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];
                struct device *wq_dev = wq_confdev(wq);

                if (wq->state == IDXD_WQ_DISABLED)
                        continue;
                dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
                device_release_driver(wq_dev);
        }

        idxd_unregister_dma_device(idxd);
        idxd_device_disable(idxd);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                idxd_device_reset(idxd);
}

static enum idxd_dev_type dev_types[] = {
        IDXD_DEV_DSA,
        IDXD_DEV_IAX,
        IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
        .type = dev_types,
        .probe = idxd_device_drv_probe,
        .remove = idxd_device_drv_remove,
        .name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);