// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        genctrl.halt_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        genctrl.halt_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

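/*
 * Descriptor storage is split in two below: the hardware descriptors
 * (wq->hw_descs) and the driver's bookkeeping descriptors (wq->descs)
 * are allocated one by one on the device's NUMA node, while the
 * completion records are carved out of a single dma_alloc_coherent()
 * block in idxd_wq_alloc_resources() so that each descriptor's
 * compl_dma is a fixed offset into that block.
 */
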
static void free_hw_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->hw_descs[i]);

        kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                    GFP_KERNEL, node);
        if (!wq->hw_descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
                                               GFP_KERNEL, node);
                if (!wq->hw_descs[i]) {
                        free_hw_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void free_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->descs[i]);

        kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
                                 GFP_KERNEL, node);
        if (!wq->descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
                                            GFP_KERNEL, node);
                if (!wq->descs[i]) {
                        free_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

/* WQ control bits */
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc, num_descs, i;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
        wq->num_descs = num_descs;

        rc = alloc_hw_descs(wq, num_descs);
        if (rc < 0)
                return rc;

        wq->compls_size = num_descs * idxd->data->compl_size;
        wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
        if (!wq->compls) {
                rc = -ENOMEM;
                goto fail_alloc_compls;
        }

        rc = alloc_descs(wq, num_descs);
        if (rc < 0)
                goto fail_alloc_descs;

        rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
                                     dev_to_node(dev));
        if (rc < 0)
                goto fail_sbitmap_init;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                desc->hw = wq->hw_descs[i];
                if (idxd->data->type == IDXD_TYPE_DSA)
                        desc->completion = &wq->compls[i];
                else if (idxd->data->type == IDXD_TYPE_IAX)
                        desc->iax_completion = &wq->iax_compls[i];
                desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
                desc->id = i;
                desc->wq = wq;
                desc->cpu = -1;
        }

        return 0;

 fail_sbitmap_init:
        free_descs(wq);
 fail_alloc_descs:
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
        free_hw_descs(wq);
        return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_hw_descs(wq);
        free_descs(wq);
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
        sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (wq->state == IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d already enabled\n", wq->id);
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
                dev_dbg(dev, "WQ enable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_ENABLED;
        set_bit(wq->id, idxd->wq_enable_map);
        dev_dbg(dev, "WQ %d enabled\n", wq->id);
        return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status, operand;

        dev_dbg(dev, "Disabling WQ %d\n", wq->id);

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return 0;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

        if (status != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "WQ disable failed: %#x\n", status);
                return -ENXIO;
        }

        if (reset_config)
                idxd_wq_disable_cleanup(wq);
        clear_bit(wq->id, idxd->wq_enable_map);
        wq->state = IDXD_WQ_DISABLED;
        dev_dbg(dev, "WQ %d disabled\n", wq->id);
        return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        dev_dbg(dev, "Draining WQ %d\n", wq->id);
        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
        idxd_wq_disable_cleanup(wq);
}

int idxd_wq_map_portal(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        resource_size_t start;

        start = pci_resource_start(pdev, IDXD_WQ_BAR);
        start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

        wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
        if (!wq->portal)
                return -ENOMEM;

        return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        devm_iounmap(dev, wq->portal);
        wq->portal = NULL;
        wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                if (wq->portal)
                        idxd_wq_unmap_portal(wq);
        }
}

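/*
 * The two helpers below patch a single 32-bit word of a WQ's WQCFG
 * register file: read the live word, update the priv or pasid fields,
 * mirror the result into the wq->wqcfg shadow copy, and write it back,
 * all under dev_lock so concurrent configuration writers cannot
 * interleave.
 */
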
static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.priv = priv;
        wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
        iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);
}

static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 1;
        wqcfg.pasid = pasid;
        wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);
}

int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
        int rc;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        __idxd_wq_set_pasid_locked(wq, pasid);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        int rc;
        union wqcfg wqcfg;
        unsigned int offset;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 0;
        wqcfg.pasid = 0;
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;

        lockdep_assert_held(&wq->wq_lock);
        wq->state = IDXD_WQ_DISABLED;
        memset(wq->wqcfg, 0, idxd->wqcfg_size);
        wq->type = IDXD_WQT_NONE;
        wq->threshold = 0;
        wq->priority = 0;
        wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
        clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
        clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
        memset(wq->name, 0, WQ_NAME_SIZE);
        wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
        if (wq->opcap_bmap)
                bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);
        wq->size = 0;
        wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
        struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

        complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
        int rc;

        memset(&wq->wq_active, 0, sizeof(wq->wq_active));
        rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
        if (rc < 0)
                return rc;
        reinit_completion(&wq->wq_dead);
        reinit_completion(&wq->wq_resurrect);
        return 0;
}

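/*
 * Quiesce protocol: percpu_ref_kill() makes new submitters fail their
 * tryget on wq->wq_active; completing wq_resurrect releases any
 * submitter already blocked waiting on it; then we sleep until the
 * last in-flight reference drops and idxd_wq_ref_release() signals
 * wq_dead.
 */
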
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);
        reinit_completion(&wq->wq_resurrect);
        percpu_ref_kill(&wq->wq_active);
        complete_all(&wq->wq_resurrect);
        wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
        mutex_lock(&wq->wq_lock);
        __idxd_wq_quiesce(wq);
        mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        if (gensts.state == IDXD_DEVICE_STATE_ENABLED)
                return true;
        return false;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        return (gensts.state == IDXD_DEVICE_STATE_HALT);
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        union idxd_command_reg cmd;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                return -ENXIO;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = IDXD_CMD_RESET_DEVICE;
        dev_dbg(dev, "%s: sending reset for init.\n", __func__);
        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
               IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        spin_unlock(&idxd->cmd_lock);
        return 0;
}

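/*
 * idxd_cmd_exec() serializes device commands. Only one command may be
 * outstanding: callers sleep on cmd_waitq until IDXD_FLAG_CMD_RUNNING
 * clears, write the command register, then wait for the command-done
 * interrupt to complete 'done'. The low byte of the CMDSTS result is
 * cached in idxd->cmd_status for later reporting.
 */
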
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status)
{
        union idxd_command_reg cmd;
        DECLARE_COMPLETION_ONSTACK(done);
        u32 stat;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                if (status)
                        *status = IDXD_CMDSTS_HW_ERR;
                return;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        cmd.int_req = 1;

        spin_lock(&idxd->cmd_lock);
        wait_event_lock_irq(idxd->cmd_waitq,
                            !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
                            idxd->cmd_lock);

        dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
                __func__, cmd_code, operand);

        idxd->cmd_status = 0;
        __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        idxd->cmd_done = &done;
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        /*
         * After command submitted, release lock and go to sleep until
         * the command completes via interrupt.
         */
        spin_unlock(&idxd->cmd_lock);
        wait_for_completion(&done);
        stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_lock(&idxd->cmd_lock);
        if (status)
                *status = stat;
        idxd->cmd_status = stat & GENMASK(7, 0);

        __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        /* Wake up other pending commands */
        wake_up(&idxd->cmd_waitq);
        spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device already enabled\n");
                return -ENXIO;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

        /* If the command is successful or if the device was enabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_ENABLED;
        return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (!idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device is not enabled\n");
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

        /* If the command is successful or if the device was disabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd_device_clear_state(idxd);
        return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
        idxd_device_clear_state(idxd);
        spin_lock(&idxd->dev_lock);
        idxd_unmask_error_interrupts(idxd);
        spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        operand = pasid;
        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
        dev_dbg(dev, "pasid %d drained\n", pasid);
}

int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "get int handle, idx %d\n", idx);

        operand = idx & GENMASK(15, 0);
        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

        idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "request int handle failed: %#x\n", status);
                return -ENXIO;
        }

        *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

        dev_dbg(dev, "int handle acquired: %u\n", *handle);
        return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;
        union idxd_command_reg cmd;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "release int handle, handle %d\n", handle);

        memset(&cmd, 0, sizeof(cmd));
        operand = handle & GENMASK(15, 0);

        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
        cmd.operand = operand;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_unlock(&idxd->cmd_lock);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "release int handle failed: %#x\n", status);
                return -ENXIO;
        }

        dev_dbg(dev, "int handle released.\n");
        return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
        struct idxd_engine *engine;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_engines; i++) {
                engine = idxd->engines[i];
                engine->group = NULL;
        }
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
        struct idxd_group *group;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                memset(&group->grpcfg, 0, sizeof(group->grpcfg));
                group->num_engines = 0;
                group->num_wqs = 0;
                group->use_rdbuf_limit = false;
                group->rdbufs_allowed = 0;
                group->rdbufs_reserved = 0;
                if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
                        group->tc_a = 1;
                        group->tc_b = 1;
                } else {
                        group->tc_a = -1;
                        group->tc_b = -1;
                }
                group->desc_progress_limit = 0;
                group->batch_progress_limit = 0;
        }
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                mutex_lock(&wq->wq_lock);
                idxd_wq_disable_cleanup(wq);
                idxd_wq_device_reset_cleanup(wq);
                mutex_unlock(&wq->wq_lock);
        }
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
        /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                /*
                 * Clearing wq state is protected by wq lock.
                 * So no need to be protected by device lock.
                 */
                idxd_device_wqs_clear_state(idxd);

                spin_lock(&idxd->dev_lock);
                idxd_groups_clear_state(idxd);
                idxd_engines_clear_state(idxd);
        } else {
                spin_lock(&idxd->dev_lock);
        }

        idxd->state = IDXD_DEV_DISABLED;
        spin_unlock(&idxd->dev_lock);
}

static void idxd_group_config_write(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i;
        u32 grpcfg_offset;

        dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

        /* setup GRPWQCFG */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset,
                        ioread64(idxd->reg_base + grpcfg_offset));
        }

        /* setup GRPENGCFG */
        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

        /* setup GRPFLAGS */
        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
                group->id, grpcfg_offset,
                ioread64(idxd->reg_base + grpcfg_offset));
}

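/*
 * A group's configuration spans the three register areas written above:
 * GRPWQCFG, an array of GRPWQCFG_STRIDES 64-bit WQ membership bitmaps;
 * GRPENGCFG, a 64-bit engine membership bitmap; and GRPFLGCFG, the
 * group flags (traffic classes, read buffer limits, progress limits).
 */
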
static int idxd_groups_config_write(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i;
        struct device *dev = &idxd->pdev->dev;

        /* Setup bandwidth rdbuf limit */
        if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
                reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
                reg.rdbuf_limit = idxd->rdbuf_limit;
                iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
        }

        dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
                ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_config_write(group);
        }

        return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;

        if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
                return true;
        return false;
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 wq_offset;
        int i, n;

        if (!wq->group)
                return 0;

        /*
         * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
         * wq reset. This will copy back the sticky values that are present on some devices.
         */
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
        }

        if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
                wq->size = WQ_DEFAULT_QUEUE_DEPTH;

        /* bytes 0-3 */
        wq->wqcfg->wq_size = wq->size;

        /* bytes 4-7 */
        wq->wqcfg->wq_thresh = wq->threshold;

        /* bytes 8-11 */
        if (wq_dedicated(wq))
                wq->wqcfg->mode = 1;

        /*
         * The WQ priv bit is set depending on the WQ type. priv = 1 if the
         * WQ type is kernel to indicate privileged access. This setting only
         * matters for dedicated WQ. According to the DSA spec:
         * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
         * Privileged Mode Enable field of the PCI Express PASID capability
         * is 0, this field must be 0.
         *
         * In the case of a dedicated kernel WQ that is not able to support
         * the PASID cap, the configuration will be rejected.
         */
        if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
            !idxd_device_pasid_priv_enabled(idxd) &&
            wq->type == IDXD_WQT_KERNEL) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
                return -EOPNOTSUPP;
        }

        wq->wqcfg->priority = wq->priority;

        if (idxd->hw.gen_cap.block_on_fault &&
            test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
                wq->wqcfg->bof = 1;

        if (idxd->hw.wq_cap.wq_ats_support)
                wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

        /* bytes 12-15 */
        wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
        idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

        /* bytes 32-63 */
        if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
                memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
                for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
                        int pos = n % BITS_PER_LONG_LONG;
                        int idx = n / BITS_PER_LONG_LONG;

                        wq->wqcfg->op_config[idx] |= BIT(pos);
                }
        }

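        /*
         * Worked example for the op_config packing above: opcode bit
         * n = 65 gives idx = 65 / 64 = 1 and pos = 65 % 64 = 1, so it
         * is recorded as op_config[1] |= BIT(1).
         */
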
893 dev_dbg(dev, "WQ %d CFGs\n", wq->id);
894 for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
895 wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
896 iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
897 dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
898 wq->id, i, wq_offset,
899 ioread32(idxd->reg_base + wq_offset));
static int idxd_wqs_config_write(struct idxd_device *idxd)
{
        int i, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_config_write(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
        int i;

        /* TC-A 0 and TC-B 1 should be defaults */
        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                if (group->tc_a == -1)
                        group->tc_a = group->grpcfg.flags.tc_a = 0;
                else
                        group->grpcfg.flags.tc_a = group->tc_a;
                if (group->tc_b == -1)
                        group->tc_b = group->grpcfg.flags.tc_b = 1;
                else
                        group->grpcfg.flags.tc_b = group->tc_b;
                group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
                group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
                if (group->rdbufs_allowed)
                        group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
                else
                        group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;

                group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
                group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
        }
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
        int i, engines = 0;
        struct idxd_engine *eng;
        struct idxd_group *group;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                group->grpcfg.engines = 0;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                eng = idxd->engines[i];
                group = eng->group;

                if (!group)
                        continue;

                group->grpcfg.engines |= BIT(eng->id);
                engines++;
        }

        if (!engines)
                return -EINVAL;

        return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        struct idxd_group *group;
        int i, j, configured = 0;
        struct device *dev = &idxd->pdev->dev;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                for (j = 0; j < 4; j++)
                        group->grpcfg.wqs[j] = 0;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = idxd->wqs[i];
                group = wq->group;

                if (!wq->group)
                        continue;

                if (wq_shared(wq) && !wq_shared_supported(wq)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
                        dev_warn(dev, "No shared wq support but configured.\n");
                        return -EINVAL;
                }

                group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
                configured++;
        }

        if (configured == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
                return -EINVAL;
        }

        return 0;
}

int idxd_device_config(struct idxd_device *idxd)
{
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_wqs_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_engines_setup(idxd);
        if (rc < 0)
                return rc;

        idxd_group_flags_setup(idxd);

        rc = idxd_wqs_config_write(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_groups_config_write(idxd);
        if (rc < 0)
                return rc;

        return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int wqcfg_offset;
        int i;

        wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
        memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

        wq->size = wq->wqcfg->wq_size;
        wq->threshold = wq->wqcfg->wq_thresh;

        /* The driver does not support shared WQ mode in read-only config yet */
        if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
                return -EOPNOTSUPP;

        set_bit(WQ_FLAG_DEDICATED, &wq->flags);

        wq->priority = wq->wqcfg->priority;

        wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
        idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);

        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
        }

        return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i, j, grpcfg_offset;

        /*
         * Load WQS bit fields
         * Iterate through all 256 bits 64 bits at a time
         */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                struct idxd_wq *wq;

                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

                if (i * 64 >= idxd->max_wqs)
                        break;

                /* Iterate through all 64 bits and check for wq set */
                for (j = 0; j < 64; j++) {
                        int id = i * 64 + j;

                        /* No need to check beyond max wqs */
                        if (id >= idxd->max_wqs)
                                break;

                        /* Set group assignment for wq if wq bit is set */
                        if (group->grpcfg.wqs[i] & BIT(j)) {
                                wq = idxd->wqs[id];
                                wq->group = group;
                        }
                }
        }

        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, group->grpcfg.engines);

        /* Iterate through all 64 bits to check engines set */
        for (i = 0; i < 64; i++) {
                if (i >= idxd->max_engines)
                        break;

                if (group->grpcfg.engines & BIT(i)) {
                        struct idxd_engine *engine = idxd->engines[i];

                        engine->group = group;
                }
        }

        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
                group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

int idxd_device_load_config(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i, rc;

        reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        idxd->rdbuf_limit = reg.rdbuf_limit;

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_load_config(group);
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_load_config(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

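/*
 * On teardown a descriptor can sit either on the interrupt entry's
 * lockless pending_llist (not yet picked up by the irq thread) or on
 * its work_list. Both are spliced onto a local list under list_lock,
 * then completed: a descriptor whose completion record was already
 * written completes normally, anything else is reported as aborted.
 */
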
static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *itr;
        struct llist_node *head;
        LIST_HEAD(flist);
        enum idxd_complete_type ctype;

        spin_lock(&ie->list_lock);
        head = llist_del_all(&ie->pending_llist);
        if (head) {
                llist_for_each_entry_safe(desc, itr, head, llnode)
                        list_add_tail(&desc->list, &ie->work_list);
        }

        list_for_each_entry_safe(desc, itr, &ie->work_list, list)
                list_move_tail(&desc->list, &flist);
        spin_unlock(&ie->list_lock);

        list_for_each_entry_safe(desc, itr, &flist, list) {
                list_del(&desc->list);
                ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
                idxd_dma_complete_txd(desc, ctype, true);
        }
}

static void idxd_device_set_perm_entry(struct idxd_device *idxd,
                                       struct idxd_irq_entry *ie)
{
        union msix_perm mperm;

        if (ie->pasid == INVALID_IOASID)
                return;

        mperm.bits = 0;
        mperm.pasid = ie->pasid;
        mperm.pasid_en = 1;
        iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
                                         struct idxd_irq_entry *ie)
{
        iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_irq_entry *ie = &wq->ie;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_irq(ie->vector, ie);
        idxd_flush_pending_descs(ie);
        if (idxd->request_int_handles)
                idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
        idxd_device_clear_perm_entry(idxd, ie);
        ie->vector = -1;
        ie->int_handle = INVALID_INT_HANDLE;
        ie->pasid = INVALID_IOASID;
}

int idxd_wq_request_irq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct idxd_irq_entry *ie;
        int rc;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        ie = &wq->ie;
        ie->vector = pci_irq_vector(pdev, ie->id);
        ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
        idxd_device_set_perm_entry(idxd, ie);

        rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
        if (rc < 0) {
                dev_err(dev, "Failed to request irq %d.\n", ie->vector);
                goto err_irq;
        }

        if (idxd->request_int_handles) {
                rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
                                                    IDXD_IRQ_MSIX);
                if (rc < 0)
                        goto err_int_handle;
        } else {
                ie->int_handle = ie->id;
        }

        return 0;

err_int_handle:
        ie->int_handle = INVALID_INT_HANDLE;
        free_irq(ie->vector, ie);
err_irq:
        idxd_device_clear_perm_entry(idxd, ie);
        ie->pasid = INVALID_IOASID;
        return rc;
}

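/*
 * drv_enable_wq() drives the whole bring-up of one WQ: validate the
 * sysfs-provided configuration (device/WQ state, group, name, shared-WQ
 * threshold), program the pasid/priv fields where the device is
 * configurable, write the device config, enable the WQ, then map its
 * portal and set up IRQ and descriptor resources, unwinding in reverse
 * order on failure.
 */
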
int drv_enable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc = -ENXIO;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd->state != IDXD_DEV_ENABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
                goto err;
        }

        if (wq->state != IDXD_WQ_DISABLED) {
                dev_dbg(dev, "wq %d already enabled.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
                rc = -EBUSY;
                goto err;
        }

        if (!wq->group) {
                dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
                goto err;
        }

        if (strlen(wq->name) == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
                dev_dbg(dev, "wq %d name not set.\n", wq->id);
                goto err;
        }

        /* Shared WQ checks */
        if (wq_shared(wq)) {
                if (!wq_shared_supported(wq)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
                        dev_dbg(dev, "PASID not enabled and shared wq.\n");
                        goto err;
                }
                /*
                 * Shared wq with the threshold set to 0 means the user
                 * did not set the threshold or transitioned from a
                 * dedicated wq but did not set threshold. A value
                 * of 0 would effectively disable the shared wq. The
                 * driver does not allow a value of 0 to be set for
                 * threshold via sysfs.
                 */
                if (wq->threshold == 0) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
                        dev_dbg(dev, "Shared wq and threshold 0.\n");
                        goto err;
                }
        }

        /*
         * In the event that the WQ is configurable for pasid and priv bits,
         * the driver should setup the pasid, pasid_en, and priv bit for a
         * kernel wq. However, for a non-kernel wq, the driver should only
         * set the pasid_en bit for a shared wq. A dedicated wq that is not
         * 'kernel' type will configure pasid and pasid_en later on so there
         * is no need to setup.
         */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                int priv = 0;

                if (wq_pasid_enabled(wq)) {
                        if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
                                u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

                                __idxd_wq_set_pasid_locked(wq, pasid);
                        }
                }

                if (is_idxd_wq_kernel(wq))
                        priv = 1;
                __idxd_wq_set_priv_locked(wq, priv);
        }

        rc = 0;
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0) {
                dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_enable(wq);
        if (rc < 0) {
                dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_map_portal(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
                dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
                goto err_map_portal;
        }

        wq->client_count = 0;

        rc = idxd_wq_request_irq(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
                dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
                goto err_irq;
        }

        rc = idxd_wq_alloc_resources(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
                dev_dbg(dev, "WQ resource alloc failed\n");
                goto err_res_alloc;
        }

        rc = idxd_wq_init_percpu_ref(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
                dev_dbg(dev, "percpu_ref setup failed\n");
                goto err_ref;
        }

        return 0;

err_ref:
        idxd_wq_free_resources(wq);
err_res_alloc:
        idxd_wq_free_irq(wq);
err_irq:
        idxd_wq_unmap_portal(wq);
err_map_portal:
        if (idxd_wq_disable(wq, false))
                dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
        return rc;
}

void drv_disable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd_wq_refcount(wq))
                dev_warn(dev, "Clients have claim on wq %d: %d\n",
                         wq->id, idxd_wq_refcount(wq));

        idxd_wq_unmap_portal(wq);
        idxd_wq_drain(wq);
        idxd_wq_free_irq(wq);
        idxd_wq_reset(wq);
        idxd_wq_free_resources(wq);
        percpu_ref_exit(&wq->wq_active);
        wq->type = IDXD_WQT_NONE;
        wq->client_count = 0;
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int rc = 0;

        /*
         * Device should be in disabled state for the idxd_drv to load. If it's in
         * enabled state, then the device was altered outside of driver's control.
         * If the device is in a halted state, we don't want to proceed.
         */
        if (idxd->state != IDXD_DEV_DISABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
                return -ENXIO;
        }

        /* Device configuration */
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0)
                return -ENXIO;

        /* Start device */
        rc = idxd_device_enable(idxd);
        if (rc < 0)
                return rc;

        /* Setup DMA device without channels */
        rc = idxd_register_dma_device(idxd);
        if (rc < 0) {
                idxd_device_disable(idxd);
                idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
                return rc;
        }

        idxd->cmd_status = 0;
        return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
        struct device *dev = &idxd_dev->conf_dev;
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];
                struct device *wq_dev = wq_confdev(wq);

                if (wq->state == IDXD_WQ_DISABLED)
                        continue;
                dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
                device_release_driver(wq_dev);
        }

        idxd_unregister_dma_device(idxd);
        idxd_device_disable(idxd);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                idxd_device_reset(idxd);
}

static enum idxd_dev_type dev_types[] = {
        IDXD_DEV_DSA,
        IDXD_DEV_IAX,
        IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
        .type = dev_types,
        .probe = idxd_device_drv_probe,
        .remove = idxd_device_drv_remove,
        .name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);