// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
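
/*
 * Usage note: SVA can be disabled at load time ("modprobe idxd sva=0");
 * the 0644 permission also exposes the flag at
 * /sys/module/idxd/parameters/sva.
 */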

#define DRV_NAME "idxd"

bool support_enqcmd;

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static DEFINE_MUTEX(idxd_idr_lock);

static struct pci_device_id idxd_pci_tbl[] = {
        /* DSA ver 1.0 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },

        /* IAX ver 1.0 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
        "dsa",
        "iax"
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
        return idxd_name[idxd->type];
}
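
/*
 * Allocate and request the device's MSI-X vectors. Vector 0 services
 * errors and other miscellaneous events; every remaining vector drives
 * one work-queue completion list.
 */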
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct msix_entry *msix;
        struct idxd_irq_entry *irq_entry;
        int i, msixcnt;
        int rc = 0;

        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0) {
                dev_err(dev, "Not MSI-X interrupt capable.\n");
                goto err_no_irq;
        }

        idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
                        msixcnt, GFP_KERNEL);
        if (!idxd->msix_entries) {
                rc = -ENOMEM;
                goto err_no_irq;
        }

        for (i = 0; i < msixcnt; i++)
                idxd->msix_entries[i].entry = i;

        rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
        if (rc) {
                dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
                goto err_no_irq;
        }
        dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

        /*
         * We implement 1 completion list per MSI-X entry except for
         * entry 0, which is for errors and others.
         */
        idxd->irq_entries = devm_kcalloc(dev, msixcnt,
                                         sizeof(struct idxd_irq_entry),
                                         GFP_KERNEL);
        if (!idxd->irq_entries) {
                rc = -ENOMEM;
                goto err_no_irq;
        }

        for (i = 0; i < msixcnt; i++) {
                idxd->irq_entries[i].id = i;
                idxd->irq_entries[i].idxd = idxd;
                spin_lock_init(&idxd->irq_entries[i].list_lock);
        }

        msix = &idxd->msix_entries[0];
        irq_entry = &idxd->irq_entries[0];
        rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
                                       idxd_misc_thread, 0, "idxd-misc",
                                       irq_entry);
        if (rc < 0) {
                dev_err(dev, "Failed to allocate misc interrupt.\n");
                goto err_no_irq;
        }
        dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
                msix->vector);

        /* first MSI-X entry is not for wq interrupts */
        idxd->num_wq_irqs = msixcnt - 1;

        for (i = 1; i < msixcnt; i++) {
                msix = &idxd->msix_entries[i];
                irq_entry = &idxd->irq_entries[i];

                init_llist_head(&idxd->irq_entries[i].pending_llist);
                INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
                rc = devm_request_threaded_irq(dev, msix->vector,
                                               idxd_irq_handler,
                                               idxd_wq_thread, 0,
                                               "idxd-portal", irq_entry);
                if (rc < 0) {
                        dev_err(dev, "Failed to allocate irq %d.\n",
                                msix->vector);
                        goto err_no_irq;
                }
                dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
                        i, msix->vector);
        }

        idxd_unmask_error_interrupts(idxd);
        idxd_msix_perm_setup(idxd);
        return 0;

 err_no_irq:
        /* Disable error interrupt generation */
        idxd_mask_error_interrupts(idxd);
        pci_disable_msix(pdev);
        dev_err(dev, "No usable interrupts\n");
        return rc;
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i;

        init_waitqueue_head(&idxd->cmd_waitq);
        idxd->groups = devm_kcalloc(dev, idxd->max_groups,
                                    sizeof(struct idxd_group), GFP_KERNEL);
        if (!idxd->groups)
                return -ENOMEM;

        for (i = 0; i < idxd->max_groups; i++) {
                idxd->groups[i].idxd = idxd;
                idxd->groups[i].id = i;
                idxd->groups[i].tc_a = -1;
                idxd->groups[i].tc_b = -1;
        }

        idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
                                 GFP_KERNEL);
        if (!idxd->wqs)
                return -ENOMEM;

        idxd->engines = devm_kcalloc(dev, idxd->max_engines,
                                     sizeof(struct idxd_engine), GFP_KERNEL);
        if (!idxd->engines)
                return -ENOMEM;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = &idxd->wqs[i];

                wq->id = i;
                wq->idxd = idxd;
                mutex_init(&wq->wq_lock);
                wq->idxd_cdev.minor = -1;
                wq->max_xfer_bytes = idxd->max_xfer_bytes;
                wq->max_batch_size = idxd->max_batch_size;
                wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);
                if (!wq->wqcfg)
                        return -ENOMEM;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                idxd->engines[i].idxd = idxd;
                idxd->engines[i].id = i;
        }

        idxd->wq = create_workqueue(dev_name(dev));
        if (!idxd->wq)
                return -ENOMEM;

        return 0;
}
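
/*
 * The table-offset register holds the location of each configuration
 * table, scaled by IDXD_TABLE_MULT to yield byte offsets into the
 * device's MMIO region.
 */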
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
        union offsets_reg offsets;
        struct device *dev = &idxd->pdev->dev;

        offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
        offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
        idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
        idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
        idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
        idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
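
/* Cache the general, group, engine, and work-queue capability registers. */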
static void idxd_read_caps(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i;

        /* reading generic capabilities */
        idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
        dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
        idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
        dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
        idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
        dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
        if (idxd->hw.gen_cap.config_en)
                set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

        /* reading group capabilities */
        idxd->hw.group_cap.bits =
                ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
        dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
        idxd->max_groups = idxd->hw.group_cap.num_groups;
        dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
        idxd->max_tokens = idxd->hw.group_cap.total_tokens;
        dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
        idxd->nr_tokens = idxd->max_tokens;

        /* reading engine capabilities */
        idxd->hw.engine_cap.bits =
                ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
        dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
        idxd->max_engines = idxd->hw.engine_cap.num_engines;
        dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

        /* reading workqueue capabilities */
        idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
        dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
        idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
        dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
        idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
        dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
        idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
        dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

        /* reading operation capabilities */
        for (i = 0; i < 4; i++) {
                idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
                                IDXD_OPCAP_OFFSET + i * sizeof(u64));
                dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
        }
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;

        idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
        if (!idxd)
                return NULL;

        idxd->pdev = pdev;
        spin_lock_init(&idxd->dev_lock);

        return idxd;
}
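
/*
 * Request a supervisor-mode SVA bind so that descriptors submitted from
 * the kernel can carry a system-wide PASID.
 */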
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
        int flags;
        unsigned int pasid;
        struct iommu_sva *sva;

        flags = SVM_FLAG_SUPERVISOR_MODE;

        sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
        if (IS_ERR(sva)) {
                dev_warn(&idxd->pdev->dev,
                         "iommu sva bind failed: %ld\n", PTR_ERR(sva));
                return PTR_ERR(sva);
        }

        pasid = iommu_sva_get_pasid(sva);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(sva);
                return -ENODEV;
        }

        idxd->sva = sva;
        idxd->pasid = pasid;
        dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
        return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
        iommu_sva_unbind_device(idxd->sva);
        idxd->sva = NULL;
}
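
/*
 * Device-level probe: reset the hardware, optionally enable a system
 * PASID, read capabilities and table offsets, then set up internal
 * structures and interrupts.
 */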
static int idxd_probe(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        int rc;

        dev_dbg(dev, "%s entered and resetting device\n", __func__);
        rc = idxd_device_init_reset(idxd);
        if (rc < 0)
                return rc;
        dev_dbg(dev, "IDXD reset complete\n");

        if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
                rc = idxd_enable_system_pasid(idxd);
                if (rc < 0)
                        dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
                else
                        set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
        } else if (!sva) {
                dev_warn(dev, "User forced SVA off via module param.\n");
        }

        idxd_read_caps(idxd);
        idxd_read_table_offsets(idxd);

        rc = idxd_setup_internals(idxd);
        if (rc)
                goto err_setup;

        rc = idxd_setup_interrupts(idxd);
        if (rc)
                goto err_setup;
        dev_dbg(dev, "IDXD interrupt setup complete.\n");

        mutex_lock(&idxd_idr_lock);
        idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
        mutex_unlock(&idxd_idr_lock);
        if (idxd->id < 0) {
                rc = -ENOMEM;
                goto err_idr_fail;
        }

        idxd->major = idxd_cdev_get_major(idxd);
        dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
        return 0;

 err_idr_fail:
        idxd_mask_error_interrupts(idxd);
        idxd_mask_msix_vectors(idxd);
 err_setup:
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        return rc;
}

static void idxd_type_init(struct idxd_device *idxd)
{
        if (idxd->type == IDXD_TYPE_DSA)
                idxd->compl_size = sizeof(struct dsa_completion_record);
        else if (idxd->type == IDXD_TYPE_IAX)
                idxd->compl_size = sizeof(struct iax_completion_record);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;
        int rc;

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        dev_dbg(dev, "Alloc IDXD context\n");
        idxd = idxd_alloc(pdev);
        if (!idxd)
                return -ENOMEM;

        dev_dbg(dev, "Mapping BARs\n");
        idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
        if (!idxd->reg_base)
                return -ENOMEM;

        dev_dbg(dev, "Set DMA masks\n");
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc)
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc)
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        idxd_set_type(idxd);

        idxd_type_init(idxd);

        dev_dbg(dev, "Set PCI master\n");
        pci_set_master(pdev);
        pci_set_drvdata(pdev, idxd);

        idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
        rc = idxd_probe(idxd);
        if (rc) {
                dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
                return -ENODEV;
        }

        rc = idxd_setup_sysfs(idxd);
        if (rc) {
                dev_err(dev, "IDXD sysfs setup failed\n");
                return -ENODEV;
        }

        idxd->state = IDXD_DEV_CONF_READY;

        dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
                 idxd->hw.version);

        return 0;
}
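
/*
 * During shutdown, descriptors still queued on an interrupt entry's
 * pending llist or work list are completed with IDXD_COMPLETE_ABORT.
 */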
static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *itr;
        struct llist_node *head;

        head = llist_del_all(&ie->pending_llist);
        if (!head)
                return;

        llist_for_each_entry_safe(desc, itr, head, llnode) {
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
                idxd_free_desc(desc->wq, desc);
        }
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *iter;

        list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
                list_del(&desc->list);
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
                idxd_free_desc(desc->wq, desc);
        }
}
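
/* Quiesce the device: disable it, mask interrupts, and drain completions. */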
static void idxd_shutdown(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);
        int rc, i;
        struct idxd_irq_entry *irq_entry;
        int msixcnt = pci_msix_vec_count(pdev);

        rc = idxd_device_disable(idxd);
        if (rc)
                dev_err(&pdev->dev, "Disabling device failed\n");

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_mask_msix_vectors(idxd);
        idxd_mask_error_interrupts(idxd);

        for (i = 0; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];
                synchronize_irq(idxd->msix_entries[i].vector);
                if (i == 0)
                        continue;
                idxd_flush_pending_llist(irq_entry);
                idxd_flush_work_list(irq_entry);
        }

        idxd_msix_perm_clear(idxd);
        destroy_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_cleanup_sysfs(idxd);
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        mutex_lock(&idxd_idr_lock);
        idr_remove(&idxd_idrs[idxd->type], idxd->id);
        mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
        .name		= DRV_NAME,
        .id_table	= idxd_pci_tbl,
        .probe		= idxd_pci_probe,
        .remove		= idxd_remove,
        .shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
        int err, i;

        /*
         * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
         * enumerating the device. We cannot utilize it.
         */
        if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }

        if (!boot_cpu_has(X86_FEATURE_ENQCMD))
                pr_warn("Platform does not have ENQCMD(S) support.\n");
        else
                support_enqcmd = true;

        for (i = 0; i < IDXD_TYPE_MAX; i++)
                idr_init(&idxd_idrs[i]);

        err = idxd_register_bus_type();
        if (err < 0)
                return err;

        err = idxd_register_driver();
        if (err < 0)
                goto err_idxd_driver_register;

        err = idxd_cdev_register();
        if (err)
                goto err_cdev_register;

        err = pci_register_driver(&idxd_pci_driver);
        if (err)
                goto err_pci_register;

        return 0;

err_pci_register:
        idxd_cdev_remove();
err_cdev_register:
        idxd_unregister_driver();
err_idxd_driver_register:
        idxd_unregister_bus_type();
        return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);