1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/pci.h>
8 #include <linux/interrupt.h>
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/workqueue.h>
12 #include <linux/aer.h>
14 #include <linux/io-64-nonatomic-lo-hi.h>
15 #include <linux/device.h>
16 #include <linux/idr.h>
17 #include <linux/intel-svm.h>
18 #include <linux/iommu.h>
19 #include <uapi/linux/idxd.h>
20 #include <linux/dmaengine.h>
21 #include "../dmaengine.h"
22 #include "registers.h"
26 MODULE_VERSION(IDXD_DRIVER_VERSION);
27 MODULE_LICENSE("GPL v2");
28 MODULE_AUTHOR("Intel Corporation");
29 MODULE_IMPORT_NS(IDXD);
31 static bool sva = true;
32 module_param(sva, bool, 0644);
33 MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
35 static bool tc_override;
36 module_param(tc_override, bool, 0644);
37 MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
39 #define DRV_NAME "idxd"
41 bool support_enqcmd;
42 DEFINE_IDA(idxd_ida);
44 static struct idxd_driver_data idxd_driver_data[] = {
47 .type = IDXD_TYPE_DSA,
48 .compl_size = sizeof(struct dsa_completion_record),
50 .dev_type = &dsa_device_type,
54 .type = IDXD_TYPE_IAX,
55 .compl_size = sizeof(struct iax_completion_record),
57 .dev_type = &iax_device_type,
61 static struct pci_device_id idxd_pci_tbl[] = {
62 /* DSA ver 1.0 platforms */
63 { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
65 /* IAX ver 1.0 platforms */
66 { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
69 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
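/*
 * MSI-X layout used below: vector 0 backs the "misc" interrupt (device
 * errors, halt notifications and command completions), while vectors
 * 1..max_wqs back the per-work-queue completion interrupt entries.
 */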
71 static int idxd_setup_interrupts(struct idxd_device *idxd)
73 struct pci_dev *pdev = idxd->pdev;
74 struct device *dev = &pdev->dev;
75 struct idxd_irq_entry *ie;
79 msixcnt = pci_msix_vec_count(pdev);
81 dev_err(dev, "Not MSI-X interrupt capable.\n");
84 idxd->irq_cnt = msixcnt;
86 rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
88 dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
91 dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
94 ie = idxd_get_ie(idxd, 0);
95 ie->vector = pci_irq_vector(pdev, 0);
96 rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
98 dev_err(dev, "Failed to allocate misc interrupt.\n");
101 dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
103 for (i = 0; i < idxd->max_wqs; i++) {
104 int msix_idx = i + 1;
106 ie = idxd_get_ie(idxd, msix_idx);
108 ie->int_handle = INVALID_INT_HANDLE;
109 ie->pasid = INVALID_IOASID;
111 spin_lock_init(&ie->list_lock);
112 init_llist_head(&ie->pending_llist);
113 INIT_LIST_HEAD(&ie->work_list);
116 idxd_unmask_error_interrupts(idxd);
120 idxd_mask_error_interrupts(idxd);
121 pci_free_irq_vectors(pdev);
122 dev_err(dev, "No usable interrupts\n");
126 static void idxd_cleanup_interrupts(struct idxd_device *idxd)
128 struct pci_dev *pdev = idxd->pdev;
129 struct idxd_irq_entry *ie;
132 msixcnt = pci_msix_vec_count(pdev);
136 ie = idxd_get_ie(idxd, 0);
137 idxd_mask_error_interrupts(idxd);
138 free_irq(ie->vector, ie);
139 pci_free_irq_vectors(pdev);
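/*
 * Allocate the per-WQ state and register a configuration device for each
 * work queue on the dsa bus. Error unwinding relies on put_device(): the
 * conf_dev ->release() callback is what frees each partially set up WQ.
 */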
142 static int idxd_setup_wqs(struct idxd_device *idxd)
144 struct device *dev = &idxd->pdev->dev;
146 struct device *conf_dev;
149 idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
150 GFP_KERNEL, dev_to_node(dev));
154 idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
155 if (!idxd->wq_enable_map) {
160 for (i = 0; i < idxd->max_wqs; i++) {
161 wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
167 idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
168 conf_dev = wq_confdev(wq);
171 device_initialize(wq_confdev(wq));
172 conf_dev->parent = idxd_confdev(idxd);
173 conf_dev->bus = &dsa_bus_type;
174 conf_dev->type = &idxd_wq_device_type;
175 rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
177 put_device(conf_dev);
181 mutex_init(&wq->wq_lock);
182 init_waitqueue_head(&wq->err_queue);
183 init_completion(&wq->wq_dead);
184 init_completion(&wq->wq_resurrect);
185 wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
186 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
187 wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
188 wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
190 put_device(conf_dev);
195 if (idxd->hw.wq_cap.op_config) {
196 wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
197 if (!wq->opcap_bmap) {
198 put_device(conf_dev);
202 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
212 conf_dev = wq_confdev(wq);
213 put_device(conf_dev);
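/* Engines follow the same allocate/initialize/put_device pattern as WQs. */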
218 static int idxd_setup_engines(struct idxd_device *idxd)
220 struct idxd_engine *engine;
221 struct device *dev = &idxd->pdev->dev;
222 struct device *conf_dev;
225 idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
226 GFP_KERNEL, dev_to_node(dev));
230 for (i = 0; i < idxd->max_engines; i++) {
231 engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
237 idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
238 conf_dev = engine_confdev(engine);
241 device_initialize(conf_dev);
242 conf_dev->parent = idxd_confdev(idxd);
243 conf_dev->bus = &dsa_bus_type;
244 conf_dev->type = &idxd_engine_device_type;
245 rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
247 put_device(conf_dev);
251 idxd->engines[i] = engine;
258 engine = idxd->engines[i];
259 conf_dev = engine_confdev(engine);
260 put_device(conf_dev);
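/*
 * Groups are set up the same way as engines and WQs. For hardware versions
 * up to 2 the driver assigns default traffic classes to each group unless
 * the tc_override module parameter is set.
 */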
265 static int idxd_setup_groups(struct idxd_device *idxd)
267 struct device *dev = &idxd->pdev->dev;
268 struct device *conf_dev;
269 struct idxd_group *group;
272 idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
273 GFP_KERNEL, dev_to_node(dev));
277 for (i = 0; i < idxd->max_groups; i++) {
278 group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
284 idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
285 conf_dev = group_confdev(group);
288 device_initialize(conf_dev);
289 conf_dev->parent = idxd_confdev(idxd);
290 conf_dev->bus = &dsa_bus_type;
291 conf_dev->type = &idxd_group_device_type;
292 rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
294 put_device(conf_dev);
298 idxd->groups[i] = group;
299 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
312 group = idxd->groups[i];
313 put_device(group_confdev(group));
318 static void idxd_cleanup_internals(struct idxd_device *idxd)
322 for (i = 0; i < idxd->max_groups; i++)
323 put_device(group_confdev(idxd->groups[i]));
324 for (i = 0; i < idxd->max_engines; i++)
325 put_device(engine_confdev(idxd->engines[i]));
326 for (i = 0; i < idxd->max_wqs; i++)
327 put_device(wq_confdev(idxd->wqs[i]));
328 destroy_workqueue(idxd->wq);
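/*
 * Set up internal state in dependency order: work queues, engines and
 * groups first, then a kernel workqueue used to defer device event
 * handling (e.g. work queued from the misc interrupt) out of IRQ context.
 */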
331 static int idxd_setup_internals(struct idxd_device *idxd)
333 struct device *dev = &idxd->pdev->dev;
336 init_waitqueue_head(&idxd->cmd_waitq);
338 rc = idxd_setup_wqs(idxd);
342 rc = idxd_setup_engines(idxd);
346 rc = idxd_setup_groups(idxd);
350 idxd->wq = create_workqueue(dev_name(dev));
359 for (i = 0; i < idxd->max_groups; i++)
360 put_device(group_confdev(idxd->groups[i]));
362 for (i = 0; i < idxd->max_engines; i++)
363 put_device(engine_confdev(idxd->engines[i]));
365 for (i = 0; i < idxd->max_wqs; i++)
366 put_device(wq_confdev(idxd->wqs[i]));
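/*
 * The table offset register reports where the group, WQ, MSI-X permission
 * and perfmon configuration tables live in MMIO space, expressed in
 * multiples of IDXD_TABLE_MULT (0x100) bytes.
 */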
371 static void idxd_read_table_offsets(struct idxd_device *idxd)
373 union offsets_reg offsets;
374 struct device *dev = &idxd->pdev->dev;
376 offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
377 offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
378 idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
379 dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
380 idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
381 dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
382 idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
383 dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
384 idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
385 dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
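/*
 * Fold an array of consecutive 64-bit register words into a single bitmap
 * so the operation capability bits can be tested with the regular bitmap
 * helpers.
 */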
388 static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
392 for (i = 0, nr = 0; i < count; i++) {
393 for (j = 0; j < BITS_PER_LONG_LONG; j++) {
401 static void idxd_read_caps(struct idxd_device *idxd)
403 struct device *dev = &idxd->pdev->dev;
406 /* reading generic capabilities */
407 idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
408 dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
410 if (idxd->hw.gen_cap.cmd_cap) {
411 idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
412 dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
415 /* reading command capabilities */
416 if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
417 idxd->request_int_handles = true;
419 idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
420 dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
421 idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
422 dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
423 if (idxd->hw.gen_cap.config_en)
424 set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
426 /* reading group capabilities */
427 idxd->hw.group_cap.bits =
428 ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
429 dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
430 idxd->max_groups = idxd->hw.group_cap.num_groups;
431 dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
432 idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
433 dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
434 idxd->nr_rdbufs = idxd->max_rdbufs;
436 /* read engine capabilities */
437 idxd->hw.engine_cap.bits =
438 ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
439 dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
440 idxd->max_engines = idxd->hw.engine_cap.num_engines;
441 dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
443 /* read workqueue capabilities */
444 idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
445 dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
446 idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
447 dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
448 idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
449 dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
450 idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
451 dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
453 /* reading operation capabilities */
454 for (i = 0; i < 4; i++) {
455 idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
456 IDXD_OPCAP_OFFSET + i * sizeof(u64));
457 dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
459 multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
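/*
 * Allocate and minimally initialize the idxd context: device id, opcap
 * bitmap, the configuration device registered on the dsa bus, and the
 * locks protecting device state and command submission. Freeing happens
 * through the conf_dev ->release() callback via put_device().
 */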
462 static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
464 struct device *dev = &pdev->dev;
465 struct device *conf_dev;
466 struct idxd_device *idxd;
469 idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
473 conf_dev = idxd_confdev(idxd);
476 idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
477 idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
481 idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
482 if (!idxd->opcap_bmap) {
483 ida_free(&idxd_ida, idxd->id);
487 device_initialize(conf_dev);
488 conf_dev->parent = dev;
489 conf_dev->bus = &dsa_bus_type;
490 conf_dev->type = idxd->data->dev_type;
491 rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
493 put_device(conf_dev);
497 spin_lock_init(&idxd->dev_lock);
498 spin_lock_init(&idxd->cmd_lock);
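/*
 * Bind a supervisor-mode SVA handle for the device so in-kernel DMA can
 * run with a "system" PASID; user SVA is enabled separately in
 * idxd_probe() via IOMMU_DEV_FEAT_SVA.
 */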
503 static int idxd_enable_system_pasid(struct idxd_device *idxd)
507 struct iommu_sva *sva;
509 flags = SVM_FLAG_SUPERVISOR_MODE;
511 sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
513 dev_warn(&idxd->pdev->dev,
514 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
518 pasid = iommu_sva_get_pasid(sva);
519 if (pasid == IOMMU_PASID_INVALID) {
520 iommu_sva_unbind_device(sva);
526 dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
530 static void idxd_disable_system_pasid(struct idxd_device *idxd)
533 iommu_sva_unbind_device(idxd->sva);
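/*
 * Main device bring-up: reset the device, enable SVA/PASID support where
 * available, read capabilities and table offsets, allocate internal state,
 * load a read-only configuration if the device is not configurable, then
 * set up interrupts, the char device major and the perfmon PMU.
 */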
537 static int idxd_probe(struct idxd_device *idxd)
539 struct pci_dev *pdev = idxd->pdev;
540 struct device *dev = &pdev->dev;
543 dev_dbg(dev, "%s entered and resetting device\n", __func__);
544 rc = idxd_device_init_reset(idxd);
548 dev_dbg(dev, "IDXD reset complete\n");
550 if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
551 if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
552 dev_warn(dev, "Unable to turn on user SVA feature.\n");
554 set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
556 if (idxd_enable_system_pasid(idxd))
557 dev_warn(dev, "No in-kernel DMA with PASID.\n");
559 set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
562 dev_warn(dev, "User forced SVA off via module param.\n");
565 idxd_read_caps(idxd);
566 idxd_read_table_offsets(idxd);
568 rc = idxd_setup_internals(idxd);
572 /* If the configs are readonly, then load them from device */
573 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
574 dev_dbg(dev, "Loading RO device config\n");
575 rc = idxd_device_load_config(idxd);
580 rc = idxd_setup_interrupts(idxd);
584 idxd->major = idxd_cdev_get_major(idxd);
586 rc = perfmon_pmu_init(idxd);
588 dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);
590 dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
594 idxd_cleanup_internals(idxd);
596 if (device_pasid_enabled(idxd))
597 idxd_disable_system_pasid(idxd);
598 if (device_user_pasid_enabled(idxd))
599 iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
603 static void idxd_cleanup(struct idxd_device *idxd)
605 struct device *dev = &idxd->pdev->dev;
607 perfmon_pmu_remove(idxd);
608 idxd_cleanup_interrupts(idxd);
609 idxd_cleanup_internals(idxd);
610 if (device_pasid_enabled(idxd))
611 idxd_disable_system_pasid(idxd);
612 if (device_user_pasid_enabled(idxd))
613 iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
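/*
 * PCI probe: enable the function, map the MMIO BAR (IDXD_MMIO_BAR), set a
 * 64-bit DMA mask, then hand off to idxd_probe() and register the
 * configuration devices in sysfs.
 */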
616 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
618 struct device *dev = &pdev->dev;
619 struct idxd_device *idxd;
620 struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
623 rc = pci_enable_device(pdev);
627 dev_dbg(dev, "Alloc IDXD context\n");
628 idxd = idxd_alloc(pdev, data);
634 dev_dbg(dev, "Mapping BARs\n");
635 idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
636 if (!idxd->reg_base) {
641 dev_dbg(dev, "Set DMA masks\n");
642 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
646 dev_dbg(dev, "Set PCI master\n");
647 pci_set_master(pdev);
648 pci_set_drvdata(pdev, idxd);
650 idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
651 rc = idxd_probe(idxd);
653 dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
657 rc = idxd_register_devices(idxd);
659 dev_err(dev, "IDXD sysfs setup failed\n");
660 goto err_dev_register;
663 dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
671 pci_iounmap(pdev, idxd->reg_base);
673 put_device(idxd_confdev(idxd));
675 pci_disable_device(pdev);
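/*
 * Quiesce all enabled kernel-owned work queues so no new descriptors are
 * submitted and in-flight work can finish before the device is disabled
 * or reset.
 */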
679 void idxd_wqs_quiesce(struct idxd_device *idxd)
684 for (i = 0; i < idxd->max_wqs; i++) {
686 if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
691 static void idxd_shutdown(struct pci_dev *pdev)
693 struct idxd_device *idxd = pci_get_drvdata(pdev);
694 struct idxd_irq_entry *irq_entry;
697 rc = idxd_device_disable(idxd);
699 dev_err(&pdev->dev, "Disabling device failed\n");
701 irq_entry = &idxd->ie;
702 synchronize_irq(irq_entry->vector);
703 idxd_mask_error_interrupts(idxd);
704 flush_workqueue(idxd->wq);
707 static void idxd_remove(struct pci_dev *pdev)
709 struct idxd_device *idxd = pci_get_drvdata(pdev);
710 struct idxd_irq_entry *irq_entry;
712 idxd_unregister_devices(idxd);
713 /*
714 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
715 * to the idxd context. The driver still needs those bits in order to do the rest of
716 * the cleanup. However, the idxd sub-driver still needs to be unbound. So take a ref
717 * on the device here to hold off the freeing while allowing the idxd sub-driver
718 * to unbind.
719 */
720 get_device(idxd_confdev(idxd));
721 device_unregister(idxd_confdev(idxd));
723 if (device_pasid_enabled(idxd))
724 idxd_disable_system_pasid(idxd);
726 irq_entry = idxd_get_ie(idxd, 0);
727 free_irq(irq_entry->vector, irq_entry);
728 pci_free_irq_vectors(pdev);
729 pci_iounmap(pdev, idxd->reg_base);
730 if (device_user_pasid_enabled(idxd))
731 iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
732 pci_disable_device(pdev);
733 destroy_workqueue(idxd->wq);
734 perfmon_pmu_remove(idxd);
735 put_device(idxd_confdev(idxd));
738 static struct pci_driver idxd_pci_driver = {
740 .id_table = idxd_pci_tbl,
741 .probe = idxd_pci_probe,
742 .remove = idxd_remove,
743 .shutdown = idxd_shutdown,
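/*
 * Module init order matters: the device drivers (idxd, dmaengine, user)
 * and the char device region are registered before the PCI driver so that
 * any device discovered during pci_register_driver() can bind immediately.
 */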
746 static int __init idxd_init_module(void)
751 * If the CPU does not support MOVDIR64B, there is no point in enumerating
752 * the device; it cannot be used at all. Missing ENQCMD(S) only disables shared WQ submission.
754 if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
755 pr_warn("idxd driver failed to load without MOVDIR64B.\n");
759 if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
760 pr_warn("Platform does not have ENQCMD(S) support.\n");
762 support_enqcmd = true;
766 err = idxd_driver_register(&idxd_drv);
768 goto err_idxd_driver_register;
770 err = idxd_driver_register(&idxd_dmaengine_drv);
772 goto err_idxd_dmaengine_driver_register;
774 err = idxd_driver_register(&idxd_user_drv);
776 goto err_idxd_user_driver_register;
778 err = idxd_cdev_register();
780 goto err_cdev_register;
782 err = pci_register_driver(&idxd_pci_driver);
784 goto err_pci_register;
791 idxd_driver_unregister(&idxd_user_drv);
792 err_idxd_user_driver_register:
793 idxd_driver_unregister(&idxd_dmaengine_drv);
794 err_idxd_dmaengine_driver_register:
795 idxd_driver_unregister(&idxd_drv);
796 err_idxd_driver_register:
799 module_init(idxd_init_module);
801 static void __exit idxd_exit_module(void)
803 idxd_driver_unregister(&idxd_user_drv);
804 idxd_driver_unregister(&idxd_dmaengine_drv);
805 idxd_driver_unregister(&idxd_drv);
806 pci_unregister_driver(&idxd_pci_driver);
810 module_exit(idxd_exit_module);