Merge tag 'dmaengine-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul...
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 9998512..1aa8239 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -9,7 +9,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
-#include <linux/aer.h>
 #include <linux/fs.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/device.h>
@@ -47,6 +46,9 @@ static struct idxd_driver_data idxd_driver_data[] = {
                .compl_size = sizeof(struct dsa_completion_record),
                .align = 32,
                .dev_type = &dsa_device_type,
+               .evl_cr_off = offsetof(struct dsa_evl_entry, cr),
+               .cr_status_off = offsetof(struct dsa_completion_record, status),
+               .cr_result_off = offsetof(struct dsa_completion_record, result),
        },
        [IDXD_TYPE_IAX] = {
                .name_prefix = "iax",
@@ -54,6 +56,9 @@ static struct idxd_driver_data idxd_driver_data[] = {
                .compl_size = sizeof(struct iax_completion_record),
                .align = 64,
                .dev_type = &iax_device_type,
+               .evl_cr_off = offsetof(struct iax_evl_entry, cr),
+               .cr_status_off = offsetof(struct iax_completion_record, status),
+               .cr_result_off = offsetof(struct iax_completion_record, error_code),
        },
 };
 
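Editorial note: the new evl_cr_off/cr_status_off/cr_result_off fields let shared event-log code locate the completion record inside a raw entry without hard-coding the DSA vs. IAA layout. A minimal sketch of how such offsets could be consumed, assuming the driver's internal headers; the helper name below is hypothetical, not the driver's actual code:

	/*
	 * Sketch only: pull the completion-record status byte out of a raw
	 * event log entry using the per-device-type offsets added above.
	 */
	static u8 example_evl_cr_status(struct idxd_device *idxd, void *evl_entry)
	{
		void *cr = evl_entry + idxd->data->evl_cr_off;

		return *(u8 *)(cr + idxd->data->cr_status_off);
	}
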
@@ -200,6 +205,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
                        }
                        bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
                }
+               mutex_init(&wq->uc_lock);
+               xa_init(&wq->upasid_xa);
                idxd->wqs[i] = wq;
        }
 
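Editorial note: wq->uc_lock and wq->upasid_xa give each work queue a table of user contexts keyed by PASID. The actual bookkeeping lives in the cdev code; the helper below is only an illustrative sketch of the usual xarray-under-mutex pattern:

	/* Sketch: record a user context for a PASID on this wq (hypothetical helper). */
	static int example_track_user_context(struct idxd_wq *wq, u32 pasid, void *ctx)
	{
		int rc;

		mutex_lock(&wq->uc_lock);
		rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
		mutex_unlock(&wq->uc_lock);

		return rc;	/* -EBUSY if the PASID is already tracked */
	}
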
@@ -332,6 +339,33 @@ static void idxd_cleanup_internals(struct idxd_device *idxd)
        destroy_workqueue(idxd->wq);
 }
 
+static int idxd_init_evl(struct idxd_device *idxd)
+{
+       struct device *dev = &idxd->pdev->dev;
+       struct idxd_evl *evl;
+
+       if (idxd->hw.gen_cap.evl_support == 0)
+               return 0;
+
+       evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
+       if (!evl)
+               return -ENOMEM;
+
+       spin_lock_init(&evl->lock);
+       evl->size = IDXD_EVL_SIZE_MIN;
+
+       idxd->evl_cache = kmem_cache_create(dev_name(idxd_confdev(idxd)),
+                                           sizeof(struct idxd_evl_fault) + evl_ent_size(idxd),
+                                           0, 0, NULL);
+       if (!idxd->evl_cache) {
+               kfree(evl);
+               return -ENOMEM;
+       }
+
+       idxd->evl = evl;
+       return 0;
+}
+
 static int idxd_setup_internals(struct idxd_device *idxd)
 {
        struct device *dev = &idxd->pdev->dev;
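Editorial note: each object in idxd->evl_cache is sized for an idxd_evl_fault header plus one raw event-log entry, since evl_ent_size() depends on the device type's completion-record layout. A hedged sketch of how a fault object might be carved from that cache in the event-log interrupt path (hypothetical helper; the real consumer is the irq handling code in this series):

	static struct idxd_evl_fault *example_alloc_evl_fault(struct idxd_device *idxd,
							      void *entry)
	{
		struct idxd_evl_fault *fault;

		fault = kmem_cache_alloc(idxd->evl_cache, GFP_ATOMIC);
		if (!fault)
			return NULL;

		/* The raw entry occupies the tail of the cache object, after the header. */
		memcpy(fault + 1, entry, evl_ent_size(idxd));

		return fault;
	}
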
@@ -357,8 +391,14 @@ static int idxd_setup_internals(struct idxd_device *idxd)
                goto err_wkq_create;
        }
 
+       rc = idxd_init_evl(idxd);
+       if (rc < 0)
+               goto err_evl;
+
        return 0;
 
+ err_evl:
+       destroy_workqueue(idxd->wq);
  err_wkq_create:
        for (i = 0; i < idxd->max_groups; i++)
                put_device(group_confdev(idxd->groups[i]));
@@ -389,7 +429,7 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
        dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
 }
 
-static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
+void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
 {
        int i, j, nr;
 
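Editorial note: dropping static here exposes multi_u64_to_bmap() so other compilation units (such as the new debugfs code) can expand packed 64-bit capability words into a bitmap. A standalone example of what the conversion produces, not driver code:

	static void __maybe_unused multi_u64_to_bmap_example(void)
	{
		DECLARE_BITMAP(bmap, 128);
		u64 words[2] = { 0x5, 0x1 };	/* bits 0 and 2, then bit 64 */

		bitmap_zero(bmap, 128);		/* the helper only sets bits, never clears */
		multi_u64_to_bmap(bmap, words, 2);
		WARN_ON(!test_bit(2, bmap) || !test_bit(64, bmap));
	}
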
@@ -461,6 +501,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
                dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
        }
        multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
+
+       /* read iaa cap */
+       if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
+               idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
 }
 
 static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
@@ -661,6 +705,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_dev_register;
        }
 
+       rc = idxd_device_init_debugfs(idxd);
+       if (rc)
+               dev_warn(dev, "IDXD debugfs failed to setup\n");
+
        dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
                 idxd->hw.version);
 
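Editorial note: idxd_device_init_debugfs() is treated as best effort; a failure only warns instead of unwinding the probe. The real implementation is in the debugfs.c file this series adds. The sketch below merely shows the usual shape of such a hookup; the gen_cap dump file and all non-idxd names are assumptions for illustration:

	static int example_gencap_show(struct seq_file *s, void *unused)
	{
		struct idxd_device *idxd = s->private;

		seq_printf(s, "%#llx\n", idxd->hw.gen_cap.bits);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(example_gencap);

	static int example_device_init_debugfs(struct idxd_device *idxd)
	{
		struct dentry *dir;

		/* Per-device directory; debugfs errors are deliberately non-fatal. */
		dir = debugfs_create_dir(dev_name(idxd_confdev(idxd)), NULL);
		debugfs_create_file("gen_cap", 0400, dir, idxd, &example_gencap_fops);

		return 0;
	}
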
@@ -723,6 +771,7 @@ static void idxd_remove(struct pci_dev *pdev)
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
+       idxd_device_remove_debugfs(idxd);
 
        irq_entry = idxd_get_ie(idxd, 0);
        free_irq(irq_entry->vector, irq_entry);
@@ -780,6 +829,10 @@ static int __init idxd_init_module(void)
        if (err)
                goto err_cdev_register;
 
+       err = idxd_init_debugfs();
+       if (err)
+               goto err_debugfs;
+
        err = pci_register_driver(&idxd_pci_driver);
        if (err)
                goto err_pci_register;
@@ -787,6 +840,8 @@ static int __init idxd_init_module(void)
        return 0;
 
 err_pci_register:
+       idxd_remove_debugfs();
+err_debugfs:
        idxd_cdev_remove();
 err_cdev_register:
        idxd_driver_unregister(&idxd_user_drv);
@@ -807,5 +862,6 @@ static void __exit idxd_exit_module(void)
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        perfmon_exit();
+       idxd_remove_debugfs();
 }
 module_exit(idxd_exit_module);