drivers/dma/idxd/init.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
        [IDXD_TYPE_DSA] = {
                .name_prefix = "dsa",
                .type = IDXD_TYPE_DSA,
                .compl_size = sizeof(struct dsa_completion_record),
                .align = 32,
                .dev_type = &dsa_device_type,
        },
        [IDXD_TYPE_IAX] = {
                .name_prefix = "iax",
                .type = IDXD_TYPE_IAX,
                .compl_size = sizeof(struct iax_completion_record),
                .align = 64,
                .dev_type = &iax_device_type,
        },
};

static struct pci_device_id idxd_pci_tbl[] = {
        /* DSA ver 1.0 platforms */
        { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

        /* IAX ver 1.0 platforms */
        { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
        { 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

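/*
 * Allocate and wire up the device's MSI-X vectors: the full vector count is
 * requested from PCI, vector 0 gets the misc (error/event) handler, and each
 * remaining vector gets a work queue completion handler. If the device
 * supports the REQUEST_INT_HANDLE command, an interrupt handle is requested
 * for every work queue vector.
 */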
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct idxd_irq_entry *irq_entry;
        int i, msixcnt;
        int rc = 0;

        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0) {
                dev_err(dev, "Not MSI-X interrupt capable.\n");
                return -ENOSPC;
        }

        rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
        if (rc != msixcnt) {
                dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
                return -ENOSPC;
        }
        dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

        /*
         * We implement one completion list per MSI-X entry, except for
         * entry 0, which is reserved for errors and other non I/O
         * completion events.
         */
        idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
                                         GFP_KERNEL, dev_to_node(dev));
        if (!idxd->irq_entries) {
                rc = -ENOMEM;
                goto err_irq_entries;
        }

        for (i = 0; i < msixcnt; i++) {
                idxd->irq_entries[i].id = i;
                idxd->irq_entries[i].idxd = idxd;
                idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
                spin_lock_init(&idxd->irq_entries[i].list_lock);
        }

        irq_entry = &idxd->irq_entries[0];
        rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
                                  0, "idxd-misc", irq_entry);
        if (rc < 0) {
                dev_err(dev, "Failed to allocate misc interrupt.\n");
                goto err_misc_irq;
        }

        dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

        /* first MSI-X entry is not for wq interrupts */
        idxd->num_wq_irqs = msixcnt - 1;

        for (i = 1; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];

                init_llist_head(&idxd->irq_entries[i].pending_llist);
                INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
                rc = request_threaded_irq(irq_entry->vector, NULL,
                                          idxd_wq_thread, 0, "idxd-portal", irq_entry);
                if (rc < 0) {
                        dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
                        goto err_wq_irqs;
                }

                dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
                if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
                        /*
                         * MSI-X vector enumeration starts at 1, with vector 0 being the
                         * misc interrupt that handles non I/O completion events. The
                         * interrupt handles are for IMS enumeration in a guest. The misc
                         * interrupt vector does not require a handle, so the int_handles
                         * array starts at index 0. Since 'i' starts at 1, the first
                         * int_handles index used is 0.
                         */
                        rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
                                                            IDXD_IRQ_MSIX);
                        if (rc < 0) {
                                free_irq(irq_entry->vector, irq_entry);
                                goto err_wq_irqs;
                        }
                        dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
                }
        }

        idxd_unmask_error_interrupts(idxd);
        idxd_msix_perm_setup(idxd);
        return 0;

 err_wq_irqs:
        while (--i >= 0) {
                irq_entry = &idxd->irq_entries[i];
                free_irq(irq_entry->vector, irq_entry);
                /* handle for vector i was stored at int_handles[i - 1] */
                if (i != 0 && (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
                        idxd_device_release_int_handle(idxd,
                                                       idxd->int_handles[i - 1], IDXD_IRQ_MSIX);
        }
 err_misc_irq:
        /* Disable error interrupt generation */
        idxd_mask_error_interrupts(idxd);
 err_irq_entries:
        pci_free_irq_vectors(pdev);
        dev_err(dev, "No usable interrupts\n");
        return rc;
}

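/*
 * Allocate one idxd_wq per work queue advertised in the WQ capability and
 * initialize its configuration device ("wq<dev>.<id>") on the dsa bus along
 * with its locks, wait queues and a WQCFG shadow buffer.
 */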
static int idxd_setup_wqs(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        struct idxd_wq *wq;
        int i, rc;

        idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
                                 GFP_KERNEL, dev_to_node(dev));
        if (!idxd->wqs)
                return -ENOMEM;

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
                if (!wq) {
                        rc = -ENOMEM;
                        goto err;
                }

                wq->id = i;
                wq->idxd = idxd;
                device_initialize(&wq->conf_dev);
                wq->conf_dev.parent = &idxd->conf_dev;
                wq->conf_dev.bus = &dsa_bus_type;
                wq->conf_dev.type = &idxd_wq_device_type;
                rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
                if (rc < 0) {
                        put_device(&wq->conf_dev);
                        goto err;
                }

                mutex_init(&wq->wq_lock);
                init_waitqueue_head(&wq->err_queue);
                init_completion(&wq->wq_dead);
                wq->max_xfer_bytes = idxd->max_xfer_bytes;
                wq->max_batch_size = idxd->max_batch_size;
                wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
                if (!wq->wqcfg) {
                        put_device(&wq->conf_dev);
                        rc = -ENOMEM;
                        goto err;
                }
                idxd->wqs[i] = wq;
        }

        return 0;

 err:
        while (--i >= 0)
                put_device(&idxd->wqs[i]->conf_dev);
        return rc;
}

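/*
 * Allocate one idxd_engine per hardware engine and initialize its
 * configuration device ("engine<dev>.<id>") under the idxd parent device.
 */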
static int idxd_setup_engines(struct idxd_device *idxd)
{
        struct idxd_engine *engine;
        struct device *dev = &idxd->pdev->dev;
        int i, rc;

        idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
                                     GFP_KERNEL, dev_to_node(dev));
        if (!idxd->engines)
                return -ENOMEM;

        for (i = 0; i < idxd->max_engines; i++) {
                engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
                if (!engine) {
                        rc = -ENOMEM;
                        goto err;
                }

                engine->id = i;
                engine->idxd = idxd;
                device_initialize(&engine->conf_dev);
                engine->conf_dev.parent = &idxd->conf_dev;
                engine->conf_dev.type = &idxd_engine_device_type;
                rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
                if (rc < 0) {
                        put_device(&engine->conf_dev);
                        goto err;
                }

                idxd->engines[i] = engine;
        }

        return 0;

 err:
        while (--i >= 0)
                put_device(&idxd->engines[i]->conf_dev);
        return rc;
}

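/*
 * Allocate one idxd_group per hardware group and initialize its
 * configuration device ("group<dev>.<id>"). Traffic classes start out as -1
 * (unconfigured).
 */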
static int idxd_setup_groups(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        struct idxd_group *group;
        int i, rc;

        idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
                                    GFP_KERNEL, dev_to_node(dev));
        if (!idxd->groups)
                return -ENOMEM;

        for (i = 0; i < idxd->max_groups; i++) {
                group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
                if (!group) {
                        rc = -ENOMEM;
                        goto err;
                }

                group->id = i;
                group->idxd = idxd;
                device_initialize(&group->conf_dev);
                group->conf_dev.parent = &idxd->conf_dev;
                group->conf_dev.bus = &dsa_bus_type;
                group->conf_dev.type = &idxd_group_device_type;
                rc = dev_set_name(&group->conf_dev, "group%d.%d", idxd->id, group->id);
                if (rc < 0) {
                        put_device(&group->conf_dev);
                        goto err;
                }

                idxd->groups[i] = group;
                group->tc_a = -1;
                group->tc_b = -1;
        }

        return 0;

 err:
        while (--i >= 0)
                put_device(&idxd->groups[i]->conf_dev);
        return rc;
}

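/*
 * Set up the driver's internal state: the interrupt handle table (only when
 * the device supports REQUEST_INT_HANDLE), the work queue, engine and group
 * arrays, and a kernel workqueue named after the device for deferred work.
 */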
static int idxd_setup_internals(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int rc, i;

        init_waitqueue_head(&idxd->cmd_waitq);

        if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
                idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs, sizeof(int), GFP_KERNEL);
                if (!idxd->int_handles)
                        return -ENOMEM;
        }

        rc = idxd_setup_wqs(idxd);
        if (rc < 0)
                goto err_wqs;

        rc = idxd_setup_engines(idxd);
        if (rc < 0)
                goto err_engine;

        rc = idxd_setup_groups(idxd);
        if (rc < 0)
                goto err_group;

        idxd->wq = create_workqueue(dev_name(dev));
        if (!idxd->wq) {
                rc = -ENOMEM;
                goto err_wkq_create;
        }

        return 0;

 err_wkq_create:
        for (i = 0; i < idxd->max_groups; i++)
                put_device(&idxd->groups[i]->conf_dev);
 err_group:
        for (i = 0; i < idxd->max_engines; i++)
                put_device(&idxd->engines[i]->conf_dev);
 err_engine:
        for (i = 0; i < idxd->max_wqs; i++)
                put_device(&idxd->wqs[i]->conf_dev);
 err_wqs:
        kfree(idxd->int_handles);
        return rc;
}

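/*
 * Read IDXD_TABLE_OFFSET and convert the per-table multipliers into byte
 * offsets for the group config, WQ config, MSI-X permission and perfmon
 * register tables.
 */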
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
        union offsets_reg offsets;
        struct device *dev = &idxd->pdev->dev;

        offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
        offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
        idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
        idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
        idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
        idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
        dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

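/*
 * Cache the capability registers (general, command, group, engine, WQ and
 * operation caps) and derive the driver limits from them: max transfer and
 * batch sizes, group/engine/WQ counts, token counts and WQCFG size.
 */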
static void idxd_read_caps(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i;

        /* reading generic capabilities */
        idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
        dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

        if (idxd->hw.gen_cap.cmd_cap) {
                idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
                dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
        }

        idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
        dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
        idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
        dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
        if (idxd->hw.gen_cap.config_en)
                set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

        /* reading group capabilities */
        idxd->hw.group_cap.bits =
                ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
        dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
        idxd->max_groups = idxd->hw.group_cap.num_groups;
        dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
        idxd->max_tokens = idxd->hw.group_cap.total_tokens;
        dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
        idxd->nr_tokens = idxd->max_tokens;

        /* read engine capabilities */
        idxd->hw.engine_cap.bits =
                ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
        dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
        idxd->max_engines = idxd->hw.engine_cap.num_engines;
        dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

        /* read workqueue capabilities */
        idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
        dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
        idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
        dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
        idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
        dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
        idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
        dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

        /* reading operation capabilities */
        for (i = 0; i < 4; i++) {
                idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
                                IDXD_OPCAP_OFFSET + i * sizeof(u64));
                dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
        }
}

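/*
 * Allocate an idxd_device, assign it an id from idxd_ida and initialize its
 * configuration device ("dsa<N>" or "iax<N>") on the dsa bus. Returns NULL
 * on any failure.
 */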
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;
        int rc;

        idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
        if (!idxd)
                return NULL;

        idxd->pdev = pdev;
        idxd->data = data;
        idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
        if (idxd->id < 0) {
                kfree(idxd);
                return NULL;
        }
        device_initialize(&idxd->conf_dev);
        idxd->conf_dev.parent = dev;
        idxd->conf_dev.bus = &dsa_bus_type;
        idxd->conf_dev.type = idxd->data->dev_type;
        rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
        if (rc < 0) {
                put_device(&idxd->conf_dev);
                return NULL;
        }

        spin_lock_init(&idxd->dev_lock);
        spin_lock_init(&idxd->cmd_lock);

        return idxd;
}

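/*
 * Obtain a supervisor PASID for the device through the IOMMU SVA API and
 * record it as the system PASID used for in-kernel work submission.
 */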
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
        int flags;
        unsigned int pasid;
        struct iommu_sva *sva;

        flags = SVM_FLAG_SUPERVISOR_MODE;

        sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
        if (IS_ERR(sva)) {
                dev_warn(&idxd->pdev->dev,
                         "iommu sva bind failed: %ld\n", PTR_ERR(sva));
                return PTR_ERR(sva);
        }

        pasid = iommu_sva_get_pasid(sva);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(sva);
                return -ENODEV;
        }

        idxd->sva = sva;
        idxd->pasid = pasid;
        dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
        return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
        iommu_sva_unbind_device(idxd->sva);
        idxd->sva = NULL;
}

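/*
 * Device-level probe: reset the device, optionally enable SVA/PASID, read
 * the capability and table offset registers, set up internal structures and
 * interrupts, record the char dev major and initialize the perfmon PMU.
 */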
static int idxd_probe(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        int rc;

        dev_dbg(dev, "%s entered and resetting device\n", __func__);
        rc = idxd_device_init_reset(idxd);
        if (rc < 0)
                return rc;

        dev_dbg(dev, "IDXD reset complete\n");

        if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
                rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
                if (rc == 0) {
                        rc = idxd_enable_system_pasid(idxd);
                        if (rc < 0) {
                                iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
                                dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
                        } else {
                                set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
                        }
                } else {
                        dev_warn(dev, "Unable to turn on SVA feature.\n");
                }
        } else if (!sva) {
                dev_warn(dev, "User forced SVA off via module param.\n");
        }

        idxd_read_caps(idxd);
        idxd_read_table_offsets(idxd);

        rc = idxd_setup_internals(idxd);
        if (rc)
                goto err;

        /* If the configs are read-only, then load them from the device */
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                dev_dbg(dev, "Loading RO device config\n");
                rc = idxd_device_load_config(idxd);
                if (rc < 0)
                        goto err;
        }

        rc = idxd_setup_interrupts(idxd);
        if (rc)
                goto err;

        dev_dbg(dev, "IDXD interrupt setup complete.\n");

        idxd->major = idxd_cdev_get_major(idxd);

        rc = perfmon_pmu_init(idxd);
        if (rc < 0)
                dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

        dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
        return 0;

 err:
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
        return rc;
}

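/*
 * PCI probe entry point: enable the PCI device, map the MMIO BAR, set the
 * DMA masks, run the device-level probe and register the configuration
 * devices with sysfs. On success the device is left in CONF_READY state.
 */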
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct idxd_device *idxd;
        struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        dev_dbg(dev, "Alloc IDXD context\n");
        idxd = idxd_alloc(pdev, data);
        if (!idxd) {
                rc = -ENOMEM;
                goto err_idxd_alloc;
        }

        dev_dbg(dev, "Mapping BARs\n");
        idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
        if (!idxd->reg_base) {
                rc = -ENOMEM;
                goto err_iomap;
        }

        dev_dbg(dev, "Set DMA masks\n");
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc)
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                goto err;

        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (rc)
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                goto err;

        dev_dbg(dev, "Set PCI master\n");
        pci_set_master(pdev);
        pci_set_drvdata(pdev, idxd);

        idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
        rc = idxd_probe(idxd);
        if (rc) {
                dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
                goto err;
        }

        rc = idxd_register_devices(idxd);
        if (rc) {
                dev_err(dev, "IDXD sysfs setup failed\n");
                goto err;
        }

        idxd->state = IDXD_DEV_CONF_READY;

        dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
                 idxd->hw.version);

        return 0;

 err:
        pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
        put_device(&idxd->conf_dev);
 err_idxd_alloc:
        pci_disable_device(pdev);
        return rc;
}

static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *itr;
        struct llist_node *head;

        head = llist_del_all(&ie->pending_llist);
        if (!head)
                return;

        llist_for_each_entry_safe(desc, itr, head, llnode) {
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
                idxd_free_desc(desc->wq, desc);
        }
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *iter;

        list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
                list_del(&desc->list);
                idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
                idxd_free_desc(desc->wq, desc);
        }
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = idxd->wqs[i];
                if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
                        idxd_wq_quiesce(wq);
        }
}

static void idxd_release_int_handles(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        int i, rc;

        for (i = 0; i < idxd->num_wq_irqs; i++) {
                if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
                        rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
                                                            IDXD_IRQ_MSIX);
                        if (rc < 0)
                                dev_warn(dev, "irq handle %d release failed\n",
                                         idxd->int_handles[i]);
                        else
                                dev_dbg(dev, "int handle released: %u\n", idxd->int_handles[i]);
                }
        }
}

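/*
 * Shutdown path (also used by remove): disable the device, mask interrupts,
 * free every MSI-X vector, abort any descriptors still pending on the
 * completion lists, release interrupt handles and tear down the PCI state.
 */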
static void idxd_shutdown(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);
        int rc, i;
        struct idxd_irq_entry *irq_entry;
        int msixcnt = pci_msix_vec_count(pdev);

        rc = idxd_device_disable(idxd);
        if (rc)
                dev_err(&pdev->dev, "Disabling device failed\n");

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_mask_msix_vectors(idxd);
        idxd_mask_error_interrupts(idxd);

        for (i = 0; i < msixcnt; i++) {
                irq_entry = &idxd->irq_entries[i];
                synchronize_irq(irq_entry->vector);
                free_irq(irq_entry->vector, irq_entry);
                if (i == 0)
                        continue;
                idxd_flush_pending_llist(irq_entry);
                idxd_flush_work_list(irq_entry);
        }

        idxd_msix_perm_clear(idxd);
        idxd_release_int_handles(idxd);
        pci_free_irq_vectors(pdev);
        pci_iounmap(pdev, idxd->reg_base);
        pci_disable_device(pdev);
        destroy_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
        struct idxd_device *idxd = pci_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s called\n", __func__);
        idxd_shutdown(pdev);
        if (device_pasid_enabled(idxd))
                idxd_disable_system_pasid(idxd);
        idxd_unregister_devices(idxd);
        perfmon_pmu_remove(idxd);
        iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
}

static struct pci_driver idxd_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = idxd_pci_tbl,
        .probe          = idxd_pci_probe,
        .remove         = idxd_remove,
        .shutdown       = idxd_shutdown,
};

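/*
 * Module init: MOVDIR64B is required, ENQCMD(S) is noted if present, then
 * perfmon, the dsa bus type, the idxd device driver, character device
 * support and the PCI driver are registered in that order.
 */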
static int __init idxd_init_module(void)
{
        int err;

        /*
         * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
         * enumerating the device. We cannot utilize it.
         */
        if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
                pr_warn("idxd driver failed to load without MOVDIR64B.\n");
                return -ENODEV;
        }

        if (!boot_cpu_has(X86_FEATURE_ENQCMD))
                pr_warn("Platform does not have ENQCMD(S) support.\n");
        else
                support_enqcmd = true;

        perfmon_init();

        err = idxd_register_bus_type();
        if (err < 0)
                return err;

        err = idxd_register_driver();
        if (err < 0)
                goto err_idxd_driver_register;

        err = idxd_cdev_register();
        if (err)
                goto err_cdev_register;

        err = pci_register_driver(&idxd_pci_driver);
        if (err)
                goto err_pci_register;

        return 0;

err_pci_register:
        idxd_cdev_remove();
err_cdev_register:
        idxd_unregister_driver();
err_idxd_driver_register:
        idxd_unregister_bus_type();
        return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
        pci_unregister_driver(&idxd_pci_driver);
        idxd_cdev_remove();
        idxd_unregister_bus_type();
        perfmon_exit();
}
module_exit(idxd_exit_module);