drivers/dma/idxd/device.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <linux/irq.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
#include "registers.h"

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);

/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 1;
        genctrl.halt_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

void idxd_mask_error_interrupts(struct idxd_device *idxd)
{
        union genctrl_reg genctrl;

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.softerr_int_en = 0;
        genctrl.halt_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
}

static void free_hw_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->hw_descs[i]);

        kfree(wq->hw_descs);
}

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
                                    GFP_KERNEL, node);
        if (!wq->hw_descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
                                               GFP_KERNEL, node);
                if (!wq->hw_descs[i]) {
                        free_hw_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void free_descs(struct idxd_wq *wq)
{
        int i;

        for (i = 0; i < wq->num_descs; i++)
                kfree(wq->descs[i]);

        kfree(wq->descs);
}

static int alloc_descs(struct idxd_wq *wq, int num)
{
        struct device *dev = &wq->idxd->pdev->dev;
        int i;
        int node = dev_to_node(dev);

        wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
                                 GFP_KERNEL, node);
        if (!wq->descs)
                return -ENOMEM;

        for (i = 0; i < num; i++) {
                wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
                                            GFP_KERNEL, node);
                if (!wq->descs[i]) {
                        free_descs(wq);
                        return -ENOMEM;
                }
        }

        return 0;
}

/* WQ control bits */
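/*
 * A kernel WQ carries one software descriptor and one hardware descriptor
 * per slot, a coherent DMA array of completion records, and a sbitmap used
 * to hand out free descriptor ids on the submission path. For a dedicated
 * WQ the depth is the WQ size; for a shared WQ it is the configured
 * threshold.
 */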
int idxd_wq_alloc_resources(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc, num_descs, i;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
        wq->num_descs = num_descs;

        rc = alloc_hw_descs(wq, num_descs);
        if (rc < 0)
                return rc;

        wq->compls_size = num_descs * idxd->data->compl_size;
        wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
        if (!wq->compls) {
                rc = -ENOMEM;
                goto fail_alloc_compls;
        }

        rc = alloc_descs(wq, num_descs);
        if (rc < 0)
                goto fail_alloc_descs;

        rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
                                     dev_to_node(dev));
        if (rc < 0)
                goto fail_sbitmap_init;

        for (i = 0; i < num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                desc->hw = wq->hw_descs[i];
                if (idxd->data->type == IDXD_TYPE_DSA)
                        desc->completion = &wq->compls[i];
                else if (idxd->data->type == IDXD_TYPE_IAX)
                        desc->iax_completion = &wq->iax_compls[i];
                desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
                desc->id = i;
                desc->wq = wq;
                desc->cpu = -1;
        }

        return 0;

 fail_sbitmap_init:
        free_descs(wq);
 fail_alloc_descs:
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
 fail_alloc_compls:
        free_hw_descs(wq);
        return rc;
}

void idxd_wq_free_resources(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_hw_descs(wq);
        free_descs(wq);
        dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
        sbitmap_queue_free(&wq->sbq);
}

int idxd_wq_enable(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (wq->state == IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d already enabled\n", wq->id);
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);

        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_WQ_ENABLED) {
                dev_dbg(dev, "WQ enable failed: %#x\n", status);
                return -ENXIO;
        }

        wq->state = IDXD_WQ_ENABLED;
        set_bit(wq->id, idxd->wq_enable_map);
        dev_dbg(dev, "WQ %d enabled\n", wq->id);
        return 0;
}

int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 status, operand;

        dev_dbg(dev, "Disabling WQ %d\n", wq->id);

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return 0;
        }

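        /*
         * The operand selects the WQ as a bitmask: bit (id % 16) within a
         * 16-WQ word in the low half of the operand, with the word index
         * (id / 16) in the upper half.
         */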
        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);

        if (status != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "WQ disable failed: %#x\n", status);
                return -ENXIO;
        }

        if (reset_config)
                idxd_wq_disable_cleanup(wq);
        clear_bit(wq->id, idxd->wq_enable_map);
        wq->state = IDXD_WQ_DISABLED;
        dev_dbg(dev, "WQ %d disabled\n", wq->id);
        return 0;
}

void idxd_wq_drain(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        dev_dbg(dev, "Draining WQ %d\n", wq->id);
        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
}

void idxd_wq_reset(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        if (wq->state != IDXD_WQ_ENABLED) {
                dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
                return;
        }

        operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
        idxd_wq_disable_cleanup(wq);
}

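/*
 * Map the MMIO portal through which descriptors are submitted to this WQ
 * (MOVDIR64B for dedicated WQs, ENQCMDS for shared WQs from the kernel).
 * Only the limited portal is mapped for in-kernel use.
 */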
int idxd_wq_map_portal(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        resource_size_t start;

        start = pci_resource_start(pdev, IDXD_WQ_BAR);
        start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

        wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
        if (!wq->portal)
                return -ENOMEM;

        return 0;
}

void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
        struct device *dev = &wq->idxd->pdev->dev;

        devm_iounmap(dev, wq->portal);
        wq->portal = NULL;
        wq->portal_offset = 0;
}

void idxd_wqs_unmap_portal(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                if (wq->portal)
                        idxd_wq_unmap_portal(wq);
        }
}

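/*
 * Update the PASID enable and PASID fields of WQCFG with a read-modify-
 * write of the 32-bit word that holds them, mirroring the result into the
 * driver's WQCFG shadow copy.
 */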
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
        struct idxd_device *idxd = wq->idxd;
        union wqcfg wqcfg;
        unsigned int offset;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 1;
        wqcfg.pasid = pasid;
        wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);
}

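/*
 * Changing the PASID requires the WQ to be disabled, so cycle the WQ
 * around the WQCFG update.
 */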
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
{
        int rc;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        __idxd_wq_set_pasid_locked(wq, pasid);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

int idxd_wq_disable_pasid(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        int rc;
        union wqcfg wqcfg;
        unsigned int offset;

        rc = idxd_wq_disable(wq, false);
        if (rc < 0)
                return rc;

        offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
        spin_lock(&idxd->dev_lock);
        wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
        wqcfg.pasid_en = 0;
        wqcfg.pasid = 0;
        iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
        spin_unlock(&idxd->dev_lock);

        rc = idxd_wq_enable(wq);
        if (rc < 0)
                return rc;

        return 0;
}

static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;

        lockdep_assert_held(&wq->wq_lock);
        wq->state = IDXD_WQ_DISABLED;
        memset(wq->wqcfg, 0, idxd->wqcfg_size);
        wq->type = IDXD_WQT_NONE;
        wq->threshold = 0;
        wq->priority = 0;
        wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
        wq->flags = 0;
        memset(wq->name, 0, WQ_NAME_SIZE);
        wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
        if (wq->opcap_bmap)
                bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
}

static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);

        wq->size = 0;
        wq->group = NULL;
}

static void idxd_wq_ref_release(struct percpu_ref *ref)
{
        struct idxd_wq *wq = container_of(ref, struct idxd_wq, wq_active);

        complete(&wq->wq_dead);
}

int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
{
        int rc;

        memset(&wq->wq_active, 0, sizeof(wq->wq_active));
        rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
        if (rc < 0)
                return rc;
        reinit_completion(&wq->wq_dead);
        reinit_completion(&wq->wq_resurrect);
        return 0;
}

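/*
 * Quiesce a WQ: kill the percpu ref so new submissions fail, wake any
 * submitters parked on wq_resurrect, then wait for wq_dead, which the
 * ref-release callback completes once the last in-flight descriptor
 * reference is dropped.
 */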
void __idxd_wq_quiesce(struct idxd_wq *wq)
{
        lockdep_assert_held(&wq->wq_lock);
        reinit_completion(&wq->wq_resurrect);
        percpu_ref_kill(&wq->wq_active);
        complete_all(&wq->wq_resurrect);
        wait_for_completion(&wq->wq_dead);
}

void idxd_wq_quiesce(struct idxd_wq *wq)
{
        mutex_lock(&wq->wq_lock);
        __idxd_wq_quiesce(wq);
        mutex_unlock(&wq->wq_lock);
}

/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        return gensts.state == IDXD_DEVICE_STATE_ENABLED;
}

static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
        union gensts_reg gensts;

        gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

        return gensts.state == IDXD_DEVICE_STATE_HALT;
}

/*
 * This function is only used for reset during probe and will
 * poll for completion. Once the device is set up with interrupts,
 * all commands will be done via interrupt completion.
 */
int idxd_device_init_reset(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        union idxd_command_reg cmd;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                return -ENXIO;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = IDXD_CMD_RESET_DEVICE;
        dev_dbg(dev, "%s: sending reset for init.\n", __func__);
        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
               IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        spin_unlock(&idxd->cmd_lock);
        return 0;
}

static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
                          u32 *status)
{
        union idxd_command_reg cmd;
        DECLARE_COMPLETION_ONSTACK(done);
        u32 stat;

        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
                if (status)
                        *status = IDXD_CMDSTS_HW_ERR;
                return;
        }

        memset(&cmd, 0, sizeof(cmd));
        cmd.cmd = cmd_code;
        cmd.operand = operand;
        cmd.int_req = 1;

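        /*
         * The device has a single command register, so only one command may
         * be outstanding at a time; wait until no other command is running.
         */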
        spin_lock(&idxd->cmd_lock);
        wait_event_lock_irq(idxd->cmd_waitq,
                            !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
                            idxd->cmd_lock);

        dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
                __func__, cmd_code, operand);

        idxd->cmd_status = 0;
        __set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        idxd->cmd_done = &done;
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        /*
         * After the command is submitted, release the lock and go to sleep
         * until the command completes via interrupt.
         */
        spin_unlock(&idxd->cmd_lock);
        wait_for_completion(&done);
        stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_lock(&idxd->cmd_lock);
        if (status)
                *status = stat;
        idxd->cmd_status = stat & GENMASK(7, 0);

        __clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
        /* Wake up other pending commands */
        wake_up(&idxd->cmd_waitq);
        spin_unlock(&idxd->cmd_lock);
}

int idxd_device_enable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device already enabled\n");
                return -ENXIO;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);

        /* If the command was successful or the device was already enabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            status != IDXD_CMDSTS_ERR_DEV_ENABLED) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd->state = IDXD_DEV_ENABLED;
        return 0;
}

int idxd_device_disable(struct idxd_device *idxd)
{
        struct device *dev = &idxd->pdev->dev;
        u32 status;

        if (!idxd_is_enabled(idxd)) {
                dev_dbg(dev, "Device is not enabled\n");
                return 0;
        }

        idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_DEVICE, 0, &status);

        /* If the command was successful or the device was already disabled */
        if (status != IDXD_CMDSTS_SUCCESS &&
            !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) {
                dev_dbg(dev, "%s: err_code: %#x\n", __func__, status);
                return -ENXIO;
        }

        idxd_device_clear_state(idxd);
        return 0;
}

void idxd_device_reset(struct idxd_device *idxd)
{
        idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
        idxd_device_clear_state(idxd);
        spin_lock(&idxd->dev_lock);
        idxd_unmask_error_interrupts(idxd);
        spin_unlock(&idxd->dev_lock);
}

void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand;

        operand = pasid;
        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
        dev_dbg(dev, "pasid %d drained\n", pasid);
}

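/*
 * On devices that implement the interrupt handle commands, the driver must
 * ask the device for a handle instead of using the MSI-X index directly;
 * that handle is what gets programmed into a descriptor's completion
 * interrupt field.
 */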
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "get int handle, idx %d\n", idx);

        operand = idx & GENMASK(15, 0);
        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_REQUEST_INT_HANDLE, operand);

        idxd_cmd_exec(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand, &status);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "request int handle failed: %#x\n", status);
                return -ENXIO;
        }

        *handle = (status >> IDXD_CMDSTS_RES_SHIFT) & GENMASK(15, 0);

        dev_dbg(dev, "int handle acquired: %u\n", *handle);
        return 0;
}

int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
                                   enum idxd_interrupt_type irq_type)
{
        struct device *dev = &idxd->pdev->dev;
        u32 operand, status;
        union idxd_command_reg cmd;

        if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
                return -EOPNOTSUPP;

        dev_dbg(dev, "release int handle, handle %d\n", handle);

        memset(&cmd, 0, sizeof(cmd));
        operand = handle & GENMASK(15, 0);

        if (irq_type == IDXD_IRQ_IMS)
                operand |= CMD_INT_HANDLE_IMS;

        cmd.cmd = IDXD_CMD_RELEASE_INT_HANDLE;
        cmd.operand = operand;

        dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_RELEASE_INT_HANDLE, operand);

        /*
         * Poll for completion rather than going through idxd_cmd_exec();
         * release may run while the interrupt infrastructure is being torn
         * down, when the command completion interrupt cannot be relied upon.
         */
        spin_lock(&idxd->cmd_lock);
        iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

        while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
                cpu_relax();
        status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_unlock(&idxd->cmd_lock);

        if ((status & IDXD_CMDSTS_ERR_MASK) != IDXD_CMDSTS_SUCCESS) {
                dev_dbg(dev, "release int handle failed: %#x\n", status);
                return -ENXIO;
        }

        dev_dbg(dev, "int handle released.\n");
        return 0;
}

/* Device configuration bits */
static void idxd_engines_clear_state(struct idxd_device *idxd)
{
        struct idxd_engine *engine;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_engines; i++) {
                engine = idxd->engines[i];
                engine->group = NULL;
        }
}

static void idxd_groups_clear_state(struct idxd_device *idxd)
{
        struct idxd_group *group;
        int i;

        lockdep_assert_held(&idxd->dev_lock);
        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                memset(&group->grpcfg, 0, sizeof(group->grpcfg));
                group->num_engines = 0;
                group->num_wqs = 0;
                group->use_rdbuf_limit = false;
                /*
                 * The default value is the same as the value of
                 * total read buffers in GRPCAP.
                 */
                group->rdbufs_allowed = idxd->max_rdbufs;
                group->rdbufs_reserved = 0;
                if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
                        group->tc_a = 1;
                        group->tc_b = 1;
                } else {
                        group->tc_a = -1;
                        group->tc_b = -1;
                }
                group->desc_progress_limit = 0;
                group->batch_progress_limit = 0;
        }
}

static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
{
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                mutex_lock(&wq->wq_lock);
                idxd_wq_disable_cleanup(wq);
                idxd_wq_device_reset_cleanup(wq);
                mutex_unlock(&wq->wq_lock);
        }
}

void idxd_device_clear_state(struct idxd_device *idxd)
{
        /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                /*
                 * Clearing wq state is protected by the wq lock, so it does
                 * not need to be protected by the device lock.
                 */
                idxd_device_wqs_clear_state(idxd);

                spin_lock(&idxd->dev_lock);
                idxd_groups_clear_state(idxd);
                idxd_engines_clear_state(idxd);
        } else {
                spin_lock(&idxd->dev_lock);
        }

        idxd->state = IDXD_DEV_DISABLED;
        spin_unlock(&idxd->dev_lock);
}

static int idxd_device_evl_setup(struct idxd_device *idxd)
{
        union gencfg_reg gencfg;
        union evlcfg_reg evlcfg;
        union genctrl_reg genctrl;
        struct device *dev = &idxd->pdev->dev;
        void *addr;
        dma_addr_t dma_addr;
        int size;
        struct idxd_evl *evl = idxd->evl;
        unsigned long *bmap;
        int rc;

        if (!evl)
                return 0;

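        /*
         * The event log is a ring of entries in coherent DMA memory to which
         * the device appends error and fault records. The bitmap is used by
         * the event log processing path to track entries whose faults are
         * still being handled.
         */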
        size = evl_size(idxd);

        bmap = bitmap_zalloc(size, GFP_KERNEL);
        if (!bmap) {
                rc = -ENOMEM;
                goto err_bmap;
        }

        /*
         * The address needs to be page aligned. dma_alloc_coherent() returns
         * an address aligned to at least page size, so no manual alignment is
         * required.
         */
        addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
        if (!addr) {
                rc = -ENOMEM;
                goto err_alloc;
        }

        memset(addr, 0, size);

        spin_lock(&evl->lock);
        evl->log = addr;
        evl->dma = dma_addr;
        evl->log_size = size;
        evl->bmap = bmap;

        memset(&evlcfg, 0, sizeof(evlcfg));
        evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
        evlcfg.size = evl->size;

        iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
        iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.evl_int_en = 1;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

        gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        gencfg.evl_en = 1;
        iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

        spin_unlock(&evl->lock);
        return 0;

err_alloc:
        bitmap_free(bmap);
err_bmap:
        return rc;
}

static void idxd_device_evl_free(struct idxd_device *idxd)
{
        union gencfg_reg gencfg;
        union genctrl_reg genctrl;
        struct device *dev = &idxd->pdev->dev;
        struct idxd_evl *evl = idxd->evl;

        gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        if (!gencfg.evl_en)
                return;

        spin_lock(&evl->lock);
        gencfg.evl_en = 0;
        iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

        genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
        genctrl.evl_int_en = 0;
        iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

        iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
        iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

        dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
        bitmap_free(evl->bmap);
        evl->log = NULL;
        evl->size = IDXD_EVL_SIZE_MIN;
        spin_unlock(&evl->lock);
}

static void idxd_group_config_write(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i;
        u32 grpcfg_offset;

        dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

        /* setup GRPWQCFG */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset,
                        ioread64(idxd->reg_base + grpcfg_offset));
        }

        /* setup GRPENGCFG */
        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

        /* setup GRPFLAGS */
        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
                group->id, grpcfg_offset,
                ioread64(idxd->reg_base + grpcfg_offset));
}

static int idxd_groups_config_write(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i;
        struct device *dev = &idxd->pdev->dev;

        /* Setup bandwidth rdbuf limit */
        if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
                reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
                reg.rdbuf_limit = idxd->rdbuf_limit;
                iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
        }

        dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
                ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_config_write(group);
        }

        return 0;
}

static bool idxd_device_pasid_priv_enabled(struct idxd_device *idxd)
{
        struct pci_dev *pdev = idxd->pdev;

        return pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV);
}

static int idxd_wq_config_write(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        u32 wq_offset;
        int i, n;

        if (!wq->group)
                return 0;

        /*
         * Instead of clearing the entire WQCFG shadow copy, read it back from
         * the hardware after wq reset. This copies back the sticky values
         * that are present on some devices.
         */
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
        }

        if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
                wq->size = WQ_DEFAULT_QUEUE_DEPTH;

        /* bytes 0-3 */
        wq->wqcfg->wq_size = wq->size;

        /* bytes 4-7 */
        wq->wqcfg->wq_thresh = wq->threshold;

        /* bytes 8-11 */
        if (wq_dedicated(wq))
                wq->wqcfg->mode = 1;

        /*
         * The WQ priv bit is set depending on the WQ type. priv = 1 if the
         * WQ type is kernel, to indicate privileged access. This setting only
         * matters for a dedicated WQ. According to the DSA spec:
         * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the
         * Privileged Mode Enable field of the PCI Express PASID capability
         * is 0, this field must be 0.
         *
         * A dedicated kernel WQ that cannot support the PASID capability
         * therefore has its configuration rejected.
         */
        if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
            !idxd_device_pasid_priv_enabled(idxd) &&
            wq->type == IDXD_WQT_KERNEL) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
                return -EOPNOTSUPP;
        }

        wq->wqcfg->priority = wq->priority;

        if (idxd->hw.gen_cap.block_on_fault &&
            test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
            !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
                wq->wqcfg->bof = 1;

        if (idxd->hw.wq_cap.wq_ats_support)
                wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

        if (idxd->hw.wq_cap.wq_prs_support)
                wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);

        /* bytes 12-15 */
        wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
        idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

        /* bytes 32-63 */
        if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
                memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
                for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
                        int pos = n % BITS_PER_LONG_LONG;
                        int idx = n / BITS_PER_LONG_LONG;

                        wq->wqcfg->op_config[idx] |= BIT(pos);
                }
        }

        dev_dbg(dev, "WQ %d CFGs\n", wq->id);
        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
                iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
                        wq->id, i, wq_offset,
                        ioread32(idxd->reg_base + wq_offset));
        }

        return 0;
}

static int idxd_wqs_config_write(struct idxd_device *idxd)
{
        int i, rc;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_config_write(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static void idxd_group_flags_setup(struct idxd_device *idxd)
{
        int i;

        /* TC-A 0 and TC-B 1 should be defaults */
        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                if (group->tc_a == -1)
                        group->tc_a = group->grpcfg.flags.tc_a = 0;
                else
                        group->grpcfg.flags.tc_a = group->tc_a;
                if (group->tc_b == -1)
                        group->tc_b = group->grpcfg.flags.tc_b = 1;
                else
                        group->grpcfg.flags.tc_b = group->tc_b;
                group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
                group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
                group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
                group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
                group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
        }
}

static int idxd_engines_setup(struct idxd_device *idxd)
{
        int i, engines = 0;
        struct idxd_engine *eng;
        struct idxd_group *group;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                group->grpcfg.engines = 0;
        }

        for (i = 0; i < idxd->max_engines; i++) {
                eng = idxd->engines[i];
                group = eng->group;

                if (!group)
                        continue;

                group->grpcfg.engines |= BIT(eng->id);
                engines++;
        }

        if (!engines)
                return -EINVAL;

        return 0;
}

static int idxd_wqs_setup(struct idxd_device *idxd)
{
        struct idxd_wq *wq;
        struct idxd_group *group;
        int i, j, configured = 0;
        struct device *dev = &idxd->pdev->dev;

        for (i = 0; i < idxd->max_groups; i++) {
                group = idxd->groups[i];
                for (j = 0; j < 4; j++)
                        group->grpcfg.wqs[j] = 0;
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                wq = idxd->wqs[i];
                group = wq->group;

                if (!wq->group)
                        continue;

                if (wq_shared(wq) && !wq_shared_supported(wq)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
                        dev_warn(dev, "No shared wq support but configured.\n");
                        return -EINVAL;
                }

                group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
                configured++;
        }

        if (configured == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
                return -EINVAL;
        }

        return 0;
}

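/*
 * Push the software configuration out to the device: populate the group
 * shadow structures from the WQ and engine assignments, then flush the
 * WQCFG and GRPCFG registers. The caller must hold the device lock.
 */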
int idxd_device_config(struct idxd_device *idxd)
{
        int rc;

        lockdep_assert_held(&idxd->dev_lock);
        rc = idxd_wqs_setup(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_engines_setup(idxd);
        if (rc < 0)
                return rc;

        idxd_group_flags_setup(idxd);

        rc = idxd_wqs_config_write(idxd);
        if (rc < 0)
                return rc;

        rc = idxd_groups_config_write(idxd);
        if (rc < 0)
                return rc;

        return 0;
}

static int idxd_wq_load_config(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int wqcfg_offset;
        int i;

        wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
        memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

        wq->size = wq->wqcfg->wq_size;
        wq->threshold = wq->wqcfg->wq_thresh;

        /* The driver does not support shared WQ mode in read-only config yet */
        if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
                return -EOPNOTSUPP;

        set_bit(WQ_FLAG_DEDICATED, &wq->flags);

        wq->priority = wq->wqcfg->priority;

        wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
        idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);

        for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
                dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
        }

        return 0;
}

static void idxd_group_load_config(struct idxd_group *group)
{
        struct idxd_device *idxd = group->idxd;
        struct device *dev = &idxd->pdev->dev;
        int i, j, grpcfg_offset;

        /*
         * Load WQS bit fields
         * Iterate through all 256 bits 64 bits at a time
         */
        for (i = 0; i < GRPWQCFG_STRIDES; i++) {
                struct idxd_wq *wq;

                grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
                group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
                dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
                        group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

                if (i * 64 >= idxd->max_wqs)
                        break;

                /* Iterate through all 64 bits and check for wq set */
                for (j = 0; j < 64; j++) {
                        int id = i * 64 + j;

                        /* No need to check beyond max wqs */
                        if (id >= idxd->max_wqs)
                                break;

                        /* Set group assignment for wq if wq bit is set */
                        if (group->grpcfg.wqs[i] & BIT(j)) {
                                wq = idxd->wqs[id];
                                wq->group = group;
                        }
                }
        }

        grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
        group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
                grpcfg_offset, group->grpcfg.engines);

        /* Iterate through all 64 bits to check engines set */
        for (i = 0; i < 64; i++) {
                if (i >= idxd->max_engines)
                        break;

                if (group->grpcfg.engines & BIT(i)) {
                        struct idxd_engine *engine = idxd->engines[i];

                        engine->group = group;
                }
        }

        grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
        group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
        dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
                group->id, grpcfg_offset, group->grpcfg.flags.bits);
}

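/*
 * For devices with a read-only configuration (no WQCFG/GRPCFG write
 * support), read the configuration from MMIO back into the driver's
 * shadow state instead of writing it out.
 */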
int idxd_device_load_config(struct idxd_device *idxd)
{
        union gencfg_reg reg;
        int i, rc;

        reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
        idxd->rdbuf_limit = reg.rdbuf_limit;

        for (i = 0; i < idxd->max_groups; i++) {
                struct idxd_group *group = idxd->groups[i];

                idxd_group_load_config(group);
        }

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];

                rc = idxd_wq_load_config(wq);
                if (rc < 0)
                        return rc;
        }

        return 0;
}

static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
{
        struct idxd_desc *desc, *itr;
        struct llist_node *head;
        LIST_HEAD(flist);
        enum idxd_complete_type ctype;

        spin_lock(&ie->list_lock);
        head = llist_del_all(&ie->pending_llist);
        if (head) {
                llist_for_each_entry_safe(desc, itr, head, llnode)
                        list_add_tail(&desc->list, &ie->work_list);
        }

        list_for_each_entry_safe(desc, itr, &ie->work_list, list)
                list_move_tail(&desc->list, &flist);
        spin_unlock(&ie->list_lock);

        list_for_each_entry_safe(desc, itr, &flist, list) {
                struct dma_async_tx_descriptor *tx;

                list_del(&desc->list);
                /* A written completion record means the descriptor finished */
                ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
                /*
                 * The wq is being disabled. Any remaining descriptors are
                 * likely to be stuck and can be dropped. The callback could
                 * point to code that is no longer accessible, for example
                 * if the dmatest module has been unloaded.
                 */
                tx = &desc->txd;
                tx->callback = NULL;
                tx->callback_result = NULL;
                idxd_dma_complete_txd(desc, ctype, true);
        }
}

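/*
 * Each interrupt vector has an 8-byte entry in the device's MSI-X
 * permission table; program it with the PASID allowed to generate the
 * interrupt when PASID is in use, or leave it clear otherwise.
 */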
static void idxd_device_set_perm_entry(struct idxd_device *idxd,
                                       struct idxd_irq_entry *ie)
{
        union msix_perm mperm;

        if (ie->pasid == IOMMU_PASID_INVALID)
                return;

        mperm.bits = 0;
        mperm.pasid = ie->pasid;
        mperm.pasid_en = 1;
        iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
                                         struct idxd_irq_entry *ie)
{
        iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
}

void idxd_wq_free_irq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct idxd_irq_entry *ie = &wq->ie;

        if (wq->type != IDXD_WQT_KERNEL)
                return;

        free_irq(ie->vector, ie);
        idxd_flush_pending_descs(ie);
        if (idxd->request_int_handles)
                idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
        idxd_device_clear_perm_entry(idxd, ie);
        ie->vector = -1;
        ie->int_handle = INVALID_INT_HANDLE;
        ie->pasid = IOMMU_PASID_INVALID;
}

int idxd_wq_request_irq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct pci_dev *pdev = idxd->pdev;
        struct device *dev = &pdev->dev;
        struct idxd_irq_entry *ie;
        int rc;

        if (wq->type != IDXD_WQT_KERNEL)
                return 0;

        ie = &wq->ie;
        ie->vector = pci_irq_vector(pdev, ie->id);
        ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
        idxd_device_set_perm_entry(idxd, ie);

        rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
        if (rc < 0) {
                dev_err(dev, "Failed to request irq %d.\n", ie->vector);
                goto err_irq;
        }

        if (idxd->request_int_handles) {
                rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
                                                    IDXD_IRQ_MSIX);
                if (rc < 0)
                        goto err_int_handle;
        } else {
                ie->int_handle = ie->id;
        }

        return 0;

err_int_handle:
        ie->int_handle = INVALID_INT_HANDLE;
        free_irq(ie->vector, ie);
err_irq:
        idxd_device_clear_perm_entry(idxd, ie);
        ie->pasid = IOMMU_PASID_INVALID;
        return rc;
}

int drv_enable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
        int rc = -ENXIO;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd->state != IDXD_DEV_ENABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
                goto err;
        }

        if (wq->state != IDXD_WQ_DISABLED) {
                dev_dbg(dev, "wq %d already enabled.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
                rc = -EBUSY;
                goto err;
        }

        if (!wq->group) {
                dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
                idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
                goto err;
        }

        if (strlen(wq->name) == 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
                dev_dbg(dev, "wq %d name not set.\n", wq->id);
                goto err;
        }

        /* Shared WQ checks */
        if (wq_shared(wq)) {
                if (!wq_shared_supported(wq)) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
                        dev_dbg(dev, "PASID not enabled and shared wq.\n");
                        goto err;
                }
                /*
                 * A shared wq with threshold 0 means the user either never
                 * set a threshold or transitioned from a dedicated wq without
                 * setting one. A value of 0 would effectively disable the
                 * shared wq, so the driver does not allow 0 to be set for the
                 * threshold via sysfs.
                 */
                if (wq->threshold == 0) {
                        idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
                        dev_dbg(dev, "Shared wq and threshold 0.\n");
                        goto err;
                }
        }

        /*
         * If the WQ is configurable for pasid, the driver should set up the
         * pasid and the pasid_en bit. This is true for both kernel and user
         * shared workqueues. There is no need to set up the priv bit, since
         * in-kernel DMA will also work with user-privileged requests.
         * A dedicated wq that is not of 'kernel' type will configure pasid
         * and pasid_en later on, so there is no need to set them up here.
         */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
                if (wq_pasid_enabled(wq)) {
                        if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
                                u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

                                __idxd_wq_set_pasid_locked(wq, pasid);
                        }
                }
        }

        rc = 0;
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0) {
                dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_enable(wq);
        if (rc < 0) {
                dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
                goto err;
        }

        rc = idxd_wq_map_portal(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
                dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
                goto err_map_portal;
        }

        wq->client_count = 0;

        rc = idxd_wq_request_irq(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
                dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
                goto err_irq;
        }

        rc = idxd_wq_alloc_resources(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
                dev_dbg(dev, "WQ resource alloc failed\n");
                goto err_res_alloc;
        }

        rc = idxd_wq_init_percpu_ref(wq);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
                dev_dbg(dev, "percpu_ref setup failed\n");
                goto err_ref;
        }

        return 0;

err_ref:
        idxd_wq_free_resources(wq);
err_res_alloc:
        idxd_wq_free_irq(wq);
err_irq:
        idxd_wq_unmap_portal(wq);
err_map_portal:
        if (idxd_wq_disable(wq, false))
                dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
        return rc;
}

void drv_disable_wq(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;

        lockdep_assert_held(&wq->wq_lock);

        if (idxd_wq_refcount(wq))
                dev_warn(dev, "Clients have a claim on wq %d: %d\n",
                         wq->id, idxd_wq_refcount(wq));

        idxd_wq_unmap_portal(wq);
        idxd_wq_drain(wq);
        idxd_wq_free_irq(wq);
        idxd_wq_reset(wq);
        idxd_wq_free_resources(wq);
        percpu_ref_exit(&wq->wq_active);
        wq->type = IDXD_WQT_NONE;
        wq->client_count = 0;
}

int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
{
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int rc = 0;

        /*
         * The device should be in the disabled state for idxd_drv to load.
         * If it is in the enabled state, then the device was altered outside
         * of the driver's control. If the device is halted, we don't want to
         * proceed.
         */
        if (idxd->state != IDXD_DEV_DISABLED) {
                idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
                return -ENXIO;
        }

        /* Device configuration */
        spin_lock(&idxd->dev_lock);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                rc = idxd_device_config(idxd);
        spin_unlock(&idxd->dev_lock);
        if (rc < 0)
                return -ENXIO;

        /*
         * The system PASID is preserved across device disable/enable cycles,
         * but the GENCFG register content gets cleared during device reset.
         * User interrupts must be re-enabled for the kernel work queue
         * completion IRQ to function.
         */
        if (idxd->pasid != IOMMU_PASID_INVALID)
                idxd_set_user_intr(idxd, 1);

        rc = idxd_device_evl_setup(idxd);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
                return rc;
        }

        /* Start device */
        rc = idxd_device_enable(idxd);
        if (rc < 0) {
                idxd_device_evl_free(idxd);
                return rc;
        }

        /* Setup DMA device without channels */
        rc = idxd_register_dma_device(idxd);
        if (rc < 0) {
                idxd_device_disable(idxd);
                idxd_device_evl_free(idxd);
                idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
                return rc;
        }

        idxd->cmd_status = 0;
        return 0;
}

void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
{
        struct device *dev = &idxd_dev->conf_dev;
        struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev);
        int i;

        for (i = 0; i < idxd->max_wqs; i++) {
                struct idxd_wq *wq = idxd->wqs[i];
                struct device *wq_dev = wq_confdev(wq);

                if (wq->state == IDXD_WQ_DISABLED)
                        continue;
                dev_warn(dev, "Active wq %d on disable %s.\n", i, dev_name(wq_dev));
                device_release_driver(wq_dev);
        }

        idxd_unregister_dma_device(idxd);
        idxd_device_disable(idxd);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                idxd_device_reset(idxd);
        idxd_device_evl_free(idxd);
}

static enum idxd_dev_type dev_types[] = {
        IDXD_DEV_DSA,
        IDXD_DEV_IAX,
        IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_drv = {
        .type = dev_types,
        .probe = idxd_device_drv_probe,
        .remove = idxd_device_drv_remove,
        .name = "idxd",
};
EXPORT_SYMBOL_GPL(idxd_drv);