1 // SPDX-License-Identifier: GPL-2.0-only
3 * Intel I/OAT DMA Linux driver
4 * Copyright(c) 2004 - 2015 Intel Corporation.
8 * This driver supports an Intel I/OAT DMA engine, which does asynchronous memory copies.
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/pci.h>
16 #include <linux/interrupt.h>
17 #include <linux/dmaengine.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/workqueue.h>
21 #include <linux/prefetch.h>
22 #include <linux/sizes.h>
24 #include "registers.h"
27 #include "../dmaengine.h"
29 static int completion_timeout = 200;
30 module_param(completion_timeout, int, 0644);
31 MODULE_PARM_DESC(completion_timeout,
32 "set ioat completion timeout [msec] (default 200 [msec])");
33 static int idle_timeout = 2000;
34 module_param(idle_timeout, int, 0644);
35 MODULE_PARM_DESC(idle_timeout,
36 "set ioat idel timeout [msec] (default 2000 [msec])");
38 #define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
39 #define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
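/*
 * Both values above are taken in milliseconds from the module parameters
 * and converted to jiffies each time these macros are used. Illustrative
 * usage (assuming the driver is built as the "ioatdma" module; the
 * parameter names are exactly as declared above):
 *
 *   modprobe ioatdma completion_timeout=500 idle_timeout=4000
 *
 * or at runtime, since both are declared with 0644 permissions:
 *
 *   echo 500 > /sys/module/ioatdma/parameters/completion_timeout
 */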
41 static char *chanerr_str[] = {
42 "DMA Transfer Source Address Error",
43 "DMA Transfer Destination Address Error",
44 "Next Descriptor Address Error",
46 "Chan Address Value Error",
48 "Chipset Uncorrectable Data Integrity Error",
49 "DMA Uncorrectable Data Integrity Error",
52 "Descriptor Control Error",
53 "Descriptor Transfer Size Error",
54 "Completion Address Error",
55 "Interrupt Configuration Error",
56 "Super extended descriptor Address Error",
60 "Descriptor Count Error",
61 "DIF All F detect Error",
62 "Guard Tag verification Error",
63 "Application Tag verification Error",
64 "Reference Tag verification Error",
66 "Result DIF All F detect Error",
67 "Result Guard Tag verification Error",
68 "Result Application Tag verification Error",
69 "Result Reference Tag verification Error",
72 static void ioat_eh(struct ioatdma_chan *ioat_chan);
74 static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
78 for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
79 if ((chanerr >> i) & 1) {
80 dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
87 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
89 * @data: interrupt data
91 irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
93 struct ioatdma_device *instance = data;
94 struct ioatdma_chan *ioat_chan;
95 unsigned long attnstatus;
99 intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
101 if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
104 if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
105 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
109 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
110 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
111 ioat_chan = ioat_chan_by_index(instance, bit);
112 if (test_bit(IOAT_RUN, &ioat_chan->state))
113 tasklet_schedule(&ioat_chan->cleanup_task);
116 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
121 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
123 * @data: interrupt data
125 irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
127 struct ioatdma_chan *ioat_chan = data;
129 if (test_bit(IOAT_RUN, &ioat_chan->state))
130 tasklet_schedule(&ioat_chan->cleanup_task);
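/*
 * ioat_stop() quiesces a channel for teardown: clear IOAT_RUN so neither
 * the interrupt handlers above nor the timer can re-arm cleanup work, flush
 * any interrupt already in flight for this channel's vector, kill the timer
 * and the cleanup tasklet, and finally run one last cleanup pass by hand.
 */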
135 void ioat_stop(struct ioatdma_chan *ioat_chan)
137 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
138 struct pci_dev *pdev = ioat_dma->pdev;
139 int chan_id = chan_num(ioat_chan);
140 struct msix_entry *msix;
142 /* 1/ stop irq from firing tasklets
143 * 2/ stop the tasklet from re-arming irqs
145 clear_bit(IOAT_RUN, &ioat_chan->state);
147 /* flush inflight interrupts */
148 switch (ioat_dma->irq_mode) {
150 msix = &ioat_dma->msix_entries[chan_id];
151 synchronize_irq(msix->vector);
155 synchronize_irq(pdev->irq);
161 /* flush inflight timers */
162 del_timer_sync(&ioat_chan->timer);
164 /* flush inflight tasklet runs */
165 tasklet_kill(&ioat_chan->cleanup_task);
167 /* final cleanup now that everything is quiesced and can't re-arm */
168 ioat_cleanup_event(&ioat_chan->cleanup_task);
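/*
 * __ioat_issue_pending() hands newly prepared descriptors to the hardware:
 * it adds the pending count to dmacount, records the new "issued" index,
 * and writes the running dmacount to the channel's DMACOUNT register.
 */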
171 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
173 ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
174 ioat_chan->issued = ioat_chan->head;
175 writew(ioat_chan->dmacount,
176 ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
177 dev_dbg(to_dev(ioat_chan),
178 "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
179 __func__, ioat_chan->head, ioat_chan->tail,
180 ioat_chan->issued, ioat_chan->dmacount);
183 void ioat_issue_pending(struct dma_chan *c)
185 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
187 if (ioat_ring_pending(ioat_chan)) {
188 spin_lock_bh(&ioat_chan->prep_lock);
189 __ioat_issue_pending(ioat_chan);
190 spin_unlock_bh(&ioat_chan->prep_lock);
195 * ioat_update_pending - issue pending descriptors once past the watermark
196 * @ioat_chan: ioat channel
198 * Check if the number of unsubmitted descriptors has exceeded the
199 * watermark. Called with prep_lock held
201 static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
203 if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
204 __ioat_issue_pending(ioat_chan);
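/*
 * A "null" descriptor is a minimal, acked descriptor (non-zero size, with
 * interrupt and completion write enabled) used purely to give the channel a
 * valid chain address to (re)start from.
 */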
207 static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
209 struct ioat_ring_ent *desc;
210 struct ioat_dma_descriptor *hw;
212 if (ioat_ring_space(ioat_chan) < 1) {
213 dev_err(to_dev(ioat_chan),
214 "Unable to start null desc - ring full\n");
218 dev_dbg(to_dev(ioat_chan),
219 "%s: head: %#x tail: %#x issued: %#x\n",
220 __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
221 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
226 hw->ctl_f.int_en = 1;
227 hw->ctl_f.compl_write = 1;
228 /* set size to non-zero value (channel returns error when size is 0) */
229 hw->size = NULL_DESC_BUFFER_SIZE;
232 async_tx_ack(&desc->txd);
233 ioat_set_chainaddr(ioat_chan, desc->txd.phys);
234 dump_desc_dbg(ioat_chan, desc);
235 /* make sure descriptors are written before we submit */
237 ioat_chan->head += 1;
238 __ioat_issue_pending(ioat_chan);
241 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
243 spin_lock_bh(&ioat_chan->prep_lock);
244 if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
245 __ioat_start_null_desc(ioat_chan);
246 spin_unlock_bh(&ioat_chan->prep_lock);
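/*
 * __ioat_restart_chan() resumes a stopped channel: rewind "issued" back to
 * "tail", zero dmacount, then either point the hardware chain address at
 * the first still-pending descriptor and re-issue it, or start a null
 * descriptor if nothing is pending.
 */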
249 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
251 /* set the tail to be re-issued */
252 ioat_chan->issued = ioat_chan->tail;
253 ioat_chan->dmacount = 0;
254 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
256 dev_dbg(to_dev(ioat_chan),
257 "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
258 __func__, ioat_chan->head, ioat_chan->tail,
259 ioat_chan->issued, ioat_chan->dmacount);
261 if (ioat_ring_pending(ioat_chan)) {
262 struct ioat_ring_ent *desc;
264 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
265 ioat_set_chainaddr(ioat_chan, desc->txd.phys);
266 __ioat_issue_pending(ioat_chan);
268 __ioat_start_null_desc(ioat_chan);
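/*
 * ioat_quiesce() asks an active or idle channel to suspend and then polls
 * CHANSTS until it has left the active and idle states; a zero @tmo polls
 * without a deadline, otherwise the wait is bounded by @tmo jiffies.
 */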
271 static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
273 unsigned long end = jiffies + tmo;
277 status = ioat_chansts(ioat_chan);
278 if (is_ioat_active(status) || is_ioat_idle(status))
279 ioat_suspend(ioat_chan);
280 while (is_ioat_active(status) || is_ioat_idle(status)) {
281 if (tmo && time_after(jiffies, end)) {
285 status = ioat_chansts(ioat_chan);
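/*
 * ioat_reset_sync() issues a channel reset and spins until the hardware no
 * longer reports the reset as pending, with the wait bounded by roughly
 * @tmo jiffies.
 */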
292 static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
294 unsigned long end = jiffies + tmo;
297 ioat_reset(ioat_chan);
298 while (ioat_reset_pending(ioat_chan)) {
299 if (end && time_after(jiffies, end)) {
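/*
 * ioat_tx_submit_unlock() is the dmaengine tx_submit hook: it assigns the
 * cookie, arms the completion timer the first time the channel goes active,
 * publishes the descriptor writes, advances "head" by the number of slots
 * reserved in ioat_check_space_lock(), and then drops the prep_lock that
 * was taken there.
 */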
309 static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
310 __releases(&ioat_chan->prep_lock)
312 struct dma_chan *c = tx->chan;
313 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
316 cookie = dma_cookie_assign(tx);
317 dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
319 if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
320 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
322 /* make descriptor updates visible before advancing ioat->head,
323 * this is purposefully not smp_wmb() since we are also
324 * publishing the descriptor updates to a dma device
328 ioat_chan->head += ioat_chan->produce;
330 ioat_update_pending(ioat_chan);
331 spin_unlock_bh(&ioat_chan->prep_lock);
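/*
 * Each software ring entry is bound to a slot in the pre-allocated
 * descriptor chunks: chunk index = idx / IOAT_DESCS_PER_CHUNK, and the byte
 * offset inside that chunk is (idx modulo IOAT_DESCS_PER_CHUNK) *
 * IOAT_DESC_SZ, giving matching CPU and DMA addresses for the hardware
 * descriptor (the actual chunk and descriptor sizes come from the headers
 * and are not repeated here).
 */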
336 static struct ioat_ring_ent *
337 ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
339 struct ioat_dma_descriptor *hw;
340 struct ioat_ring_ent *desc;
341 struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
347 chunk = idx / IOAT_DESCS_PER_CHUNK;
348 idx &= (IOAT_DESCS_PER_CHUNK - 1);
349 offs = idx * IOAT_DESC_SZ;
350 pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
351 phys = ioat_chan->descs[chunk].hw + offs;
352 hw = (struct ioat_dma_descriptor *)pos;
353 memset(hw, 0, sizeof(*hw));
355 desc = kmem_cache_zalloc(ioat_cache, flags);
359 dma_async_tx_descriptor_init(&desc->txd, chan);
360 desc->txd.tx_submit = ioat_tx_submit_unlock;
362 desc->txd.phys = phys;
366 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
368 kmem_cache_free(ioat_cache, desc);
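/*
 * ioat_alloc_ring() builds the full ring: a kcalloc()'d array of software
 * entries backed by DMA-coherent chunks of hardware descriptors, with every
 * hw->next pointing at the next entry and the last one wrapping back to
 * ring[0]. On hardware advertising IOAT_CAP_DPS (v3.4) descriptor
 * prefetching is also enabled through DRSCTL. Any allocation failure
 * unwinds everything allocated so far.
 */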
371 struct ioat_ring_ent **
372 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
374 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
375 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
376 struct ioat_ring_ent **ring;
377 int total_descs = 1 << order;
380 /* allocate the array to hold the software ring */
381 ring = kcalloc(total_descs, sizeof(*ring), flags);
385 chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
386 ioat_chan->desc_chunks = chunks;
388 for (i = 0; i < chunks; i++) {
389 struct ioat_descs *descs = &ioat_chan->descs[i];
391 descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
392 IOAT_CHUNK_SIZE, &descs->hw, flags);
396 for (idx = 0; idx < i; idx++) {
397 descs = &ioat_chan->descs[idx];
398 dma_free_coherent(to_dev(ioat_chan),
400 descs->virt, descs->hw);
405 ioat_chan->desc_chunks = 0;
411 for (i = 0; i < total_descs; i++) {
412 ring[i] = ioat_alloc_ring_ent(c, i, flags);
417 ioat_free_ring_ent(ring[i], c);
419 for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
420 dma_free_coherent(to_dev(ioat_chan),
422 ioat_chan->descs[idx].virt,
423 ioat_chan->descs[idx].hw);
424 ioat_chan->descs[idx].virt = NULL;
425 ioat_chan->descs[idx].hw = 0;
428 ioat_chan->desc_chunks = 0;
432 set_desc_id(ring[i], i);
436 for (i = 0; i < total_descs-1; i++) {
437 struct ioat_ring_ent *next = ring[i+1];
438 struct ioat_dma_descriptor *hw = ring[i]->hw;
440 hw->next = next->txd.phys;
442 ring[i]->hw->next = ring[0]->txd.phys;
444 /* setup descriptor pre-fetching for v3.4 */
445 if (ioat_dma->cap & IOAT_CAP_DPS) {
446 u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
449 drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
451 writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
459 * ioat_check_space_lock - verify space and grab ring producer lock
460 * @ioat_chan: ioat channel (ring) to operate on
461 * @num_descs: allocation length
463 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
464 __acquires(&ioat_chan->prep_lock)
466 spin_lock_bh(&ioat_chan->prep_lock);
467 /* never allow the last descriptor to be consumed, we need at
468 * least one free at all times to allow for on-the-fly ring resizing. */
471 if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
472 dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
473 __func__, num_descs, ioat_chan->head,
474 ioat_chan->tail, ioat_chan->issued);
475 ioat_chan->produce = num_descs;
476 return 0; /* with ioat->prep_lock held */
478 spin_unlock_bh(&ioat_chan->prep_lock);
480 dev_dbg_ratelimited(to_dev(ioat_chan),
481 "%s: ring full! num_descs: %d (%x:%x:%x)\n",
482 __func__, num_descs, ioat_chan->head,
483 ioat_chan->tail, ioat_chan->issued);
485 /* progress reclaim in the allocation failure case: we may be
486 * called with bottom halves disabled, so trigger the timer event directly. */
489 if (time_is_before_jiffies(ioat_chan->timer.expires)
490 && timer_pending(&ioat_chan->timer)) {
491 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
492 ioat_timer_event(&ioat_chan->timer);
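/*
 * Operations with many sources spill into a second, extended descriptor:
 * XOR/XOR_VAL beyond 5 sources and PQ/PQ_VAL beyond 3 sources occupy two
 * ring slots, which is why the cleanup and abort paths skip an extra entry
 * when desc_has_ext() is true.
 */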
498 static bool desc_has_ext(struct ioat_ring_ent *desc)
500 struct ioat_dma_descriptor *hw = desc->hw;
502 if (hw->ctl_f.op == IOAT_OP_XOR ||
503 hw->ctl_f.op == IOAT_OP_XOR_VAL) {
504 struct ioat_xor_descriptor *xor = desc->xor;
506 if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
508 } else if (hw->ctl_f.op == IOAT_OP_PQ ||
509 hw->ctl_f.op == IOAT_OP_PQ_VAL) {
510 struct ioat_pq_descriptor *pq = desc->pq;
512 if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
520 ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
525 dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
526 kmem_cache_free(ioat_sed_cache, sed);
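/*
 * ioat_get_current_completion() reads the channel's completion writeback
 * area (updated by hardware) and masks the CHANSTS-format value down to the
 * bus address of the last descriptor the channel finished.
 */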
529 static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
534 completion = *ioat_chan->completion;
535 phys_complete = ioat_chansts_to_addr(completion);
537 dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
538 (unsigned long long) phys_complete);
540 return phys_complete;
543 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
546 *phys_complete = ioat_get_current_completion(ioat_chan);
547 if (*phys_complete == ioat_chan->last_completion)
550 clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
551 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
557 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
559 struct ioat_dma_descriptor *hw = desc->hw;
561 switch (hw->ctl_f.op) {
563 case IOAT_OP_PQ_VAL_16S:
565 struct ioat_pq_descriptor *pq = desc->pq;
567 /* check if there's error written */
568 if (!pq->dwbes_f.wbes)
571 /* need to set a chanerr var for checking to clear later */
573 if (pq->dwbes_f.p_val_err)
574 *desc->result |= SUM_CHECK_P_RESULT;
576 if (pq->dwbes_f.q_val_err)
577 *desc->result |= SUM_CHECK_Q_RESULT;
587 * __cleanup - reclaim used descriptors
588 * @ioat_chan: channel (ring) to clean
589 * @phys_complete: completion address reported by the channel status (may be zero right after a restart)
591 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
593 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
594 struct ioat_ring_ent *desc;
595 bool seen_current = false;
596 int idx = ioat_chan->tail, i;
599 dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
600 __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
603 * At restart of the channel, the completion address and the
604 * channel status will be 0 due to starting a new chain. Since
605 * it's a new chain and the first descriptor "fails", there is
606 * nothing to clean up. We do not want to reap the entire submitted
607 * chain due to this 0 address value and then BUG.
612 active = ioat_ring_active(ioat_chan);
613 for (i = 0; i < active && !seen_current; i++) {
614 struct dma_async_tx_descriptor *tx;
616 prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
617 desc = ioat_get_ring_ent(ioat_chan, idx + i);
618 dump_desc_dbg(ioat_chan, desc);
620 /* set err stat if we are using dwbes */
621 if (ioat_dma->cap & IOAT_CAP_DWBES)
622 desc_get_errstat(ioat_chan, desc);
626 dma_cookie_complete(tx);
627 dma_descriptor_unmap(tx);
628 dmaengine_desc_get_callback_invoke(tx, NULL);
630 tx->callback_result = NULL;
633 if (tx->phys == phys_complete)
636 /* skip extended descriptors */
637 if (desc_has_ext(desc)) {
638 BUG_ON(i + 1 >= active);
642 /* cleanup super extended descriptors */
644 ioat_free_sed(ioat_dma, desc->sed);
649 /* finish all descriptor reads before incrementing tail */
651 ioat_chan->tail = idx + i;
652 /* no active descs have written a completion? */
653 BUG_ON(active && !seen_current);
654 ioat_chan->last_completion = phys_complete;
656 if (active - i == 0) {
657 dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
659 mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
662 /* interrupt coalescing: delay (in microseconds, set via sysfs) scaled by the number of pending descriptors */
663 if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
664 writew(min((ioat_chan->intr_coalesce * (active - i)),
665 IOAT_INTRDELAY_MASK),
666 ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
667 ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
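/*
 * ioat_cleanup() is the tasklet-context reclaim path: under cleanup_lock it
 * reaps completed descriptors whenever the completion address has moved,
 * and if the channel reports a halt with an error the driver knows how to
 * handle or recover from, it pushes the timer out and lets error recovery
 * (ioat_eh()) take over.
 */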
671 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
675 spin_lock_bh(&ioat_chan->cleanup_lock);
677 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
678 __cleanup(ioat_chan, phys_complete);
680 if (is_ioat_halted(*ioat_chan->completion)) {
681 u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
684 (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
685 mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
690 spin_unlock_bh(&ioat_chan->cleanup_lock);
693 void ioat_cleanup_event(struct tasklet_struct *t)
695 struct ioatdma_chan *ioat_chan = from_tasklet(ioat_chan, t, cleanup_task);
697 ioat_cleanup(ioat_chan);
698 if (!test_bit(IOAT_RUN, &ioat_chan->state))
700 writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
703 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
707 /* set the completion address register again */
708 writel(lower_32_bits(ioat_chan->completion_dma),
709 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
710 writel(upper_32_bits(ioat_chan->completion_dma),
711 ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
713 ioat_quiesce(ioat_chan, 0);
714 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
715 __cleanup(ioat_chan, phys_complete);
717 __ioat_restart_chan(ioat_chan);
721 static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
723 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
724 struct ioat_ring_ent *desc;
726 int idx = ioat_chan->tail, i;
729 * We assume that the failed descriptor has been processed.
730 * Now we are just returning all the remaining submitted
731 * descriptors to abort.
733 active = ioat_ring_active(ioat_chan);
735 /* we skip the failed descriptor that tail points to */
736 for (i = 1; i < active; i++) {
737 struct dma_async_tx_descriptor *tx;
739 prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
740 desc = ioat_get_ring_ent(ioat_chan, idx + i);
744 struct dmaengine_result res;
746 dma_cookie_complete(tx);
747 dma_descriptor_unmap(tx);
748 res.result = DMA_TRANS_ABORTED;
749 dmaengine_desc_get_callback_invoke(tx, &res);
751 tx->callback_result = NULL;
754 /* skip extended descriptors */
755 if (desc_has_ext(desc)) {
756 WARN_ON(i + 1 >= active);
760 /* cleanup super extended descriptors */
762 ioat_free_sed(ioat_dma, desc->sed);
767 smp_mb(); /* finish all descriptor reads before incrementing tail */
768 ioat_chan->tail = idx + active;
770 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
771 ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
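/*
 * ioat_eh() recovers a halted channel: reap up to the faulting descriptor,
 * decode CHANERR (and the PCI copy of it), fold validate-operation failures
 * (XOR P/CRC, XOR Q) into the descriptor's result, map recoverable read and
 * write errors to a DMA_TRANS_* result for the callback, and treat anything
 * left unhandled as fatal. The faulting descriptor is then completed, the
 * remaining descriptors aborted, the hardware reset, the error bits written
 * back to clear them, and the channel restarted.
 */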
774 static void ioat_eh(struct ioatdma_chan *ioat_chan)
776 struct pci_dev *pdev = to_pdev(ioat_chan);
777 struct ioat_dma_descriptor *hw;
778 struct dma_async_tx_descriptor *tx;
780 struct ioat_ring_ent *desc;
785 struct dmaengine_result res;
787 /* cleanup so tail points to the descriptor that caused the error */
788 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
789 __cleanup(ioat_chan, phys_complete);
791 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
792 pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
794 dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
795 __func__, chanerr, chanerr_int);
797 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
799 dump_desc_dbg(ioat_chan, desc);
801 switch (hw->ctl_f.op) {
802 case IOAT_OP_XOR_VAL:
803 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
804 *desc->result |= SUM_CHECK_P_RESULT;
805 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
809 case IOAT_OP_PQ_VAL_16S:
810 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
811 *desc->result |= SUM_CHECK_P_RESULT;
812 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
814 if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
815 *desc->result |= SUM_CHECK_Q_RESULT;
816 err_handled |= IOAT_CHANERR_XOR_Q_ERR;
821 if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
822 if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
823 res.result = DMA_TRANS_READ_FAILED;
824 err_handled |= IOAT_CHANERR_READ_DATA_ERR;
825 } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
826 res.result = DMA_TRANS_WRITE_FAILED;
827 err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
832 res.result = DMA_TRANS_NOERROR;
834 /* fault on unhandled error or spurious halt */
835 if (chanerr ^ err_handled || chanerr == 0) {
836 dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
837 __func__, chanerr, err_handled);
838 dev_err(to_dev(ioat_chan), "Errors handled:\n");
839 ioat_print_chanerrs(ioat_chan, err_handled);
840 dev_err(to_dev(ioat_chan), "Errors not handled:\n");
841 ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
846 /* cleanup the faulty descriptor since we are continuing */
849 dma_cookie_complete(tx);
850 dma_descriptor_unmap(tx);
851 dmaengine_desc_get_callback_invoke(tx, &res);
853 tx->callback_result = NULL;
856 /* mark faulting descriptor as complete */
857 *ioat_chan->completion = desc->txd.phys;
859 spin_lock_bh(&ioat_chan->prep_lock);
860 /* we need to abort all descriptors */
862 ioat_abort_descs(ioat_chan);
864 /* clean up the channel, we could be in a weird state */
864 ioat_reset_hw(ioat_chan);
867 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
868 pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
870 ioat_restart_channel(ioat_chan);
871 spin_unlock_bh(&ioat_chan->prep_lock);
874 static void check_active(struct ioatdma_chan *ioat_chan)
876 if (ioat_ring_active(ioat_chan)) {
877 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
881 if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
882 mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
885 static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
887 spin_lock_bh(&ioat_chan->prep_lock);
888 set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
889 spin_unlock_bh(&ioat_chan->prep_lock);
891 ioat_abort_descs(ioat_chan);
892 dev_warn(to_dev(ioat_chan), "Reset channel...\n");
893 ioat_reset_hw(ioat_chan);
894 dev_warn(to_dev(ioat_chan), "Restart channel...\n");
895 ioat_restart_channel(ioat_chan);
897 spin_lock_bh(&ioat_chan->prep_lock);
898 clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
899 spin_unlock_bh(&ioat_chan->prep_lock);
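/*
 * ioat_timer_event() is the watchdog: it fires COMPLETION_TIMEOUT (or
 * IDLE_TIMEOUT) after the last (re)arm and handles, in order, a halted
 * channel, an idle ring, a missed cleanup, a stuck channel that already had
 * one completion acknowledged (escalating to a channel reboot), and a
 * missed issue_pending, before re-arming itself.
 */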
902 void ioat_timer_event(struct timer_list *t)
904 struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
905 dma_addr_t phys_complete;
908 status = ioat_chansts(ioat_chan);
910 /* when halted due to errors, check for channel
911 * programming errors before advancing the completion state
913 if (is_ioat_halted(status)) {
916 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
917 dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
919 dev_err(to_dev(ioat_chan), "Errors:\n");
920 ioat_print_chanerrs(ioat_chan, chanerr);
922 if (test_bit(IOAT_RUN, &ioat_chan->state)) {
923 spin_lock_bh(&ioat_chan->cleanup_lock);
924 ioat_reboot_chan(ioat_chan);
925 spin_unlock_bh(&ioat_chan->cleanup_lock);
931 spin_lock_bh(&ioat_chan->cleanup_lock);
933 /* handle the no-actives case */
934 if (!ioat_ring_active(ioat_chan)) {
935 spin_lock_bh(&ioat_chan->prep_lock);
936 check_active(ioat_chan);
937 spin_unlock_bh(&ioat_chan->prep_lock);
941 /* handle the missed cleanup case */
942 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
943 /* timer restarted in ioat_cleanup_preamble
944 * and IOAT_COMPLETION_ACK cleared
946 __cleanup(ioat_chan, phys_complete);
950 /* if we haven't made progress and we have already
951 * acknowledged a pending completion once, then be more
952 * forceful with a restart
954 if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
957 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
958 dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
960 dev_err(to_dev(ioat_chan), "Errors:\n");
961 ioat_print_chanerrs(ioat_chan, chanerr);
963 dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
964 ioat_ring_active(ioat_chan));
966 ioat_reboot_chan(ioat_chan);
971 /* handle missed issue pending case */
972 if (ioat_ring_pending(ioat_chan)) {
973 dev_warn(to_dev(ioat_chan),
974 "Completion timeout with pending descriptors\n");
975 spin_lock_bh(&ioat_chan->prep_lock);
976 __ioat_issue_pending(ioat_chan);
977 spin_unlock_bh(&ioat_chan->prep_lock);
980 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
981 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
983 spin_unlock_bh(&ioat_chan->cleanup_lock);
987 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
988 struct dma_tx_state *txstate)
990 struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
993 ret = dma_cookie_status(c, cookie, txstate);
994 if (ret == DMA_COMPLETE)
997 ioat_cleanup(ioat_chan);
999 return dma_cookie_status(c, cookie, txstate);
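/*
 * ioat_reset_hw() throws the channel state away: quiesce briefly, clear
 * CHANERR, apply the pre-3.3 PCI error-clearing and TBG0 parity-error
 * workarounds, then perform a synchronous reset. On BWD parts running in
 * MSI-X mode the three registers at the offsets below are saved before the
 * reset and written back afterwards (presumably because the reset clobbers
 * them).
 */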
1002 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
1004 /* throw away whatever the channel was doing and get it
1005 * initialized, with ioat3 specific workarounds
1007 struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
1008 struct pci_dev *pdev = ioat_dma->pdev;
1013 ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
1015 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1016 writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1018 if (ioat_dma->version < IOAT_VER_3_3) {
1019 /* clear any pending errors */
1020 err = pci_read_config_dword(pdev,
1021 IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1024 "channel error register unreachable\n");
1027 pci_write_config_dword(pdev,
1028 IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1030 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1031 * (workaround for spurious config parity error after restart)
1033 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1034 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
1035 pci_write_config_dword(pdev,
1036 IOAT_PCI_DMAUNCERRSTS_OFFSET,
1041 if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1042 ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
1043 ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
1044 ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
1048 err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
1050 if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1051 writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
1052 writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
1053 writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
1058 dev_err(&pdev->dev, "Failed to reset: %d\n", err);