// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the pci device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

#define RING_FIRST_USABLE_HOPID	1
/*
 * Used with QUIRK_E2E to specify an unused HopID the Rx credits are
 * transferred through.
 */
#define RING_E2E_RESERVED_HOPID	RING_FIRST_USABLE_HOPID
/*
 * Minimal number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT	BIT(0)
#define QUIRK_E2E		BIT(1)
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;

	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
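/*
 * Worked example (illustrative, numbers assumed): with hop_count = 12,
 * TX ring 5 uses interrupt bit 5 while RX ring 5 uses bit 5 + 12 = 17.
 * ring_interrupt_active() below turns that bit index into a register
 * offset (bit / 32 * 4) and a bit position (bit & 31).
 */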
/*
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
			/*
			 * Ask the hardware to clear interrupt status
			 * bits automatically since we already know
			 * which interrupt was triggered.
			 */
			misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
			if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
				misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
				iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
			}
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}
/*
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;

	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}
/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;

	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;

	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to
	 * them are ignored by the hardware, so we can save one ioread32()
	 * by filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
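/*
 * A minimal sketch (not part of the original driver): because head ==
 * tail means "empty", the head/tail scheme above always leaves one
 * descriptor unused, so a ring of @size entries holds at most
 * @size - 1 posted frames. A hypothetical helper computing the current
 * occupancy would be:
 */
static inline int ring_occupancy(const struct tb_ring *ring)
{
	/* Number of descriptors currently in use (0 .. size - 1) */
	return (ring->head - ring->tail + ring->size) % ring->size;
}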
/*
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;

	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}
/*
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
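/*
 * Enqueue usage sketch (illustrative; the example names are
 * hypothetical). Clients normally go through the tb_ring_tx() and
 * tb_ring_rx() wrappers from <linux/thunderbolt.h>, which sanity-check
 * the ring direction before calling __tb_ring_enqueue(). The frame
 * buffer must already be DMA mapped:
 *
 *	frame->buffer_phy = dma_map_single(&nhi->pdev->dev, buf,
 *					   TB_FRAME_SIZE, DMA_TO_DEVICE);
 *	frame->callback = example_tx_callback;
 *	frame->sof = example_sof_pdf;	// PDF values chosen by the client
 *	frame->eof = example_eof_pdf;
 *	ret = tb_ring_tx(tx_ring, frame); // -ESHUTDOWN if ring is stopped
 */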
/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed
 * frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);
static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}
/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}
/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
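/*
 * Polling-mode usage sketch (illustrative; the example_* names are
 * hypothetical). A client that passed @start_poll to tb_ring_alloc_rx()
 * is notified with the interrupt masked and drains the ring itself:
 *
 *	static void example_start_poll(void *data)
 *	{
 *		struct example *ex = data;
 *
 *		schedule_work(&ex->rx_work);
 *	}
 *
 *	static void example_rx_work(struct work_struct *work)
 *	{
 *		struct example *ex = container_of(work, typeof(*ex), rx_work);
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(ex->ring)) != NULL)
 *			example_handle_frame(ex, frame);
 *
 *		tb_ring_poll_complete(ex->ring);	// unmask the interrupt
 *	}
 */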
static void ring_clear_msix(const struct tb_ring *ring)
{
	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
		return;

	if (ring->is_tx)
		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
	else
		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
			 4 * (ring->nhi->hop_count / 32));
}
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	ring_clear_msix(ring);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ret < 0)
		goto err_ida_remove;

	ring->irq = ret;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	if (ret)
		goto err_ida_remove;

	return 0;

err_ida_remove:
	ida_simple_remove(&nhi->msix_ida, ring->vector);

	return ret;
}
static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	unsigned int start_hop = RING_FIRST_USABLE_HOPID;
	int ret = 0;

	if (nhi->quirks & QUIRK_E2E) {
		start_hop = RING_FIRST_USABLE_HOPID + 1;
		if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
			dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
				ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
			ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
		}
	}

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 1 .. hop_count - 1.
		 */
		for (i = start_hop; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop > 0 && ring->hop < start_hop) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->e2e_tx_hop = e2e_tx_hop;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}
/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of callback
 *		in each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
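/*
 * Ring lifecycle sketch (illustrative): a typical client allocates a
 * ring pair with automatic HopID assignment, starts both, and tears
 * them down in reverse order. The sizes and masks here are assumptions,
 * not values mandated by this driver:
 *
 *	tx = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_NO_SUSPEND);
 *	rx = tb_ring_alloc_rx(nhi, -1, 16, RING_FLAG_NO_SUSPEND, 0,
 *			      0xffff, 0xffff, NULL, NULL);
 *	if (!tx || !rx)
 *		goto err;
 *	tb_ring_start(tx);
 *	tb_ring_start(rx);
 *	...
 *	tb_ring_stop(rx);
 *	tb_ring_stop(tx);
 *	tb_ring_free(rx);
 *	tb_ring_free(tx);
 */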
/**
 * tb_ring_start() - enable a ring
 * @ring: Ring to start
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}

	/*
	 * Now that the ring valid bit is set we can configure E2E if
	 * enabled for the ring.
	 */
	if (ring->flags & RING_FLAG_E2E) {
		if (!ring->is_tx) {
			u32 hop;

			hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
			hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
			flags |= hop;

			dev_dbg(&ring->nhi->pdev->dev,
				"enabling E2E for %s %d with TX HopID %d\n",
				RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
		} else {
			dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
				RING_TYPE(ring), ring->hop);
		}

		flags |= RING_FLAG_E2E_FLOW_CONTROL;
		ring_iowrite32options(ring, flags, 0);
	}

	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);
/**
 * tb_ring_stop() - shutdown a ring
 * @ring: Ring to stop
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN once this method has
 * been called.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);
/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);
/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads the current firmware operation mode using NHI
 * mailbox registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}
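/*
 * Mailbox usage sketch (illustrative, assuming the
 * NHI_MAILBOX_DRV_UNLOADS command and NHI_FW_SAFE_MODE mode from
 * nhi.h): the ICM code pairs these helpers, e.g. telling the firmware
 * that the driver is unloading, or checking which mode it runs in:
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		return ret;
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_SAFE_MODE)
 *		...	// e.g. only NVM upgrade is possible
 */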
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;

	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}
static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_suspend_noirq(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->suspend_noirq) {
		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
		if (ret)
			return ret;
	}

	return 0;
}
static int nhi_suspend_noirq(struct device *dev)
{
	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}
static int nhi_freeze_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_freeze_noirq(tb);
}
static int nhi_thaw_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_thaw_noirq(tb);
}
static bool nhi_wake_supported(struct pci_dev *pdev)
{
	u8 val;

	/*
	 * If power rails are sustainable for wakeup from S4 this
	 * property is set by the BIOS.
	 */
	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
		return !!val;

	return true;
}
static int nhi_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool wakeup;

	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
	return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;

		iowrite32(throttle, nhi->iobase + reg);
	}
}
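/*
 * Worked example: DIV_ROUND_UP(128 * NSEC_PER_USEC, 256) =
 * DIV_ROUND_UP(128000, 256) = 500 increments of 256 ns, i.e. each
 * vector fires at most once every ~128 us.
 */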
static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device, which causes the host controller to
	 * go away on PCs.
	 */
	if (!pci_device_is_present(pdev)) {
		nhi->going_away = true;
	} else {
		if (nhi->ops && nhi->ops->resume_noirq) {
			ret = nhi->ops->resume_noirq(nhi);
			if (ret)
				return ret;
		}
		nhi_enable_int_throttling(tb->nhi);
	}

	return tb_domain_resume_noirq(tb);
}
static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}
static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	 */
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
	else
		tb_domain_complete(tb);
}
static int nhi_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_runtime_suspend(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->runtime_suspend) {
		ret = nhi->ops->runtime_suspend(tb->nhi);
		if (ret)
			return ret;
	}
	return 0;
}
static int nhi_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	if (nhi->ops && nhi->ops->runtime_resume) {
		ret = nhi->ops->runtime_resume(nhi);
		if (ret)
			return ret;
	}

	nhi_enable_int_throttling(nhi);
	return tb_domain_runtime_resume(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;

	dev_dbg(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);

	if (nhi->ops && nhi->ops->shutdown)
		nhi->ops->shutdown(nhi);
}
static void nhi_check_quirks(struct tb_nhi *nhi)
{
	if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
		/*
		 * Intel hardware supports auto clearing of the interrupt
		 * status register right after the interrupt is acked.
		 */
		nhi->quirks |= QUIRK_AUTO_CLEAR_INT;

		switch (nhi->pdev->device) {
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
			/*
			 * Falcon Ridge controller needs the end-to-end
			 * flow control workaround to avoid losing Rx
			 * packets when RING_FLAG_E2E is set.
			 */
			nhi->quirks |= QUIRK_E2E;
			break;
		}
	}
}
static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}
static bool nhi_imr_valid(struct pci_dev *pdev)
{
	u8 val;

	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
		return !!val;

	return true;
}
static struct tb *nhi_select_cm(struct tb_nhi *nhi)
{
	struct tb *tb;

	/*
	 * USB4 case is simple. If we got control of any of the
	 * capabilities, we use software CM.
	 */
	if (tb_acpi_is_native())
		return tb_probe(nhi);

	/*
	 * Either firmware based CM is running (we did not get control
	 * from the firmware) or this is a pre-USB4 PC, so try the
	 * firmware CM first and then fall back to the software CM.
	 */
	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);

	return tb;
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	if (!nhi_imr_valid(pdev)) {
		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
		return -ENODEV;
	}

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	nhi_check_quirks(nhi);

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	spin_lock_init(&nhi->lock);

	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (res)
		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (res) {
		dev_err(&pdev->dev, "failed to set DMA mask\n");
		return res;
	}

	pci_set_master(pdev);

	if (nhi->ops && nhi->ops->init) {
		res = nhi->ops->init(nhi);
		if (res)
			return res;
	}

	tb = nhi_select_cm(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return res;
	}
	pci_set_drvdata(pdev, tb);

	device_wakeup_enable(&pdev->dev);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}
static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}
/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_freeze_noirq,  /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.thaw_noirq = nhi_thaw_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.poweroff_noirq = nhi_poweroff_noirq,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
};
static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },

	/* Any USB4 compliant host */
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.shutdown = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};
static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

rootfs_initcall(nhi_init);
module_exit(nhi_unload);