// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/string_helpers.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

#define RING_FIRST_USABLE_HOPID	1
/*
 * Used with QUIRK_E2E to specify an unused HopID to which the Rx credits are
 * transferred.
 */
#define RING_E2E_RESERVED_HOPID	RING_FIRST_USABLE_HOPID
/*
 * Minimal number of vectors when we use MSI-X. Two for control channel
 * Rx/Tx and the remaining four are for cross domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */

/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT	BIT(0)
#define QUIRK_E2E		BIT(1)

static bool host_reset = true;
module_param(host_reset, bool, 0444);
MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
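/*
 * Illustrative example (not part of the driver): if the hardware reports
 * hop_count = 12, then TX ring 3 maps to interrupt status bit 3 and RX
 * ring 3 maps to bit 12 + 3 = 15. Both land in the first 32-bit
 * status/mask register, i.e. REG_RING_INTERRUPT_BASE + (bit / 32) * 4.
 */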
static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{
	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
		u32 val;

		val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
		iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
	} else {
		iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
	}
}
static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
{
	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
	else
		iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
}
/*
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int index = ring_interrupt_index(ring) / 32 * 4;
	int reg = REG_RING_INTERRUPT_BASE + index;
	int interrupt_bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << interrupt_bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int auto_clear_bit;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Intel routers support a bit that isn't part of
		 * the USB4 spec to ask the hardware to clear
		 * interrupt status bits automatically since
		 * we already know which interrupt was triggered.
		 *
		 * Other routers explicitly disable auto-clear
		 * to prevent conditions that may occur where two
		 * MSIX interrupts are simultaneously active and
		 * reading the register clears both of them.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
			auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
		else
			auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
		if (!(misc & auto_clear_bit))
			iowrite32(misc | auto_clear_bit,
				  ring->nhi->iobase + REG_DMA_MISC);

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, interrupt_bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");

	if (active)
		iowrite32(new, ring->nhi->iobase + reg);
	else
		nhi_mask_interrupt(ring->nhi, mask, index);
}
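/*
 * Illustrative example (assuming the 4-bits-per-ring vector allocation
 * layout of the REG_INT_VEC_ALLOC_* constants in nhi_regs.h): for
 * index 10 the code above computes step = 10 / 8 * 4 = 4 and
 * shift = 10 % 8 * 4 = 8, so the ring's MSI-X vector number is written
 * into bits 11:8 of the second interrupt vector allocation register.
 */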
/*
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		nhi_mask_interrupt(nhi, ~0, 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		nhi_clear_interrupt(nhi, 4 * i);
}
/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}
static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to them
	 * are ignored by the hardware, so we can save one ioread32() by
	 * filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
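/*
 * The descriptor ring register at offset 8 packs the producer index into
 * bits 31:16 and the consumer index into bits 15:0. Writing producer
 * index 5 with ring_iowrite_prod() therefore stores 0x00050000; the half
 * owned by the hardware is read-only from the driver side, which is why
 * the helpers above can simply fill it with zeroes.
 */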
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
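/*
 * Illustrative example: with size = 16, head == tail means the ring is
 * empty and (head + 1) % 16 == tail means it is full, so at most 15
 * descriptors can be outstanding at once; one slot is always sacrificed
 * to tell the two states apart.
 */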
/*
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}
/*
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
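/*
 * Illustrative usage sketch (not part of the driver): clients normally
 * post frames through the tb_ring_rx()/tb_ring_tx() wrappers from
 * include/linux/thunderbolt.h, which call __tb_ring_enqueue(). Roughly,
 * for a TX frame (my_tx_callback and pdf are hypothetical):
 *
 *	frame->buffer_phy = dma_map_single(&nhi->pdev->dev, buf, size,
 *					   DMA_TO_DEVICE);
 *	frame->callback = my_tx_callback;
 *	frame->size = size;
 *	frame->sof = pdf;
 *	frame->eof = pdf;
 *	ret = tb_ring_tx(ring, frame);
 *
 * The callback then runs from the ring worker once the descriptor
 * completes, or with canceled = true if the ring is stopped first.
 */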
/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed
 * frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);
static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}
/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}
/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
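/*
 * Illustrative polling flow (not part of the driver): a client that
 * passed @start_poll to tb_ring_alloc_rx() typically drains the ring from
 * its poll routine once start_poll has fired, and then unmasks the
 * interrupt again:
 *
 *	while ((frame = tb_ring_poll(ring)) != NULL)
 *		process_frame(frame);		// hypothetical consumer
 *	tb_ring_poll_complete(ring);
 *
 * This is roughly how the thunderbolt networking driver drives its NAPI
 * poll loop.
 */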
static void ring_clear_msix(const struct tb_ring *ring)
{
	int bit;

	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
		return;

	bit = ring_interrupt_index(ring) & 31;
	if (ring->is_tx)
		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
	else
		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
			  4 * (ring->nhi->hop_count / 32));
}
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	ring_clear_msix(ring);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ret < 0)
		goto err_ida_remove;

	ring->irq = ret;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	if (ret)
		goto err_ida_remove;

	return 0;

err_ida_remove:
	ida_simple_remove(&nhi->msix_ida, ring->vector);

	return ret;
}
static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	unsigned int start_hop = RING_FIRST_USABLE_HOPID;
	int ret = 0;

	if (nhi->quirks & QUIRK_E2E) {
		start_hop = RING_FIRST_USABLE_HOPID + 1;
		if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
			dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
				ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
			ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
		}
	}

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 1 .. hop_count - 1.
		 */
		for (i = start_hop; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop > 0 && ring->hop < start_hop) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}
	if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->e2e_tx_hop = e2e_tx_hop;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}
/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of callback
 *		in each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
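/*
 * Illustrative example (not part of the driver): allocating and starting
 * a 256 entry RX ring with automatic HopID selection, frame mode, no E2E
 * flow control and SOF/EOF masks that accept any PDF value:
 *
 *	ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME, 0,
 *				0xffff, 0xffff, NULL, NULL);
 *	if (!ring)
 *		return -ENOMEM;
 *	tb_ring_start(ring);
 *
 * Real users pass the masks that match their protocol instead of 0xffff.
 */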
/**
 * tb_ring_start() - enable a ring
 * @ring: Ring to start
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}

	/*
	 * Now that the ring valid bit is set we can configure E2E if
	 * enabled for the ring.
	 */
	if (ring->flags & RING_FLAG_E2E) {
		if (!ring->is_tx) {
			u32 hop;

			hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
			hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
			flags |= hop;

			dev_dbg(&ring->nhi->pdev->dev,
				"enabling E2E for %s %d with TX HopID %d\n",
				RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
		} else {
			dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
				RING_TYPE(ring), ring->hop);
		}

		flags |= RING_FLAG_E2E_FLOW_CONTROL;
		ring_iowrite32options(ring, flags, 0);
	}

	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);
/**
 * tb_ring_stop() - shutdown a ring
 * @ring: Ring to stop
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring has been
 * started again.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);
/**
 * tb_ring_free() - free ring
 * @ring: Ring to free
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);
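/*
 * Illustrative teardown order (not part of the driver): the owner of a
 * ring is expected to stop it before freeing it, outside of any frame
 * callback:
 *
 *	tb_ring_stop(ring);	// cancels queued frames, runs their callbacks
 *	tb_ring_free(ring);	// releases the MSI-X vector and descriptors
 */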
/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads current firmware operation mode using NHI mailbox
 * registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
	return (enum nhi_fw_mode)val;
}
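/*
 * Illustrative example (not part of this file): the ICM connection
 * manager uses these helpers roughly like this to check what the
 * firmware is doing and to send it a command:
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_CM_MODE)
 *		ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 */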
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}
static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_suspend_noirq(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->suspend_noirq) {
		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
		if (ret)
			return ret;
	}

	return 0;
}

static int nhi_suspend_noirq(struct device *dev)
{
	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}
static int nhi_freeze_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_freeze_noirq(tb);
}

static int nhi_thaw_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_thaw_noirq(tb);
}
static bool nhi_wake_supported(struct pci_dev *pdev)
{
	u8 val;

	/*
	 * If power rails are sustainable for wakeup from S4 this
	 * property is set by the BIOS.
	 */
	if (!device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
		return !!val;

	return true;
}

static int nhi_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool wakeup;

	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
	return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few of them.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}
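/*
 * The value programmed above works out to DIV_ROUND_UP(128 * 1000, 256) =
 * 500 units of 256 ns, i.e. each vector fires at most once per ~128 us.
 */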
static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device which causes the host controller to go
	 * into sleep mode.
	 */
	if (!pci_device_is_present(pdev)) {
		nhi->going_away = true;
	} else {
		if (nhi->ops && nhi->ops->resume_noirq) {
			ret = nhi->ops->resume_noirq(nhi);
			if (ret)
				return ret;
		}
		nhi_enable_int_throttling(tb->nhi);
	}

	return tb_domain_resume_noirq(tb);
}
static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}

static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	 */
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
	else
		tb_domain_complete(tb);
}
static int nhi_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_runtime_suspend(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->runtime_suspend) {
		ret = nhi->ops->runtime_suspend(tb->nhi);
		if (ret)
			return ret;
	}
	return 0;
}

static int nhi_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	if (nhi->ops && nhi->ops->runtime_resume) {
		ret = nhi->ops->runtime_resume(nhi);
		if (ret)
			return ret;
	}

	nhi_enable_int_throttling(nhi);
	return tb_domain_runtime_resume(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;

	dev_dbg(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);

	if (nhi->ops && nhi->ops->shutdown)
		nhi->ops->shutdown(nhi);
}
static void nhi_check_quirks(struct tb_nhi *nhi)
{
	if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
		/*
		 * Intel hardware supports auto clear of the interrupt
		 * status register right after the interrupt is issued.
		 */
		nhi->quirks |= QUIRK_AUTO_CLEAR_INT;

		switch (nhi->pdev->device) {
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
		case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
			/*
			 * Falcon Ridge controller needs the end-to-end
			 * flow control workaround to avoid losing Rx
			 * packets when RING_FLAG_E2E is set.
			 */
			nhi->quirks |= QUIRK_E2E;
			break;
		}
	}
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
{
	if (!pdev->external_facing ||
	    !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
		return 0;
	*(bool *)data = true;
	return 1; /* Stop walking */
}

static void nhi_check_iommu(struct tb_nhi *nhi)
{
	struct pci_bus *bus = nhi->pdev->bus;
	bool port_ok = false;

	/*
	 * Ideally what we'd do here is grab every PCI device that
	 * represents a tunnelling adapter for this NHI and check their
	 * status directly, but unfortunately USB4 seems to make it
	 * obnoxiously difficult to reliably make any correlation.
	 *
	 * So for now we'll have to bodge it... Hoping that the system
	 * is at least sane enough that an adapter is in the same PCI
	 * segment as its NHI, if we can find *something* on that segment
	 * which meets the requirements for Kernel DMA Protection, we'll
	 * take that to imply that firmware is aware and has (hopefully)
	 * done the right thing in general. We need to know that the PCI
	 * layer has seen the ExternalFacingPort property which will then
	 * inform the IOMMU layer to enforce the complete "untrusted DMA"
	 * flow, but also that the IOMMU driver itself can be trusted not
	 * to have been subverted by a pre-boot DMA attack.
	 */
	while (bus->parent)
		bus = bus->parent;

	pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);

	nhi->iommu_dma_protection = port_ok;
	dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
		str_enabled_disabled(port_ok));
}
static void nhi_reset(struct tb_nhi *nhi)
{
	ktime_t timeout;
	u32 val;

	val = ioread32(nhi->iobase + REG_CAPS);
	/* Reset only v2 and later routers */
	if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
		return;

	if (!host_reset) {
		dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
		return;
	}

	iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
	msleep(100);

	timeout = ktime_add_ms(ktime_get(), 500);
	do {
		val = ioread32(nhi->iobase + REG_RESET);
		if (!(val & REG_RESET_HRR)) {
			dev_warn(&nhi->pdev->dev, "host router reset successful\n");
			return;
		}
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
}
static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	struct device *dev = &pdev->dev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res)
			return dev_err_probe(dev, res, "request_irq failed, aborting\n");
	}

	return 0;
}
static bool nhi_imr_valid(struct pci_dev *pdev)
{
	u8 val;

	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
		return !!val;
	return true;
}
static struct tb *nhi_select_cm(struct tb_nhi *nhi)
{
	struct tb *tb;

	/*
	 * USB4 case is simple. If we got control of any of the
	 * capabilities, we use software CM.
	 */
	if (tb_acpi_is_native())
		return tb_probe(nhi);

	/*
	 * Either firmware based CM is running (we did not get control
	 * from the firmware) or this is a pre-USB4 PC, so try the firmware
	 * CM first and then fall back to the software CM.
	 */
	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);

	return tb;
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	if (!nhi_imr_valid(pdev))
		return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");

	res = pcim_enable_device(pdev);
	if (res)
		return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res)
		return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
	dev_dbg(dev, "total paths: %d\n", nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	nhi_check_quirks(nhi);
	nhi_check_iommu(nhi);

	nhi_reset(nhi);

	res = nhi_init_msi(nhi);
	if (res)
		return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");

	spin_lock_init(&nhi->lock);

	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (res)
		return dev_err_probe(dev, res, "failed to set DMA mask\n");

	pci_set_master(pdev);

	if (nhi->ops && nhi->ops->init) {
		res = nhi->ops->init(nhi);
		if (res)
			return res;
	}

	tb = nhi_select_cm(nhi);
	if (!tb)
		return dev_err_probe(dev, -ENODEV,
			"failed to determine connection manager, aborting\n");

	dev_dbg(dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return res;
	}
	pci_set_drvdata(pdev, tb);

	device_wakeup_enable(&pdev->dev);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}
static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}
/*
 * The tunneled PCI bridges are siblings of ours. Use resume_noirq to re-enable
 * the tunnels as early as possible. A corresponding PCI quirk blocks the
 * downstream bridges' resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_freeze_noirq,  /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.thaw_noirq = nhi_thaw_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.poweroff_noirq = nhi_poweroff_noirq,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
};
static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	/* Thunderbolt 3 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	/* Thunderbolt 4 */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },

	/* Any USB4 compliant host */
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },

	{ 0,}
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.shutdown = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};
static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

rootfs_initcall(nhi_init);
module_exit(nhi_unload);