// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 * Copyright (C) 2016 Christoph Hellwig.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/acpi_iort.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include "pci.h"

#ifdef CONFIG_PCI_MSI
static int pci_msi_enable = 1;
int pci_msi_ignore_mask;

#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
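/*
 * Worked example: the Table Size field in the MSI-X Message Control
 * register is N-1 encoded, so a QSIZE field of 0x003 describes a 4-entry
 * table and the architectural maximum of 0x7ff describes 2048 entries.
 */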
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain && irq_domain_is_hierarchy(domain))
		return msi_domain_alloc_irqs(domain, &dev->dev, nvec);

	return arch_setup_msi_irqs(dev, nvec, type);
}

static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain && irq_domain_is_hierarchy(domain))
		msi_domain_free_irqs(domain, &dev->dev);
	else
		arch_teardown_msi_irqs(dev);
}
#else
#define pci_msi_setup_msi_irqs		arch_setup_msi_irqs
#define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
#endif
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/* Arch hooks */
int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	return -EINVAL;
}

void __weak arch_teardown_msi_irq(unsigned int irq)
{
}

int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	/*
	 * If an architecture wants to support multiple MSI, it needs to
	 * override arch_setup_msi_irqs().
	 */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	for_each_pci_msi_entry(entry, dev) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}

void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
	int i;
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				arch_teardown_msi_irq(entry->irq + i);
}
#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;

	entry = NULL;
	if (dev->msix_enabled) {
		for_each_pci_msi_entry(entry, dev) {
			if (irq == entry->irq)
				break;
		}
	} else if (dev->msi_enabled) {
		entry = irq_get_msi_desc(irq);
	}

	if (entry)
		__pci_write_msi_msg(entry, &entry->msg);
}

void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
	return default_restore_msi_irqs(dev);
}
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}
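/*
 * Worked example: @x is the log2 encoding of the vector count, so
 * msi_mask(0) == 0x00000001 (1 vector), msi_mask(3) == 0x000000ff
 * (8 vectors) and msi_mask(5) == 0xffffffff (the full 32 vectors).
 */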
/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
		return 0;

	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
			       mask_bits);

	return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
	if (desc->msi_attrib.is_virtual)
		return NULL;

	return desc->mask_base +
		desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
}
/*
 * This internal function does not flush PCI writes to the device.
 * All users must ensure that they read from the device before either
 * assuming that the device state is up to date, or returning out of this
 * file. This saves a few milliseconds when initialising devices with lots
 * of MSI-X interrupts.
 */
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
	u32 mask_bits = desc->masked;
	void __iomem *desc_addr;

	if (pci_msi_ignore_mask)
		return 0;

	desc_addr = pci_msix_desc_addr(desc);
	if (!desc_addr)
		return 0;

	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);

	return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	desc->masked = __pci_msix_desc_mask_irq(desc, flag);
}
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	if (desc->msi_attrib.is_msix) {
		msix_mask_irq(desc, flag);
		readl(desc->mask_base);		/* Flush write to device */
	} else {
		unsigned offset = data->irq - desc->irq;
		msi_mask_irq(desc, 1 << offset, flag << offset);
	}
}
/**
 * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
 * @data:	pointer to irqdata associated with that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 1);
}
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);

/**
 * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
 * @data:	pointer to irqdata associated with that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 0);
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
void default_restore_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		default_restore_msi_irq(dev, entry->irq);
}
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	BUG_ON(dev->current_state != PCI_D0);

	if (entry->msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);

		if (WARN_ON_ONCE(!base)) {
			msg->address_lo = 0;
			msg->address_hi = 0;
			msg->data = 0;
			return;
		}

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 data;

		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				      &msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					      &msg->address_hi);
			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
		}
		msg->data = data;
	}
}
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
		/* Don't touch the hardware now */
	} else if (entry->msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);
		bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT);

		if (!base)
			goto skip;

		/*
		 * The specification mandates that the entry is masked
		 * when the message is modified:
		 *
		 * "If software changes the Address or Data value of an
		 * entry while the entry is unmasked, the result is
		 * undefined."
		 */
		if (unmasked)
			__pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT);

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);

		if (unmasked)
			__pci_msix_desc_mask_irq(entry, 0);

		/* Ensure that the writes are visible in the device */
		readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 msgctl;

		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->msi_attrib.multiple << 4;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				       msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					       msg->address_hi);
			pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
					      msg->data);
		} else {
			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
					      msg->data);
		}
		/* Ensure that the writes are visible in the device */
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
	}

skip:
	entry->msg = *msg;

	if (entry->write_msi_msg)
		entry->write_msi_msg(entry, entry->write_msi_msg_data);
}
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
	struct msi_desc *entry, *tmp;
	struct attribute **msi_attrs;
	struct device_attribute *dev_attr;
	int i, count = 0;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				BUG_ON(irq_has_action(entry->irq + i));

	pci_msi_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			if (list_is_last(&entry->list, msi_list))
				iounmap(entry->mask_base);
		}

		list_del(&entry->list);
		free_msi_entry(entry);
	}

	if (dev->msi_irq_groups) {
		sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
		msi_attrs = dev->msi_irq_groups[0]->attrs;
		while (msi_attrs[count]) {
			dev_attr = container_of(msi_attrs[count],
						struct device_attribute, attr);
			kfree(dev_attr->attr.name);
			kfree(dev_attr);
			++count;
		}
		kfree(msi_attrs);
		kfree(dev->msi_irq_groups[0]);
		kfree(dev->msi_irq_groups);
		dev->msi_irq_groups = NULL;
	}
}
static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void pci_msi_set_enable(struct pci_dev *dev, int enable)
{
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_ENABLE;
	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 0);
	arch_restore_msi_irqs(dev);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
		     entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
{
	u16 ctrl;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	ctrl &= ~clear;
	ctrl |= set;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
}
static void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!dev->msix_enabled)
		return;
	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

	/* route the table */
	pci_intx_for_msi(dev, 0);
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	arch_restore_msi_irqs(dev);
	for_each_pci_msi_entry(entry, dev)
		msix_mask_irq(entry, entry->masked);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct msi_desc *entry;
	unsigned long irq;
	int retval;

	retval = kstrtoul(attr->attr.name, 10, &irq);
	if (retval)
		return retval;

	entry = irq_get_msi_desc(irq);
	if (!entry)
		return -ENODEV;

	return sysfs_emit(buf, "%s\n",
			  entry->msi_attrib.is_msix ? "msix" : "msi");
}
static int populate_msi_sysfs(struct pci_dev *pdev)
{
	struct attribute **msi_attrs;
	struct attribute *msi_attr;
	struct device_attribute *msi_dev_attr;
	struct attribute_group *msi_irq_group;
	const struct attribute_group **msi_irq_groups;
	struct msi_desc *entry;
	int ret = -ENOMEM;
	int num_msi = 0;
	int count = 0;
	int i;

	/* Determine how many msi entries we have */
	for_each_pci_msi_entry(entry, pdev)
		num_msi += entry->nvec_used;
	if (!num_msi)
		return 0;

	/* Dynamically create the MSI attributes for the PCI device */
	msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
	if (!msi_attrs)
		return -ENOMEM;
	for_each_pci_msi_entry(entry, pdev) {
		for (i = 0; i < entry->nvec_used; i++) {
			msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
			if (!msi_dev_attr)
				goto error_attrs;
			msi_attrs[count] = &msi_dev_attr->attr;

			sysfs_attr_init(&msi_dev_attr->attr);
			msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
							    entry->irq + i);
			if (!msi_dev_attr->attr.name)
				goto error_attrs;
			msi_dev_attr->attr.mode = S_IRUGO;
			msi_dev_attr->show = msi_mode_show;
			++count;
		}
	}

	msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
	if (!msi_irq_group)
		goto error_attrs;
	msi_irq_group->name = "msi_irqs";
	msi_irq_group->attrs = msi_attrs;

	msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
	if (!msi_irq_groups)
		goto error_irq_group;
	msi_irq_groups[0] = msi_irq_group;

	ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
	if (ret)
		goto error_irq_groups;
	pdev->msi_irq_groups = msi_irq_groups;

	return 0;

error_irq_groups:
	kfree(msi_irq_groups);
error_irq_group:
	kfree(msi_irq_group);
error_attrs:
	count = 0;
	msi_attr = msi_attrs[count];
	while (msi_attr) {
		msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
		kfree(msi_attr->name);
		kfree(msi_dev_attr);
		++count;
		msi_attr = msi_attrs[count];
	}
	kfree(msi_attrs);
	return ret;
}
static struct msi_desc *
msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
{
	struct irq_affinity_desc *masks = NULL;
	struct msi_desc *entry;
	u16 control;

	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	/* MSI Entry Initialization */
	entry = alloc_msi_entry(&dev->dev, nvec, masks);
	if (!entry)
		goto out;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	entry->msi_attrib.is_msix	= 0;
	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
	entry->msi_attrib.is_virtual	= 0;
	entry->msi_attrib.entry_nr	= 0;
	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
	/* Log2 of the vector count, rounded up to a power of two */
	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));

	if (control & PCI_MSI_FLAGS_64BIT)
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

	/* Save the initial mask status */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);

out:
	kfree(masks);
	return entry;
}
static int msi_verify_entries(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev) {
		if (entry->msg.address_hi && dev->no_64bit_msi) {
			pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
				entry->msg.address_hi, entry->msg.address_lo);
			return -EIO;
		}
	}
	return 0;
}
/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 * @affd: description of automatic IRQ affinity assignments (may be %NULL)
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts. A return value of zero indicates the successful
 * setup of an entry with the new MSI IRQ. A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec,
			       struct irq_affinity *affd)
{
	struct msi_desc *entry;
	int ret;
	unsigned mask;

	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */

	entry = msi_setup_entry(dev, nvec, affd);
	if (!entry)
		return -ENOMEM;

	/* All MSIs are unmasked by default; mask them all */
	mask = msi_mask(entry->msi_attrib.multi_cap);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));

	/* Configure MSI capability structure */
	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret) {
		msi_mask_irq(entry, mask, 0);
		free_msi_irqs(dev);
		return ret;
	}

	ret = msi_verify_entries(dev);
	if (ret) {
		msi_mask_irq(entry, mask, 0);
		free_msi_irqs(dev);
		return ret;
	}

	ret = populate_msi_sysfs(dev);
	if (ret) {
		msi_mask_irq(entry, mask, 0);
		free_msi_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	pcibios_free_irq(dev);
	dev->irq = entry->irq;
	return 0;
}
static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	unsigned long flags;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
			      &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
	flags = pci_resource_flags(dev, bir);
	if (!flags || (flags & IORESOURCE_UNSET))
		return NULL;

	table_offset &= PCI_MSIX_TABLE_OFFSET;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
			      struct msix_entry *entries, int nvec,
			      struct irq_affinity *affd)
{
	struct irq_affinity_desc *curmsk, *masks = NULL;
	struct msi_desc *entry;
	void __iomem *addr;
	int ret, i;
	int vec_count = pci_msix_vec_count(dev);

	if (affd)
		masks = irq_create_affinity_masks(nvec, affd);

	for (i = 0, curmsk = masks; i < nvec; i++) {
		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
		if (!entry) {
			if (!i)
				iounmap(base);
			else
				free_msi_irqs(dev);
			/* Not enough memory. Don't try again */
			ret = -ENOMEM;
			goto out;
		}

		entry->msi_attrib.is_msix	= 1;
		entry->msi_attrib.is_64		= 1;
		if (entries)
			entry->msi_attrib.entry_nr = entries[i].entry;
		else
			entry->msi_attrib.entry_nr = i;

		entry->msi_attrib.is_virtual =
			entry->msi_attrib.entry_nr >= vec_count;

		entry->msi_attrib.default_irq	= dev->irq;
		entry->mask_base		= base;

		addr = pci_msix_desc_addr(entry);
		if (addr)
			entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);

		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
		if (masks)
			curmsk++;
	}
	ret = 0;
out:
	kfree(masks);
	return ret;
}
static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev) {
		if (entries) {
			entries->vector = entry->irq;
			entries++;
		}
	}
}
static void msix_mask_all(void __iomem *base, int tsize)
{
	u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
	int i;

	for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 * @affd: Optional pointer to enable automatic affinity assignment
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of interrupts. A return value of zero indicates the
 * successful setup of requested MSI-X entries with allocated IRQs;
 * a non-zero value indicates an error.
 **/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
				int nvec, struct irq_affinity *affd)
{
	int ret, tsize;
	u16 control;
	void __iomem *base;

	/*
	 * Some devices require MSI-X to be enabled before the MSI-X
	 * registers can be accessed. Mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
				    PCI_MSIX_FLAGS_ENABLE);

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	/* Request & Map MSI-X table region */
	tsize = msix_table_size(control);
	base = msix_map_region(dev, tsize);
	if (!base) {
		ret = -ENOMEM;
		goto out_disable;
	}

	/* Ensure that all table entries are masked. */
	msix_mask_all(base, tsize);

	ret = msix_setup_entries(dev, base, entries, nvec, affd);
	if (ret)
		goto out_disable;

	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto out_avail;

	/* Check if all MSI entries honor device restrictions */
	ret = msi_verify_entries(dev);
	if (ret)
		goto out_free;

	msix_update_entries(dev, entries);

	ret = populate_msi_sysfs(dev);
	if (ret)
		goto out_free;

	/* Set MSI-X enabled bits and unmask the function */
	pci_intx_for_msi(dev, 0);
	dev->msix_enabled = 1;
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

	pcibios_free_irq(dev);
	return 0;

out_avail:
	if (ret < 0) {
		/*
		 * If we had some success, report the number of IRQs
		 * we succeeded in setting up.
		 */
		struct msi_desc *entry;
		int avail = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (entry->irq != 0)
				avail++;
		}
		if (avail != 0)
			ret = avail;
	}

out_free:
	free_msi_irqs(dev);

out_disable:
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

	return ret;
}
/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 1, else return 0.
 **/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable)
		return 0;

	if (!dev || dev->no_msi)
		return 0;

	/*
	 * You can't ask to have 0 or less MSIs configured:
	 *  a) there's no point,
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return 0;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 *
	 * The NO_MSI flag can either be set directly by:
	 * - arch-specific PCI host bus controller drivers (deprecated)
	 * - quirks for specific PCI bridges
	 *
	 * or indirectly by platform-specific PCI host bridge drivers by
	 * advertising the 'msi_domain' property, which results in
	 * the NO_MSI flag when no MSI domain is found for this bridge
	 * at probe time.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return 0;

	return 1;
}
/**
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * @dev: device to report about
 *
 * This function returns the number of MSI vectors a device requested via
 * Multiple Message Capable register. It returns a negative errno if the
 * device is not capable of sending MSI interrupts. Otherwise, the call
 * succeeds and returns a power of two, up to a maximum of 2^5 (32),
 * according to the MSI specification.
 **/
int pci_msi_vec_count(struct pci_dev *dev)
{
	int ret;
	u16 msgctl;

	if (!dev->msi_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
	ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);

	return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);
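/*
 * Worked example: a Message Control value of 0x0088 yields
 * ((0x0088 & PCI_MSI_FLAGS_QMASK) >> 1) == 4, i.e. the device is
 * requesting 1 << 4 == 16 vectors.
 */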
static void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
	desc = first_pci_msi_entry(dev);

	pci_msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	/* Return the device with MSI unmasked as its initial state */
	mask = msi_mask(desc->msi_attrib.multi_cap);
	__pci_msi_desc_mask_irq(desc, mask, 0);

	/* Restore dev->irq to its default pin-assertion IRQ */
	dev->irq = desc->msi_attrib.default_irq;
	pcibios_alloc_irq(dev);
}

void pci_disable_msi(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);
/**
 * pci_msix_vec_count - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 *
 * This function returns the number of device's MSI-X table entries and
 * therefore the number of MSI-X vectors device is capable of sending.
 * It returns a negative errno if the device is not capable of sending MSI-X
 * interrupts.
 **/
int pci_msix_vec_count(struct pci_dev *dev)
{
	u16 control;

	if (!dev->msix_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	return msix_table_size(control);
}
EXPORT_SYMBOL(pci_msix_vec_count);
static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
			     int nvec, struct irq_affinity *affd, int flags)
{
	int nr_entries;
	int i, j;

	if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
		return -EINVAL;

	nr_entries = pci_msix_vec_count(dev);
	if (nr_entries < 0)
		return nr_entries;
	if (nvec > nr_entries && !(flags & PCI_IRQ_VIRTUAL))
		return nr_entries;

	if (entries) {
		/* Check for any invalid entries */
		for (i = 0; i < nvec; i++) {
			if (entries[i].entry >= nr_entries)
				return -EINVAL;		/* invalid entry */
			for (j = i + 1; j < nvec; j++) {
				if (entries[i].entry == entries[j].entry)
					return -EINVAL;	/* duplicate entry */
			}
		}
	}

	/* Check whether the driver already requested MSI IRQs */
	if (dev->msi_enabled) {
		pci_info(dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	return msix_capability_init(dev, entries, nvec, affd);
}
static void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	if (pci_dev_is_disconnected(dev)) {
		dev->msix_enabled = 0;
		return;
	}

	/* Return the device with MSI-X masked as its initial state */
	for_each_pci_msi_entry(entry, dev)
		__pci_msix_desc_mask_irq(entry, 1);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
	pcibios_alloc_irq(dev);
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);
void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
				  struct irq_affinity *affd)
{
	int nvec;
	int rc;

	if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
		return -EINVAL;

	/* Check whether the driver already requested MSI-X IRQs */
	if (dev->msix_enabled) {
		pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
		return -EINVAL;
	}

	if (maxvec < minvec)
		return -ERANGE;

	if (WARN_ON_ONCE(dev->msi_enabled))
		return -EINVAL;

	nvec = pci_msi_vec_count(dev);
	if (nvec < 0)
		return nvec;
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = msi_capability_init(dev, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}

/* deprecated, don't use */
int pci_enable_msi(struct pci_dev *dev)
{
	int rc = __pci_enable_msi_range(dev, 1, 1, NULL);

	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL(pci_enable_msi);
static int __pci_enable_msix_range(struct pci_dev *dev,
				   struct msix_entry *entries, int minvec,
				   int maxvec, struct irq_affinity *affd,
				   int flags)
{
	int rc, nvec = maxvec;

	if (maxvec < minvec)
		return -ERANGE;

	if (WARN_ON_ONCE(dev->msix_enabled))
		return -EINVAL;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = __pci_enable_msix(dev, entries, nvec, affd, flags);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}
/**
 * pci_enable_msix_range - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries (optional)
 * @minvec: minimum number of MSI-X IRQs requested
 * @maxvec: maximum number of MSI-X IRQs requested
 *
 * Setup the MSI-X capability structure of the device function with the
 * largest possible number of interrupts in the range between @minvec and
 * @maxvec. It returns a negative errno if an error occurs. If it succeeds,
 * it returns the actual number of interrupts allocated and indicates the
 * successful configuration of the MSI-X capability structure with new
 * allocated MSI-X interrupts.
 **/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
			  int minvec, int maxvec)
{
	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0);
}
EXPORT_SYMBOL(pci_enable_msix_range);
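/*
 * Example (an illustrative sketch, not part of this file's API; the "foo"
 * names are hypothetical): a driver binding two specific MSI-X table
 * entries and requesting their Linux IRQ numbers:
 *
 *	struct msix_entry entries[] = { { .entry = 0 }, { .entry = 2 } };
 *	int nvec = pci_enable_msix_range(pdev, entries, 2, 2);
 *
 *	if (nvec < 0)
 *		return nvec;
 *	// entries[i].vector now holds the Linux IRQ for table entry i
 *	err = request_irq(entries[0].vector, foo_handler, 0, "foo", foo_dev);
 */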
/**
 * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
 * @dev:		PCI device to operate on
 * @min_vecs:		minimum number of vectors required (must be >= 1)
 * @max_vecs:		maximum (desired) number of vectors
 * @flags:		flags or quirks for the allocation
 * @affd:		optional description of the affinity requirements
 *
 * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
 * vectors if available, and fall back to a single legacy vector
 * if neither is available. Return the number of vectors allocated
 * (which might be smaller than @max_vecs) if successful, or a negative
 * error code on error. If less than @min_vecs interrupt vectors are
 * available for @dev the function will fail with -ENOSPC.
 *
 * To get the Linux IRQ number used for a vector that can be passed to
 * request_irq() use the pci_irq_vector() helper.
 */
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
				   unsigned int max_vecs, unsigned int flags,
				   struct irq_affinity *affd)
{
	struct irq_affinity msi_default_affd = {0};
	int nvecs = -ENOSPC;

	if (flags & PCI_IRQ_AFFINITY) {
		if (!affd)
			affd = &msi_default_affd;
	} else {
		if (WARN_ON(affd))
			affd = NULL;
	}

	if (flags & PCI_IRQ_MSIX) {
		nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
						affd, flags);
		if (nvecs > 0)
			return nvecs;
	}

	if (flags & PCI_IRQ_MSI) {
		nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
		if (nvecs > 0)
			return nvecs;
	}

	/* use legacy IRQ if allowed */
	if (flags & PCI_IRQ_LEGACY) {
		if (min_vecs == 1 && dev->irq) {
			/*
			 * Invoke the affinity spreading logic to ensure that
			 * the device driver can adjust queue configuration
			 * for the single interrupt case.
			 */
			if (affd)
				irq_create_affinity_masks(1, affd);
			pci_intx(dev, 1);
			return 1;
		}
	}

	return nvecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
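/*
 * Example (an illustrative sketch; the "foo" names are hypothetical): the
 * usual pattern pairs pci_alloc_irq_vectors(), which wraps this function
 * with @affd == NULL, with pci_irq_vector():
 *
 *	int i, err, nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvecs < 0)
 *		return nvecs;
 *	for (i = 0; i < nvecs; i++) {
 *		err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
 *				  0, "foo", foo_dev);
 *		if (err)
 *			goto err_free_vectors;
 *	}
 */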
/**
 * pci_free_irq_vectors - free previously allocated IRQs for a device
 * @dev:		PCI device to operate on
 *
 * Undoes the allocations and enabling in pci_alloc_irq_vectors().
 */
void pci_free_irq_vectors(struct pci_dev *dev)
{
	pci_disable_msix(dev);
	pci_disable_msi(dev);
}
EXPORT_SYMBOL(pci_free_irq_vectors);
/**
 * pci_irq_vector - return Linux IRQ number of a device vector
 * @dev: PCI device to operate on
 * @nr: device-relative interrupt vector index (0-based).
 */
int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	if (dev->msix_enabled) {
		struct msi_desc *entry;
		int i = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (i == nr)
				return entry->irq;
			i++;
		}
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (dev->msi_enabled) {
		struct msi_desc *entry = first_pci_msi_entry(dev);

		if (WARN_ON_ONCE(nr >= entry->nvec_used))
			return -EINVAL;
	} else {
		if (WARN_ON_ONCE(nr > 0))
			return -EINVAL;
	}

	return dev->irq + nr;
}
EXPORT_SYMBOL(pci_irq_vector);
/**
 * pci_irq_get_affinity - return the affinity of a particular MSI vector
 * @dev: PCI device to operate on
 * @nr: device-relative interrupt vector index (0-based).
 */
const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
{
	if (dev->msix_enabled) {
		struct msi_desc *entry;
		int i = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (i == nr)
				return &entry->affinity->mask;
			i++;
		}
		WARN_ON_ONCE(1);
		return NULL;
	} else if (dev->msi_enabled) {
		struct msi_desc *entry = first_pci_msi_entry(dev);

		if (WARN_ON_ONCE(!entry || !entry->affinity ||
				 nr >= entry->nvec_used))
			return NULL;

		return &entry->affinity[nr].mask;
	} else {
		return cpu_possible_mask;
	}
}
EXPORT_SYMBOL(pci_irq_get_affinity);
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
	return to_pci_dev(desc->dev);
}
EXPORT_SYMBOL(msi_desc_to_pci_dev);

void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(desc);

	return dev->bus->sysdata;
}
EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/**
 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
 * @irq_data:	Pointer to interrupt data of the MSI interrupt
 * @msg:	Pointer to the message
 */
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);

	/*
	 * For MSI-X desc->irq is always equal to irq_data->irq. For
	 * MSI only the first interrupt of MULTI MSI passes the test.
	 */
	if (desc->irq == irq_data->irq)
		__pci_write_msi_msg(desc, msg);
}
/**
 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
 * @desc:	Pointer to the MSI descriptor
 *
 * The ID number is only used within the irqdomain.
 */
static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(desc);

	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
		pci_dev_id(dev) << 11 |
		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
}
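/*
 * Worked example: a device at domain 0000, bus 03, devfn 00.0 (RID 0x0300)
 * using MSI-X table entry 5 yields hwirq 5 | (0x0300 << 11) | (0 << 27)
 * == 0x180005.
 */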
static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
{
	return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
}
/**
 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
 *			      for @dev
 * @domain:	The interrupt domain to check
 * @info:	The domain info for verification
 * @dev:	The device to check
 *
 * Returns:
 *  0 if the functionality is supported
 *  1 if Multi MSI is requested, but the domain does not support it
 *  -ENOTSUPP otherwise
 */
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev)
{
	struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));

	/* Special handling to support __pci_enable_msi_range() */
	if (pci_msi_desc_is_multi_msi(desc) &&
	    !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
		return 1;
	else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
		return -ENOTSUPP;

	return 0;
}
static int pci_msi_domain_handle_error(struct irq_domain *domain,
				       struct msi_desc *desc, int error)
{
	/* Special handling to support __pci_enable_msi_range() */
	if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
		return 1;

	return error;
}
static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = pci_msi_domain_calc_hwirq(desc);
}

static struct msi_domain_ops pci_msi_domain_ops_default = {
	.set_desc	= pci_msi_domain_set_desc,
	.msi_check	= pci_msi_domain_check_cap,
	.handle_error	= pci_msi_domain_handle_error,
};
static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &pci_msi_domain_ops_default;
	} else {
		if (ops->set_desc == NULL)
			ops->set_desc = pci_msi_domain_set_desc;
		if (ops->msi_check == NULL)
			ops->msi_check = pci_msi_domain_check_cap;
		if (ops->handle_error == NULL)
			ops->handle_error = pci_msi_domain_handle_error;
	}
}
static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
	if (!chip->irq_mask)
		chip->irq_mask = pci_msi_mask_irq;
	if (!chip->irq_unmask)
		chip->irq_unmask = pci_msi_unmask_irq;
}
/**
 * pci_msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Updates the domain and chip ops and creates a MSI interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		pci_msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		pci_msi_domain_update_chip_ops(info);

	info->flags |= MSI_FLAG_ACTIVATE_EARLY;
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		info->flags |= MSI_FLAG_MUST_REACTIVATE;

	/* PCI-MSI is oneshot-safe */
	info->chip->flags |= IRQCHIP_ONESHOT_SAFE;

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (!domain)
		return NULL;

	irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
	return domain;
}
EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
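/*
 * Example (an illustrative sketch; names prefixed "foo" are hypothetical):
 * an interrupt controller driver typically creates its PCI/MSI domain with
 * the default domain and chip ops filled in:
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name		= "FOO-MSI",
 *		.irq_ack	= irq_chip_ack_parent,
 *	};
 *
 *	static struct msi_domain_info foo_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS |
 *			  MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_PCI_MSIX,
 *		.chip	= &foo_msi_chip,
 *	};
 *
 *	domain = pci_msi_create_irq_domain(fwnode, &foo_msi_info, parent);
 */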
/*
 * Users of the generic MSI infrastructure expect a device to have a single ID,
 * so with DMA aliases we have to pick the least-worst compromise. Devices with
 * DMA phantom functions tend to still emit MSIs from the real function number,
 * so we ignore those and only consider topological aliases where either the
 * alias device or RID appears on a different bus number. We also make the
 * reasonable assumption that bridges are walked in an upstream direction (so
 * the last one seen wins), and the much braver assumption that the most likely
 * case is that of PCI->PCIe so we should always use the alias RID. This echoes
 * the logic from intel_irq_remapping's set_msi_sid(), which presumably works
 * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
 * for taking ownership all we can really do is close our eyes and hope...
 */
static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
{
	u32 *pa = data;
	u8 bus = PCI_BUS_NUM(*pa);

	if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
		*pa = alias;

	return 0;
}
/**
 * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
 * @domain:	The interrupt domain
 * @pdev:	The PCI device.
 *
 * The RID for a device is formed from the alias, with a firmware
 * supplied mapping applied.
 *
 * Returns: The RID.
 */
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
{
	struct device_node *of_node;
	u32 rid = pci_dev_id(pdev);

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);

	of_node = irq_domain_get_of_node(domain);
	rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
			iort_msi_map_id(&pdev->dev, rid);

	return rid;
}
/**
 * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
 * @pdev:	The PCI device
 *
 * Use the firmware data to find a device-specific MSI domain
 * (i.e. not one that is set as a default).
 *
 * Returns: The corresponding MSI domain or NULL if none has been found.
 */
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	struct irq_domain *dom;
	u32 rid = pci_dev_id(pdev);

	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
	dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
	if (!dom)
		dom = iort_get_device_domain(&pdev->dev, rid,
					     DOMAIN_BUS_PCI_MSI);
	return dom;
}
/**
 * pci_dev_has_special_msi_domain - Check whether the device is handled by
 *				    a non-standard PCI-MSI domain
 * @pdev:	The PCI device to check.
 *
 * Returns: True if the device irqdomain or the bus irqdomain is
 * non-standard PCI/MSI.
 */
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
{
	struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);

	if (!dom)
		dom = dev_get_msi_domain(&pdev->bus->dev);

	if (!dom)
		return true;

	return dom->bus_token != DOMAIN_BUS_PCI_MSI;
}

#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
#endif /* CONFIG_PCI_MSI */
void pci_msi_init(struct pci_dev *dev)
{
	u16 ctrl;

	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot. This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!dev->msi_cap)
		return;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
	if (ctrl & PCI_MSI_FLAGS_ENABLE)
		pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
				      ctrl & ~PCI_MSI_FLAGS_ENABLE);

	if (!(ctrl & PCI_MSI_FLAGS_64BIT))
		dev->no_64bit_msi = 1;
}
void pci_msix_init(struct pci_dev *dev)
{
	u16 ctrl;

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!dev->msix_cap)
		return;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (ctrl & PCI_MSIX_FLAGS_ENABLE)
		pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
				      ctrl & ~PCI_MSIX_FLAGS_ENABLE);
}