2 * PCI Bus Services, see include/linux/pci.h for further explanation.
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
10 #include <linux/kernel.h>
11 #include <linux/delay.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
15 #include <linux/slab.h>
16 #include <linux/module.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/log2.h>
20 #include <linux/pci-aspm.h>
21 #include <linux/pm_wakeup.h>
22 #include <linux/interrupt.h>
23 #include <linux/device.h>
24 #include <linux/pm_runtime.h>
25 #include <asm-generic/pci-bridge.h>
26 #include <asm/setup.h>
29 const char *pci_power_names[] = {
30 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
32 EXPORT_SYMBOL_GPL(pci_power_names);
34 int isa_dma_bridge_buggy;
35 EXPORT_SYMBOL(isa_dma_bridge_buggy);
38 EXPORT_SYMBOL(pci_pci_problems);
40 unsigned int pci_pm_d3_delay;
42 static void pci_pme_list_scan(struct work_struct *work);
44 static LIST_HEAD(pci_pme_list);
45 static DEFINE_MUTEX(pci_pme_list_mutex);
46 static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
48 struct pci_pme_device {
49 struct list_head list;
53 #define PME_TIMEOUT 1000 /* How long between PME checks */
55 static void pci_dev_d3_sleep(struct pci_dev *dev)
57 unsigned int delay = dev->d3_delay;
59 if (delay < pci_pm_d3_delay)
60 delay = pci_pm_d3_delay;
65 #ifdef CONFIG_PCI_DOMAINS
66 int pci_domains_supported = 1;
69 #define DEFAULT_CARDBUS_IO_SIZE (256)
70 #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
71 /* pci=cbmemsize=nnM,cbiosize=nn can override this */
72 unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
73 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
75 #define DEFAULT_HOTPLUG_IO_SIZE (256)
76 #define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
77 /* pci=hpmemsize=nnM,hpiosize=nn can override this */
78 unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
79 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
81 enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
84 * The default CLS is used if the arch didn't set CLS explicitly and not
85 * all PCI devices agree on the same value. The arch can override either
86 * the default or the actual value as it sees fit. Don't forget this is
87 * measured in 32-bit words, not bytes.
89 u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
90 u8 pci_cache_line_size;
93 * If we set up a device for bus mastering, we need to check the latency
94 * timer as certain BIOSes forget to set it properly.
96 unsigned int pcibios_max_latency = 255;
98 /* If set, the PCIe ARI capability will not be used. */
99 static bool pcie_ari_disabled;
102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
103 * @bus: pointer to PCI bus structure to search
105 * Given a PCI bus, returns the highest PCI bus number present in the set
106 * including the given PCI bus and its list of child PCI buses.
108 unsigned char pci_bus_max_busnr(struct pci_bus* bus)
110 struct list_head *tmp;
111 unsigned char max, n;
113 max = bus->subordinate;
114 list_for_each(tmp, &bus->children) {
115 n = pci_bus_max_busnr(pci_bus_b(tmp));
121 EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
123 #ifdef CONFIG_HAS_IOMEM
124 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
127 * Make sure the BAR is actually a memory resource, not an IO resource
129 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
133 return ioremap_nocache(pci_resource_start(pdev, bar),
134 pci_resource_len(pdev, bar));
136 EXPORT_SYMBOL_GPL(pci_ioremap_bar);
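/*
 * Illustrative sketch (hypothetical helper, not part of the API above): a
 * typical probe-time use of pci_ioremap_bar().  The example_ name and the
 * choice of BAR 0 are assumptions made purely for the example.
 */
static __maybe_unused void __iomem *example_map_bar0(struct pci_dev *pdev)
{
        void __iomem *regs = pci_ioremap_bar(pdev, 0);

        if (!regs)
                dev_err(&pdev->dev, "failed to map BAR 0\n");

        return regs;
}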
141 * pci_max_busnr - returns maximum PCI bus number
143 * Returns the highest PCI bus number present in the system global list of PCI buses.
146 unsigned char __devinit
149 struct pci_bus *bus = NULL;
150 unsigned char max, n;
153 while ((bus = pci_find_next_bus(bus)) != NULL) {
154 n = pci_bus_max_busnr(bus);
163 #define PCI_FIND_CAP_TTL 48
165 static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
166 u8 pos, int cap, int *ttl)
171 pci_bus_read_config_byte(bus, devfn, pos, &pos);
175 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
181 pos += PCI_CAP_LIST_NEXT;
186 static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
189 int ttl = PCI_FIND_CAP_TTL;
191 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
194 int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
196 return __pci_find_next_cap(dev->bus, dev->devfn,
197 pos + PCI_CAP_LIST_NEXT, cap);
199 EXPORT_SYMBOL_GPL(pci_find_next_capability);
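/*
 * Illustrative sketch (hypothetical helper): walking every instance of a
 * capability by seeding the search with pci_find_capability() and then
 * iterating with pci_find_next_capability().  Some capabilities, e.g.
 * PCI_CAP_ID_HT, may legitimately appear more than once.
 */
static __maybe_unused void example_list_caps(struct pci_dev *dev, int cap)
{
        int pos = pci_find_capability(dev, cap);

        while (pos) {
                dev_info(&dev->dev, "capability %#x at offset %#x\n", cap, pos);
                pos = pci_find_next_capability(dev, pos, cap);
        }
}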
201 static int __pci_bus_find_cap_start(struct pci_bus *bus,
202 unsigned int devfn, u8 hdr_type)
206 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
207 if (!(status & PCI_STATUS_CAP_LIST))
211 case PCI_HEADER_TYPE_NORMAL:
212 case PCI_HEADER_TYPE_BRIDGE:
213 return PCI_CAPABILITY_LIST;
214 case PCI_HEADER_TYPE_CARDBUS:
215 return PCI_CB_CAPABILITY_LIST;
224 * pci_find_capability - query for devices' capabilities
225 * @dev: PCI device to query
226 * @cap: capability code
228 * Tell if a device supports a given PCI capability.
229 * Returns the address of the requested capability structure within the
230 * device's PCI configuration space or 0 in case the device does not
231 * support it. Possible values for @cap:
233 * %PCI_CAP_ID_PM Power Management
234 * %PCI_CAP_ID_AGP Accelerated Graphics Port
235 * %PCI_CAP_ID_VPD Vital Product Data
236 * %PCI_CAP_ID_SLOTID Slot Identification
237 * %PCI_CAP_ID_MSI Message Signalled Interrupts
238 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
239 * %PCI_CAP_ID_PCIX PCI-X
240 * %PCI_CAP_ID_EXP PCI Express
242 int pci_find_capability(struct pci_dev *dev, int cap)
246 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
248 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
254 * pci_bus_find_capability - query for devices' capabilities
255 * @bus: the PCI bus to query
256 * @devfn: PCI device to query
257 * @cap: capability code
259 * Like pci_find_capability() but works for pci devices that do not have a
260 * pci_dev structure set up yet.
262 * Returns the address of the requested capability structure within the
263 * device's PCI configuration space or 0 in case the device does not
266 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
271 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
273 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
275 pos = __pci_find_next_cap(bus, devfn, pos, cap);
281 * pci_find_ext_capability - Find an extended capability
282 * @dev: PCI device to query
283 * @cap: capability code
285 * Returns the address of the requested extended capability structure
286 * within the device's PCI configuration space or 0 if the device does
287 * not support it. Possible values for @cap:
289 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
290 * %PCI_EXT_CAP_ID_VC Virtual Channel
291 * %PCI_EXT_CAP_ID_DSN Device Serial Number
292 * %PCI_EXT_CAP_ID_PWR Power Budgeting
294 int pci_find_ext_capability(struct pci_dev *dev, int cap)
298 int pos = PCI_CFG_SPACE_SIZE;
300 /* minimum 8 bytes per capability */
301 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
303 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
306 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
310 * If we have no capabilities, this is indicated by cap ID,
311 * cap version and next pointer all being 0.
317 if (PCI_EXT_CAP_ID(header) == cap)
320 pos = PCI_EXT_CAP_NEXT(header);
321 if (pos < PCI_CFG_SPACE_SIZE)
324 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
330 EXPORT_SYMBOL_GPL(pci_find_ext_capability);
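/*
 * Illustrative sketch (hypothetical helper): reading the Device Serial
 * Number extended capability located with pci_find_ext_capability().  The
 * +4/+8 offsets are the lower and upper serial-number dwords defined by the
 * PCIe spec.
 */
static __maybe_unused u64 example_read_dsn(struct pci_dev *dev)
{
        int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
        u32 lo, hi;

        if (!pos)
                return 0;

        pci_read_config_dword(dev, pos + 4, &lo);
        pci_read_config_dword(dev, pos + 8, &hi);

        return ((u64)hi << 32) | lo;
}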
333 * pci_bus_find_ext_capability - find an extended capability
334 * @bus: the PCI bus to query
335 * @devfn: PCI device to query
336 * @cap: capability code
338 * Like pci_find_ext_capability() but works for pci devices that do not have a
339 * pci_dev structure set up yet.
341 * Returns the address of the requested capability structure within the
342 * device's PCI configuration space or 0 in case the device does not
345 int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
350 int pos = PCI_CFG_SPACE_SIZE;
352 /* minimum 8 bytes per capability */
353 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
355 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
357 if (header == 0xffffffff || header == 0)
361 if (PCI_EXT_CAP_ID(header) == cap)
364 pos = PCI_EXT_CAP_NEXT(header);
365 if (pos < PCI_CFG_SPACE_SIZE)
368 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
375 static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
377 int rc, ttl = PCI_FIND_CAP_TTL;
380 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
381 mask = HT_3BIT_CAP_MASK;
383 mask = HT_5BIT_CAP_MASK;
385 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
386 PCI_CAP_ID_HT, &ttl);
388 rc = pci_read_config_byte(dev, pos + 3, &cap);
389 if (rc != PCIBIOS_SUCCESSFUL)
392 if ((cap & mask) == ht_cap)
395 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
396 pos + PCI_CAP_LIST_NEXT,
397 PCI_CAP_ID_HT, &ttl);
403 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
404 * @dev: PCI device to query
405 * @pos: Position from which to continue searching
406 * @ht_cap: Hypertransport capability code
408 * To be used in conjunction with pci_find_ht_capability() to search for
409 * all capabilities matching @ht_cap. @pos should always be a value returned
410 * from pci_find_ht_capability().
412 * NB. To be 100% safe against broken PCI devices, the caller should take
413 * steps to avoid an infinite loop.
415 int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
417 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
419 EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
422 * pci_find_ht_capability - query a device's Hypertransport capabilities
423 * @dev: PCI device to query
424 * @ht_cap: Hypertransport capability code
426 * Tell if a device supports a given Hypertransport capability.
427 * Returns an address within the device's PCI configuration space
428 * or 0 in case the device does not support the requested capability.
429 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
430 * which has a Hypertransport capability matching @ht_cap.
432 int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
436 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
438 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
442 EXPORT_SYMBOL_GPL(pci_find_ht_capability);
445 * pci_find_parent_resource - return resource region of parent bus of given region
446 * @dev: PCI device structure contains resources to be searched
447 * @res: child resource record for which parent is sought
449 * For given resource region of given device, return the resource
450 * region of parent bus the given region is contained in or where
451 * it should be allocated from.
454 pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
456 const struct pci_bus *bus = dev->bus;
458 struct resource *best = NULL, *r;
460 pci_bus_for_each_resource(bus, r, i) {
463 if (res->start && !(res->start >= r->start && res->end <= r->end))
464 continue; /* Not contained */
465 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
466 continue; /* Wrong type */
467 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
468 return r; /* Exact match */
469 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
470 if (r->flags & IORESOURCE_PREFETCH)
472 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
480 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
481 * @dev: PCI device to have its BARs restored
483 * Restore the BAR values for a given device, so as to make it
484 * accessible by its driver.
487 pci_restore_bars(struct pci_dev *dev)
491 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
492 pci_update_resource(dev, i);
495 static struct pci_platform_pm_ops *pci_platform_pm;
497 int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
499 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
500 || !ops->sleep_wake || !ops->can_wakeup)
502 pci_platform_pm = ops;
506 static inline bool platform_pci_power_manageable(struct pci_dev *dev)
508 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
511 static inline int platform_pci_set_power_state(struct pci_dev *dev,
514 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
517 static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
519 return pci_platform_pm ?
520 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
523 static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
525 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
528 static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
530 return pci_platform_pm ?
531 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
534 static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
536 return pci_platform_pm ?
537 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
541 * pci_raw_set_power_state - Use PCI PM registers to set the power state of a given PCI device
543 * @dev: PCI device to handle.
544 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
547 * -EINVAL if the requested state is invalid.
548 * -EIO if device does not support PCI PM or its PM capabilities register has a
549 * wrong version, or device doesn't support the requested state.
550 * 0 if device already is in the requested state.
551 * 0 if device's power state has been successfully changed.
553 static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
556 bool need_restore = false;
558 /* Check if we're already there */
559 if (dev->current_state == state)
565 if (state < PCI_D0 || state > PCI_D3hot)
568 /* Validate current state:
569 * Can enter D0 from any state, but we can only go deeper
570 * to sleep if we're already in a low power state.
572 if (state != PCI_D0 && dev->current_state <= PCI_D3cold
573 && dev->current_state > state) {
574 dev_err(&dev->dev, "invalid power transition "
575 "(from state %d to %d)\n", dev->current_state, state);
579 /* check if this device supports the desired state */
580 if ((state == PCI_D1 && !dev->d1_support)
581 || (state == PCI_D2 && !dev->d2_support))
584 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
586 /* If we're (effectively) in D3, force entire word to 0.
587 * This doesn't affect PME_Status, disables PME_En, and
588 * sets PowerState to 0.
590 switch (dev->current_state) {
594 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
599 case PCI_UNKNOWN: /* Boot-up */
600 if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
601 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
603 /* Fall-through: force to D0 */
609 /* enter specified state */
610 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
612 /* Mandatory power management transition delays */
613 /* see PCI PM 1.1 5.6.1 table 18 */
614 if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
615 pci_dev_d3_sleep(dev);
616 else if (state == PCI_D2 || dev->current_state == PCI_D2)
617 udelay(PCI_PM_D2_DELAY);
619 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
620 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
621 if (dev->current_state != state && printk_ratelimit())
622 dev_info(&dev->dev, "Refused to change power state, "
623 "currently in D%d\n", dev->current_state);
625 /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
626 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
627 * from D3hot to D0 _may_ perform an internal reset, thereby
628 * going to "D0 Uninitialized" rather than "D0 Initialized".
629 * For example, at least some versions of the 3c905B and the
630 * 3c556B exhibit this behaviour.
632 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
633 * devices in a D3hot state at boot. Consequently, we need to
634 * restore at least the BARs so that the device will be
635 * accessible to its driver.
638 pci_restore_bars(dev);
641 pcie_aspm_pm_state_change(dev->bus->self);
647 * pci_update_current_state - Read PCI power state of given device from its
648 * PCI PM registers and cache it
649 * @dev: PCI device to handle.
650 * @state: State to cache in case the device doesn't have the PM capability
652 void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
657 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
658 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
660 dev->current_state = state;
665 * pci_platform_power_transition - Use platform to change device power state
666 * @dev: PCI device to handle.
667 * @state: State to put the device into.
669 static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
673 if (platform_pci_power_manageable(dev)) {
674 error = platform_pci_set_power_state(dev, state);
676 pci_update_current_state(dev, state);
677 /* Fall back to PCI_D0 if native PM is not supported */
679 dev->current_state = PCI_D0;
682 /* Fall back to PCI_D0 if native PM is not supported */
684 dev->current_state = PCI_D0;
691 * __pci_start_power_transition - Start power transition of a PCI device
692 * @dev: PCI device to handle.
693 * @state: State to put the device into.
695 static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
698 pci_platform_power_transition(dev, PCI_D0);
702 * __pci_complete_power_transition - Complete power transition of a PCI device
703 * @dev: PCI device to handle.
704 * @state: State to put the device into.
706 * This function should not be called directly by device drivers.
708 int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
710 return state >= PCI_D0 ?
711 pci_platform_power_transition(dev, state) : -EINVAL;
713 EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
716 * pci_set_power_state - Set the power state of a PCI device
717 * @dev: PCI device to handle.
718 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
720 * Transition a device to a new power state, using the platform firmware and/or
721 * the device's PCI PM registers.
724 * -EINVAL if the requested state is invalid.
725 * -EIO if device does not support PCI PM or its PM capabilities register has a
726 * wrong version, or device doesn't support the requested state.
727 * 0 if device already is in the requested state.
728 * 0 if device's power state has been successfully changed.
730 int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
734 /* bound the state we're entering */
735 if (state > PCI_D3hot)
737 else if (state < PCI_D0)
739 else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
741 * If the device or the parent bridge do not support PCI PM,
742 * ignore the request if we're doing anything other than putting
743 * it into D0 (which would only happen on boot).
747 __pci_start_power_transition(dev, state);
749 /* This device is quirked not to be put into D3, so
750 don't put it in D3 */
751 if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
754 error = pci_raw_set_power_state(dev, state);
756 if (!__pci_complete_power_transition(dev, state))
759 * When aspm_policy is "powersave" this call ensures
760 * that ASPM is configured.
762 if (!error && dev->bus->self)
763 pcie_aspm_powersave_config_link(dev->bus->self);
769 * pci_choose_state - Choose the power state of a PCI device
770 * @dev: PCI device to be suspended
771 * @state: target sleep state for the whole system. This is the value
772 * that is passed to the suspend() function.
774 * Returns PCI power state suitable for the given device and the given system message.
778 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
782 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
785 ret = platform_pci_choose_state(dev);
786 if (ret != PCI_POWER_ERROR)
789 switch (state.event) {
792 case PM_EVENT_FREEZE:
793 case PM_EVENT_PRETHAW:
794 /* REVISIT both freeze and pre-thaw "should" use D0 */
795 case PM_EVENT_SUSPEND:
796 case PM_EVENT_HIBERNATE:
799 dev_info(&dev->dev, "unrecognized suspend event %d\n",
806 EXPORT_SYMBOL(pci_choose_state);
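/*
 * Illustrative sketch (hypothetical driver hook): a legacy-PM suspend path
 * combining pci_save_state(), pci_choose_state() and pci_set_power_state().
 */
static __maybe_unused int example_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
        pci_save_state(pdev);
        pci_disable_device(pdev);

        return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}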
808 #define PCI_EXP_SAVE_REGS 7
810 #define pcie_cap_has_devctl(type, flags) 1
811 #define pcie_cap_has_lnkctl(type, flags) \
812 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
813 (type == PCI_EXP_TYPE_ROOT_PORT || \
814 type == PCI_EXP_TYPE_ENDPOINT || \
815 type == PCI_EXP_TYPE_LEG_END))
816 #define pcie_cap_has_sltctl(type, flags) \
817 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
818 ((type == PCI_EXP_TYPE_ROOT_PORT) || \
819 (type == PCI_EXP_TYPE_DOWNSTREAM && \
820 (flags & PCI_EXP_FLAGS_SLOT))))
821 #define pcie_cap_has_rtctl(type, flags) \
822 ((flags & PCI_EXP_FLAGS_VERS) > 1 || \
823 (type == PCI_EXP_TYPE_ROOT_PORT || \
824 type == PCI_EXP_TYPE_RC_EC))
825 #define pcie_cap_has_devctl2(type, flags) \
826 ((flags & PCI_EXP_FLAGS_VERS) > 1)
827 #define pcie_cap_has_lnkctl2(type, flags) \
828 ((flags & PCI_EXP_FLAGS_VERS) > 1)
829 #define pcie_cap_has_sltctl2(type, flags) \
830 ((flags & PCI_EXP_FLAGS_VERS) > 1)
832 static struct pci_cap_saved_state *pci_find_saved_cap(
833 struct pci_dev *pci_dev, char cap)
835 struct pci_cap_saved_state *tmp;
836 struct hlist_node *pos;
838 hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
839 if (tmp->cap.cap_nr == cap)
845 static int pci_save_pcie_state(struct pci_dev *dev)
848 struct pci_cap_saved_state *save_state;
852 pos = pci_pcie_cap(dev);
856 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
858 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
861 cap = (u16 *)&save_state->cap.data[0];
863 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
865 if (pcie_cap_has_devctl(dev->pcie_type, flags))
866 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
867 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
868 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
869 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
870 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
871 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
872 pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
873 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
874 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
875 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
876 pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
877 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
878 pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
883 static void pci_restore_pcie_state(struct pci_dev *dev)
886 struct pci_cap_saved_state *save_state;
890 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
891 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
892 if (!save_state || pos <= 0)
894 cap = (u16 *)&save_state->cap.data[0];
896 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
898 if (pcie_cap_has_devctl(dev->pcie_type, flags))
899 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
900 if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
901 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
902 if (pcie_cap_has_sltctl(dev->pcie_type, flags))
903 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
904 if (pcie_cap_has_rtctl(dev->pcie_type, flags))
905 pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
906 if (pcie_cap_has_devctl2(dev->pcie_type, flags))
907 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
908 if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
909 pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
910 if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
911 pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
915 static int pci_save_pcix_state(struct pci_dev *dev)
918 struct pci_cap_saved_state *save_state;
920 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
924 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
926 dev_err(&dev->dev, "buffer not found in %s\n", __func__);
930 pci_read_config_word(dev, pos + PCI_X_CMD,
931 (u16 *)save_state->cap.data);
936 static void pci_restore_pcix_state(struct pci_dev *dev)
939 struct pci_cap_saved_state *save_state;
942 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
943 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
944 if (!save_state || pos <= 0)
946 cap = (u16 *)&save_state->cap.data[0];
948 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
953 * pci_save_state - save the PCI configuration space of a device before suspending
954 * @dev: - PCI device that we're dealing with
957 pci_save_state(struct pci_dev *dev)
960 /* XXX: 100% dword access ok here? */
961 for (i = 0; i < 16; i++)
962 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
963 dev->state_saved = true;
964 if ((i = pci_save_pcie_state(dev)) != 0)
966 if ((i = pci_save_pcix_state(dev)) != 0)
971 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
972 u32 saved_val, int retry)
976 pci_read_config_dword(pdev, offset, &val);
977 if (val == saved_val)
981 dev_dbg(&pdev->dev, "restoring config space at offset "
982 "%#x (was %#x, writing %#x)\n", offset, val, saved_val);
983 pci_write_config_dword(pdev, offset, saved_val);
987 pci_read_config_dword(pdev, offset, &val);
988 if (val == saved_val)
995 static void pci_restore_config_space_range(struct pci_dev *pdev,
996 int start, int end, int retry)
1000 for (index = end; index >= start; index--)
1001 pci_restore_config_dword(pdev, 4 * index,
1002 pdev->saved_config_space[index],
1006 static void pci_restore_config_space(struct pci_dev *pdev)
1008 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1009 pci_restore_config_space_range(pdev, 10, 15, 0);
1010 /* Restore BARs before the command register. */
1011 pci_restore_config_space_range(pdev, 4, 9, 10);
1012 pci_restore_config_space_range(pdev, 0, 3, 0);
1014 pci_restore_config_space_range(pdev, 0, 15, 0);
1019 * pci_restore_state - Restore the saved state of a PCI device
1020 * @dev: - PCI device that we're dealing with
1022 void pci_restore_state(struct pci_dev *dev)
1024 if (!dev->state_saved)
1027 /* PCI Express register must be restored first */
1028 pci_restore_pcie_state(dev);
1029 pci_restore_ats_state(dev);
1031 pci_restore_config_space(dev);
1033 pci_restore_pcix_state(dev);
1034 pci_restore_msi_state(dev);
1035 pci_restore_iov_state(dev);
1037 dev->state_saved = false;
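/*
 * Illustrative sketch (hypothetical driver hook): the resume counterpart of
 * a legacy-PM suspend path, bringing the device back to D0 and restoring the
 * configuration space saved earlier with pci_save_state().
 */
static __maybe_unused int example_resume(struct pci_dev *pdev)
{
        int rc;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        pci_set_master(pdev);
        return 0;
}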
1040 struct pci_saved_state {
1041 u32 config_space[16];
1042 struct pci_cap_saved_data cap[0];
1046 * pci_store_saved_state - Allocate and return an opaque struct containing
1047 * the device's saved state.
1048 * @dev: PCI device that we're dealing with
1050 * Return NULL if there is no saved state or if an error occurs.
1052 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1054 struct pci_saved_state *state;
1055 struct pci_cap_saved_state *tmp;
1056 struct pci_cap_saved_data *cap;
1057 struct hlist_node *pos;
1060 if (!dev->state_saved)
1063 size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1065 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1066 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1068 state = kzalloc(size, GFP_KERNEL);
1072 memcpy(state->config_space, dev->saved_config_space,
1073 sizeof(state->config_space));
1076 hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1077 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1078 memcpy(cap, &tmp->cap, len);
1079 cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1081 /* Empty cap_save terminates list */
1085 EXPORT_SYMBOL_GPL(pci_store_saved_state);
1088 * pci_load_saved_state - Reload the provided saved state into struct pci_dev.
1089 * @dev: PCI device that we're dealing with
1090 * @state: Saved state returned from pci_store_saved_state()
1092 int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1094 struct pci_cap_saved_data *cap;
1096 dev->state_saved = false;
1101 memcpy(dev->saved_config_space, state->config_space,
1102 sizeof(state->config_space));
1106 struct pci_cap_saved_state *tmp;
1108 tmp = pci_find_saved_cap(dev, cap->cap_nr);
1109 if (!tmp || tmp->cap.size != cap->size)
1112 memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1113 cap = (struct pci_cap_saved_data *)((u8 *)cap +
1114 sizeof(struct pci_cap_saved_data) + cap->size);
1117 dev->state_saved = true;
1120 EXPORT_SYMBOL_GPL(pci_load_saved_state);
1123 * pci_load_and_free_saved_state - Reload the saved state pointed to by state,
1124 * and free the memory allocated for it.
1125 * @dev: PCI device that we're dealing with
1126 * @state: Pointer to saved state returned from pci_store_saved_state()
1128 int pci_load_and_free_saved_state(struct pci_dev *dev,
1129 struct pci_saved_state **state)
1131 int ret = pci_load_saved_state(dev, *state);
1136 EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
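/*
 * Illustrative sketch (hypothetical helper): keeping an opaque copy of the
 * saved config space across a device-specific reset, in the way callers such
 * as device assignment code use these helpers.
 */
static __maybe_unused int example_reset_with_saved_state(struct pci_dev *pdev)
{
        struct pci_saved_state *state;

        pci_save_state(pdev);
        state = pci_store_saved_state(pdev);
        if (!state)
                return -ENOMEM;

        /* ... a device-specific reset would happen here ... */

        pci_load_and_free_saved_state(pdev, &state);
        pci_restore_state(pdev);

        return 0;
}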
1138 static int do_pci_enable_device(struct pci_dev *dev, int bars)
1142 err = pci_set_power_state(dev, PCI_D0);
1143 if (err < 0 && err != -EIO)
1145 err = pcibios_enable_device(dev, bars);
1148 pci_fixup_device(pci_fixup_enable, dev);
1154 * pci_reenable_device - Resume abandoned device
1155 * @dev: PCI device to be resumed
1157 * Note this function is a backend of pci_default_resume and is not supposed
1158 * to be called by normal code; write a proper resume handler and use it instead.
1160 int pci_reenable_device(struct pci_dev *dev)
1162 if (pci_is_enabled(dev))
1163 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1167 static int __pci_enable_device_flags(struct pci_dev *dev,
1168 resource_size_t flags)
1174 * Power state could be unknown at this point, either due to a fresh
1175 * boot or a device removal call. So get the current power state
1176 * so that things like MSI message writing will behave as expected
1177 * (e.g. if the device really is in D0 at enable time).
1181 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1182 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1185 if (atomic_add_return(1, &dev->enable_cnt) > 1)
1186 return 0; /* already enabled */
1188 /* only skip sriov related */
1189 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1190 if (dev->resource[i].flags & flags)
1192 for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1193 if (dev->resource[i].flags & flags)
1196 err = do_pci_enable_device(dev, bars);
1198 atomic_dec(&dev->enable_cnt);
1203 * pci_enable_device_io - Initialize a device for use with IO space
1204 * @dev: PCI device to be initialized
1206 * Initialize device before it's used by a driver. Ask low-level code
1207 * to enable I/O resources. Wake up the device if it was suspended.
1208 * Beware, this function can fail.
1210 int pci_enable_device_io(struct pci_dev *dev)
1212 return __pci_enable_device_flags(dev, IORESOURCE_IO);
1216 * pci_enable_device_mem - Initialize a device for use with Memory space
1217 * @dev: PCI device to be initialized
1219 * Initialize device before it's used by a driver. Ask low-level code
1220 * to enable Memory resources. Wake up the device if it was suspended.
1221 * Beware, this function can fail.
1223 int pci_enable_device_mem(struct pci_dev *dev)
1225 return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1229 * pci_enable_device - Initialize device before it's used by a driver.
1230 * @dev: PCI device to be initialized
1232 * Initialize device before it's used by a driver. Ask low-level code
1233 * to enable I/O and memory. Wake up the device if it was suspended.
1234 * Beware, this function can fail.
1236 * Note we don't actually enable the device many times if we call
1237 * this function repeatedly (we just increment the count).
1239 int pci_enable_device(struct pci_dev *dev)
1241 return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
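/*
 * Illustrative sketch (hypothetical driver probe): the classic sequence of
 * enabling the device, claiming its regions and enabling bus mastering.
 * pci_request_regions() and pci_set_master() are declared elsewhere.
 */
static __maybe_unused int example_probe(struct pci_dev *pdev)
{
        int rc;

        rc = pci_enable_device_mem(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, "example");
        if (rc) {
                pci_disable_device(pdev);
                return rc;
        }

        pci_set_master(pdev);
        return 0;
}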
1245 * Managed PCI resources. This manages device on/off, intx/msi/msix
1246 * on/off and BAR regions. pci_dev itself records msi/msix status, so
1247 * there's no need to track it separately. pci_devres is initialized
1248 * when a device is enabled using managed PCI device enable interface.
1251 unsigned int enabled:1;
1252 unsigned int pinned:1;
1253 unsigned int orig_intx:1;
1254 unsigned int restore_intx:1;
1258 static void pcim_release(struct device *gendev, void *res)
1260 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1261 struct pci_devres *this = res;
1264 if (dev->msi_enabled)
1265 pci_disable_msi(dev);
1266 if (dev->msix_enabled)
1267 pci_disable_msix(dev);
1269 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1270 if (this->region_mask & (1 << i))
1271 pci_release_region(dev, i);
1273 if (this->restore_intx)
1274 pci_intx(dev, this->orig_intx);
1276 if (this->enabled && !this->pinned)
1277 pci_disable_device(dev);
1280 static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1282 struct pci_devres *dr, *new_dr;
1284 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1288 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1291 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1294 static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1296 if (pci_is_managed(pdev))
1297 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1302 * pcim_enable_device - Managed pci_enable_device()
1303 * @pdev: PCI device to be initialized
1305 * Managed pci_enable_device().
1307 int pcim_enable_device(struct pci_dev *pdev)
1309 struct pci_devres *dr;
1312 dr = get_pci_dr(pdev);
1318 rc = pci_enable_device(pdev);
1320 pdev->is_managed = 1;
1327 * pcim_pin_device - Pin managed PCI device
1328 * @pdev: PCI device to pin
1330 * Pin managed PCI device @pdev. Pinned device won't be disabled on
1331 * driver detach. @pdev must have been enabled with
1332 * pcim_enable_device().
1334 void pcim_pin_device(struct pci_dev *pdev)
1336 struct pci_devres *dr;
1338 dr = find_pci_dr(pdev);
1339 WARN_ON(!dr || !dr->enabled);
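/*
 * Illustrative sketch (hypothetical driver probe): the managed variant, where
 * the device is disabled automatically on driver detach unless it has been
 * pinned with pcim_pin_device().
 */
static __maybe_unused int example_managed_probe(struct pci_dev *pdev)
{
        int rc = pcim_enable_device(pdev);

        if (rc)
                return rc;

        /* A device the platform must keep enabled could also be pinned: */
        /* pcim_pin_device(pdev); */

        return 0;
}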
1345 * pcibios_disable_device - disable arch specific PCI resources for device dev
1346 * @dev: the PCI device to disable
1348 * Disables architecture specific PCI resources for the device. This
1349 * is the default implementation. Architecture implementations can override this.
1352 void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1354 static void do_pci_disable_device(struct pci_dev *dev)
1358 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1359 if (pci_command & PCI_COMMAND_MASTER) {
1360 pci_command &= ~PCI_COMMAND_MASTER;
1361 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1364 pcibios_disable_device(dev);
1368 * pci_disable_enabled_device - Disable device without updating enable_cnt
1369 * @dev: PCI device to disable
1371 * NOTE: This function is a backend of PCI power management routines and is
1372 * not supposed to be called by drivers.
1374 void pci_disable_enabled_device(struct pci_dev *dev)
1376 if (pci_is_enabled(dev))
1377 do_pci_disable_device(dev);
1381 * pci_disable_device - Disable PCI device after use
1382 * @dev: PCI device to be disabled
1384 * Signal to the system that the PCI device is not in use by the system
1385 * anymore. This only involves disabling PCI bus-mastering, if active.
1387 * Note we don't actually disable the device until all callers of
1388 * pci_enable_device() have called pci_disable_device().
1391 pci_disable_device(struct pci_dev *dev)
1393 struct pci_devres *dr;
1395 dr = find_pci_dr(dev);
1399 if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1402 do_pci_disable_device(dev);
1404 dev->is_busmaster = 0;
1408 * pcibios_set_pcie_reset_state - set reset state for device dev
1409 * @dev: the PCIe device reset
1410 * @state: Reset state to enter into
1413 * Sets the PCIe reset state for the device. This is the default
1414 * implementation. Architecture implementations can override this.
1416 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1417 enum pcie_reset_state state)
1423 * pci_set_pcie_reset_state - set reset state for device dev
1424 * @dev: the PCIe device reset
1425 * @state: Reset state to enter into
1428 * Sets the PCI reset state for the device.
1430 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1432 return pcibios_set_pcie_reset_state(dev, state);
1436 * pci_check_pme_status - Check if given device has generated PME.
1437 * @dev: Device to check.
1439 * Check the PME status of the device and if set, clear it and clear PME enable
1440 * (if set). Return 'true' if PME status and PME enable were both set or
1441 * 'false' otherwise.
1443 bool pci_check_pme_status(struct pci_dev *dev)
1452 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1453 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1454 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1457 /* Clear PME status. */
1458 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1459 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1460 /* Disable PME to avoid interrupt flood. */
1461 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1465 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1471 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1472 * @dev: Device to handle.
1473 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1475 * Check if @dev has generated PME and queue a resume request for it in that case.
1478 static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1480 if (pme_poll_reset && dev->pme_poll)
1481 dev->pme_poll = false;
1483 if (pci_check_pme_status(dev)) {
1484 pci_wakeup_event(dev);
1485 pm_request_resume(&dev->dev);
1491 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1492 * @bus: Top bus of the subtree to walk.
1494 void pci_pme_wakeup_bus(struct pci_bus *bus)
1497 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1501 * pci_pme_capable - check the capability of PCI device to generate PME#
1502 * @dev: PCI device to handle.
1503 * @state: PCI state from which device will issue PME#.
1505 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1510 return !!(dev->pme_support & (1 << state));
1513 static void pci_pme_list_scan(struct work_struct *work)
1515 struct pci_pme_device *pme_dev, *n;
1517 mutex_lock(&pci_pme_list_mutex);
1518 if (!list_empty(&pci_pme_list)) {
1519 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1520 if (pme_dev->dev->pme_poll) {
1521 pci_pme_wakeup(pme_dev->dev, NULL);
1523 list_del(&pme_dev->list);
1527 if (!list_empty(&pci_pme_list))
1528 schedule_delayed_work(&pci_pme_work,
1529 msecs_to_jiffies(PME_TIMEOUT));
1531 mutex_unlock(&pci_pme_list_mutex);
1535 * pci_pme_active - enable or disable PCI device's PME# function
1536 * @dev: PCI device to handle.
1537 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1539 * The caller must verify that the device is capable of generating PME# before
1540 * calling this function with @enable equal to 'true'.
1542 void pci_pme_active(struct pci_dev *dev, bool enable)
1549 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1550 /* Clear PME_Status by writing 1 to it and enable PME# */
1551 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1553 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1555 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1557 /* PCI (as opposed to PCIe) PME requires that the device have
1558 its PME# line hooked up correctly. Not all hardware vendors
1559 do this, so the PME never gets delivered and the device
1560 remains asleep. The easiest way around this is to
1561 periodically walk the list of suspended devices and check
1562 whether any have their PME flag set. The assumption is that
1563 we'll wake up often enough anyway that this won't be a huge
1564 hit, and the power savings from the devices will still be a win. */
1567 if (dev->pme_poll) {
1568 struct pci_pme_device *pme_dev;
1570 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1575 mutex_lock(&pci_pme_list_mutex);
1576 list_add(&pme_dev->list, &pci_pme_list);
1577 if (list_is_singular(&pci_pme_list))
1578 schedule_delayed_work(&pci_pme_work,
1579 msecs_to_jiffies(PME_TIMEOUT));
1580 mutex_unlock(&pci_pme_list_mutex);
1582 mutex_lock(&pci_pme_list_mutex);
1583 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1584 if (pme_dev->dev == dev) {
1585 list_del(&pme_dev->list);
1590 mutex_unlock(&pci_pme_list_mutex);
1595 dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1599 * __pci_enable_wake - enable PCI device as wakeup event source
1600 * @dev: PCI device affected
1601 * @state: PCI state from which device will issue wakeup events
1602 * @runtime: True if the events are to be generated at run time
1603 * @enable: True to enable event generation; false to disable
1605 * This enables the device as a wakeup event source, or disables it.
1606 * When such events involve platform-specific hooks, those hooks are
1607 * called automatically by this routine.
1609 * Devices with legacy power management (no standard PCI PM capabilities)
1610 * always require such platform hooks.
1613 * 0 is returned on success
1614 * -EINVAL is returned if device is not supposed to wake up the system
1615 * Error code depending on the platform is returned if both the platform and
1616 * the native mechanism fail to enable the generation of wake-up events
1618 int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1619 bool runtime, bool enable)
1623 if (enable && !runtime && !device_may_wakeup(&dev->dev))
1626 /* Don't do the same thing twice in a row for one device. */
1627 if (!!enable == !!dev->wakeup_prepared)
1631 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1632 * Anderson we should be doing PME# wake enable followed by ACPI wake
1633 * enable. To disable wake-up we call the platform first, for symmetry.
1639 if (pci_pme_capable(dev, state))
1640 pci_pme_active(dev, true);
1643 error = runtime ? platform_pci_run_wake(dev, true) :
1644 platform_pci_sleep_wake(dev, true);
1648 dev->wakeup_prepared = true;
1651 platform_pci_run_wake(dev, false);
1653 platform_pci_sleep_wake(dev, false);
1654 pci_pme_active(dev, false);
1655 dev->wakeup_prepared = false;
1660 EXPORT_SYMBOL(__pci_enable_wake);
1663 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1664 * @dev: PCI device to prepare
1665 * @enable: True to enable wake-up event generation; false to disable
1667 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1668 * and this function allows them to set that up cleanly - pci_enable_wake()
1669 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1670 * ordering constraints.
1672 * This function only returns error code if the device is not capable of
1673 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1674 * enable wake-up power for it.
1676 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1678 return pci_pme_capable(dev, PCI_D3cold) ?
1679 pci_enable_wake(dev, PCI_D3cold, enable) :
1680 pci_enable_wake(dev, PCI_D3hot, enable);
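/*
 * Illustrative sketch (hypothetical driver hook): a network driver arming
 * wake-on-LAN in its suspend path with pci_wake_from_d3().  The wol flag is
 * assumed to come from driver-private state.
 */
static __maybe_unused int example_suspend_with_wol(struct pci_dev *pdev, bool wol)
{
        pci_save_state(pdev);
        pci_wake_from_d3(pdev, wol);

        return pci_set_power_state(pdev, PCI_D3hot);
}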
1684 * pci_target_state - find an appropriate low power state for a given PCI dev
1687 * Use underlying platform code to find a supported low power state for @dev.
1688 * If the platform can't manage @dev, return the deepest state from which it
1689 * can generate wake events, based on any available PME info.
1691 pci_power_t pci_target_state(struct pci_dev *dev)
1693 pci_power_t target_state = PCI_D3hot;
1695 if (platform_pci_power_manageable(dev)) {
1697 * Call the platform to choose the target state of the device
1698 * and enable wake-up from this state if supported.
1700 pci_power_t state = platform_pci_choose_state(dev);
1703 case PCI_POWER_ERROR:
1708 if (pci_no_d1d2(dev))
1711 target_state = state;
1713 } else if (!dev->pm_cap) {
1714 target_state = PCI_D0;
1715 } else if (device_may_wakeup(&dev->dev)) {
1717 * Find the deepest state from which the device can generate
1718 * wake-up events, make it the target state and enable the device to generate PME#.
1721 if (dev->pme_support) {
1723 && !(dev->pme_support & (1 << target_state)))
1728 return target_state;
1732 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1733 * @dev: Device to handle.
1735 * Choose the power state appropriate for the device depending on whether
1736 * it can wake up the system and/or is power manageable by the platform
1737 * (PCI_D3hot is the default) and put the device into that state.
1739 int pci_prepare_to_sleep(struct pci_dev *dev)
1741 pci_power_t target_state = pci_target_state(dev);
1744 if (target_state == PCI_POWER_ERROR)
1747 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1749 error = pci_set_power_state(dev, target_state);
1752 pci_enable_wake(dev, target_state, false);
1758 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1759 * @dev: Device to handle.
1761 * Disable device's system wake-up capability and put it into D0.
1763 int pci_back_from_sleep(struct pci_dev *dev)
1765 pci_enable_wake(dev, PCI_D0, false);
1766 return pci_set_power_state(dev, PCI_D0);
1770 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1771 * @dev: PCI device being suspended.
1773 * Prepare @dev to generate wake-up events at run time and put it into a low power state.
1776 int pci_finish_runtime_suspend(struct pci_dev *dev)
1778 pci_power_t target_state = pci_target_state(dev);
1781 if (target_state == PCI_POWER_ERROR)
1784 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1786 error = pci_set_power_state(dev, target_state);
1789 __pci_enable_wake(dev, target_state, true, false);
1795 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1796 * @dev: Device to check.
1798 * Return true if the device itself is capable of generating wake-up events
1799 * (through the platform or using the native PCIe PME) or if the device supports
1800 * PME and one of its upstream bridges can generate wake-up events.
1802 bool pci_dev_run_wake(struct pci_dev *dev)
1804 struct pci_bus *bus = dev->bus;
1806 if (device_run_wake(&dev->dev))
1809 if (!dev->pme_support)
1812 while (bus->parent) {
1813 struct pci_dev *bridge = bus->self;
1815 if (device_run_wake(&bridge->dev))
1821 /* We have reached the root bus. */
1823 return device_run_wake(bus->bridge);
1827 EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1830 * pci_pm_init - Initialize PM functions of given PCI device
1831 * @dev: PCI device to handle.
1833 void pci_pm_init(struct pci_dev *dev)
1838 pm_runtime_forbid(&dev->dev);
1839 device_enable_async_suspend(&dev->dev);
1840 dev->wakeup_prepared = false;
1844 /* find PCI PM capability in list */
1845 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1848 /* Check device's ability to generate PME# */
1849 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1851 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1852 dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1853 pmc & PCI_PM_CAP_VER_MASK);
1858 dev->d3_delay = PCI_PM_D3_WAIT;
1860 dev->d1_support = false;
1861 dev->d2_support = false;
1862 if (!pci_no_d1d2(dev)) {
1863 if (pmc & PCI_PM_CAP_D1)
1864 dev->d1_support = true;
1865 if (pmc & PCI_PM_CAP_D2)
1866 dev->d2_support = true;
1868 if (dev->d1_support || dev->d2_support)
1869 dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1870 dev->d1_support ? " D1" : "",
1871 dev->d2_support ? " D2" : "");
1874 pmc &= PCI_PM_CAP_PME_MASK;
1876 dev_printk(KERN_DEBUG, &dev->dev,
1877 "PME# supported from%s%s%s%s%s\n",
1878 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1879 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1880 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1881 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1882 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1883 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1884 dev->pme_poll = true;
1886 * Make device's PM flags reflect the wake-up capability, but
1887 * let the user space enable it to wake up the system as needed.
1889 device_set_wakeup_capable(&dev->dev, true);
1890 /* Disable the PME# generation functionality */
1891 pci_pme_active(dev, false);
1893 dev->pme_support = 0;
1898 * platform_pci_wakeup_init - init platform wakeup if present
1901 * Some devices don't have PCI PM caps but can still generate wakeup
1902 * events through platform methods (like ACPI events). If @dev supports
1903 * platform wakeup events, set the device flag to indicate as much. This
1904 * may be redundant if the device also supports PCI PM caps, but double
1905 * initialization should be safe in that case.
1907 void platform_pci_wakeup_init(struct pci_dev *dev)
1909 if (!platform_pci_can_wakeup(dev))
1912 device_set_wakeup_capable(&dev->dev, true);
1913 platform_pci_sleep_wake(dev, false);
1916 static void pci_add_saved_cap(struct pci_dev *pci_dev,
1917 struct pci_cap_saved_state *new_cap)
1919 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1923 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1924 * @dev: the PCI device
1925 * @cap: the capability to allocate the buffer for
1926 * @size: requested size of the buffer
1928 static int pci_add_cap_save_buffer(
1929 struct pci_dev *dev, char cap, unsigned int size)
1932 struct pci_cap_saved_state *save_state;
1934 pos = pci_find_capability(dev, cap);
1938 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1942 save_state->cap.cap_nr = cap;
1943 save_state->cap.size = size;
1944 pci_add_saved_cap(dev, save_state);
1950 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1951 * @dev: the PCI device
1953 void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1957 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1958 PCI_EXP_SAVE_REGS * sizeof(u16));
1961 "unable to preallocate PCI Express save buffer\n");
1963 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1966 "unable to preallocate PCI-X save buffer\n");
1969 void pci_free_cap_save_buffers(struct pci_dev *dev)
1971 struct pci_cap_saved_state *tmp;
1972 struct hlist_node *pos, *n;
1974 hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1979 * pci_enable_ari - enable ARI forwarding if hardware supports it
1980 * @dev: the PCI device
1982 void pci_enable_ari(struct pci_dev *dev)
1987 struct pci_dev *bridge;
1989 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
1992 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1996 bridge = dev->bus->self;
1997 if (!bridge || !pci_is_pcie(bridge))
2000 pos = pci_pcie_cap(bridge);
2004 /* ARI is a PCIe v2 feature */
2005 pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2006 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2009 pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
2010 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2013 pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
2014 ctrl |= PCI_EXP_DEVCTL2_ARI;
2015 pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
2017 bridge->ari_enabled = 1;
2021 * pci_enable_ido - enable ID-based ordering on a device
2022 * @dev: the PCI device
2023 * @type: which types of IDO to enable
2025 * Enable ID-based ordering on @dev. @type can contain the bits
2026 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2027 * which types of transactions are allowed to be re-ordered.
2029 void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2034 pos = pci_pcie_cap(dev);
2038 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2039 if (type & PCI_EXP_IDO_REQUEST)
2040 ctrl |= PCI_EXP_IDO_REQ_EN;
2041 if (type & PCI_EXP_IDO_COMPLETION)
2042 ctrl |= PCI_EXP_IDO_CMP_EN;
2043 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2045 EXPORT_SYMBOL(pci_enable_ido);
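/*
 * Illustrative sketch (hypothetical helper): opting in to ID-based ordering
 * for both request and completion TLPs from this endpoint.
 */
static __maybe_unused void example_setup_ido(struct pci_dev *pdev)
{
        pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}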
2048 * pci_disable_ido - disable ID-based ordering on a device
2049 * @dev: the PCI device
2050 * @type: which types of IDO to disable
2052 void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2057 if (!pci_is_pcie(dev))
2060 pos = pci_pcie_cap(dev);
2064 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2065 if (type & PCI_EXP_IDO_REQUEST)
2066 ctrl &= ~PCI_EXP_IDO_REQ_EN;
2067 if (type & PCI_EXP_IDO_COMPLETION)
2068 ctrl &= ~PCI_EXP_IDO_CMP_EN;
2069 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2071 EXPORT_SYMBOL(pci_disable_ido);
2074 * pci_enable_obff - enable optimized buffer flush/fill
2076 * @type: type of signaling to use
2078 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2079 * signaling if possible, falling back to message signaling only if
2080 * WAKE# isn't supported. @type should indicate whether the PCIe link
2081 * be brought out of L0s or L1 to send the message. It should be either
2082 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2084 * If your device can benefit from receiving all messages, even at the
2085 * power cost of bringing the link back up from a low power state, use
2086 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the default).
2090 * Zero on success, appropriate error number on failure.
2092 int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2099 if (!pci_is_pcie(dev))
2102 pos = pci_pcie_cap(dev);
2106 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2107 if (!(cap & PCI_EXP_OBFF_MASK))
2108 return -ENOTSUPP; /* no OBFF support at all */
2110 /* Make sure the topology supports OBFF as well */
2112 ret = pci_enable_obff(dev->bus->self, type);
2117 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2118 if (cap & PCI_EXP_OBFF_WAKE)
2119 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2122 case PCI_EXP_OBFF_SIGNAL_L0:
2123 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2124 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2126 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2127 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2128 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2131 WARN(1, "bad OBFF signal type\n");
2135 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2139 EXPORT_SYMBOL(pci_enable_obff);
2142 * pci_disable_obff - disable optimized buffer flush/fill
2145 * Disable OBFF on @dev.
2147 void pci_disable_obff(struct pci_dev *dev)
2152 if (!pci_is_pcie(dev))
2155 pos = pci_pcie_cap(dev);
2159 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2160 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2161 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2163 EXPORT_SYMBOL(pci_disable_obff);
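/*
 * Illustrative sketch (hypothetical helper): requesting OBFF signaling that
 * does not bring the link out of low-power states, and falling back quietly
 * if the device or its upstream path lacks OBFF support.
 */
static __maybe_unused void example_setup_obff(struct pci_dev *pdev)
{
        if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
                dev_dbg(&pdev->dev, "OBFF not available\n");
}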
2166 * pci_ltr_supported - check whether a device supports LTR
2170 * True if @dev supports latency tolerance reporting, false otherwise.
2172 bool pci_ltr_supported(struct pci_dev *dev)
2177 if (!pci_is_pcie(dev))
2180 pos = pci_pcie_cap(dev);
2184 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2186 return cap & PCI_EXP_DEVCAP2_LTR;
2188 EXPORT_SYMBOL(pci_ltr_supported);
2191 * pci_enable_ltr - enable latency tolerance reporting
2194 * Enable LTR on @dev if possible, which means enabling it first on upstream ports.
2198 * Zero on success, errno on failure.
2200 int pci_enable_ltr(struct pci_dev *dev)
2206 if (!pci_ltr_supported(dev))
2209 pos = pci_pcie_cap(dev);
2213 /* Only primary function can enable/disable LTR */
2214 if (PCI_FUNC(dev->devfn) != 0)
2217 /* Enable upstream ports first */
2219 ret = pci_enable_ltr(dev->bus->self);
2224 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2225 ctrl |= PCI_EXP_LTR_EN;
2226 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2230 EXPORT_SYMBOL(pci_enable_ltr);
2233 * pci_disable_ltr - disable latency tolerance reporting
2236 void pci_disable_ltr(struct pci_dev *dev)
2241 if (!pci_ltr_supported(dev))
2244 pos = pci_pcie_cap(dev);
2248 /* Only primary function can enable/disable LTR */
2249 if (PCI_FUNC(dev->devfn) != 0)
2252 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2253 ctrl &= ~PCI_EXP_LTR_EN;
2254 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2256 EXPORT_SYMBOL(pci_disable_ltr);
2258 static int __pci_ltr_scale(int *val)
2262 while (*val > 1023) {
2263 *val = (*val + 31) / 32;
2270 * pci_set_ltr - set LTR latency values
2272 * @snoop_lat_ns: snoop latency in nanoseconds
2273 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2275 * Figure out the scale and set the LTR values accordingly.
2277 int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2279 int pos, ret, snoop_scale, nosnoop_scale;
2282 if (!pci_ltr_supported(dev))
2285 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2286 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2288 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2289 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2292 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2293 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2296 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2300 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2301 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2305 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2306 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2312 EXPORT_SYMBOL(pci_set_ltr);
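/*
 * Illustrative sketch (hypothetical helper): enabling LTR and programming
 * example latency values; the 1024/3072 ns figures are arbitrary numbers
 * chosen only to show the call.
 */
static __maybe_unused void example_setup_ltr(struct pci_dev *pdev)
{
        if (pci_ltr_supported(pdev) && !pci_enable_ltr(pdev))
                pci_set_ltr(pdev, 1024, 3072);
}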
2314 static int pci_acs_enable;
2317 * pci_request_acs - ask for ACS to be enabled if supported
2319 void pci_request_acs(void)
2325  * pci_enable_acs - enable ACS if hardware supports it
2326 * @dev: the PCI device
2328 void pci_enable_acs(struct pci_dev *dev)
2334 if (!pci_acs_enable)
2337 if (!pci_is_pcie(dev))
2340 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2344 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2345 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2347 /* Source Validation */
2348 ctrl |= (cap & PCI_ACS_SV);
2350 /* P2P Request Redirect */
2351 ctrl |= (cap & PCI_ACS_RR);
2353 /* P2P Completion Redirect */
2354 ctrl |= (cap & PCI_ACS_CR);
2356 /* Upstream Forwarding */
2357 ctrl |= (cap & PCI_ACS_UF);
2359 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2363 * pci_acs_enabled - test ACS against required flags for a given device
2364 * @pdev: device to test
2365 * @acs_flags: required PCI ACS flags
2367 * Return true if the device supports the provided flags. Automatically
2368 * filters out flags that are not implemented on multifunction devices.
2370 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2375 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2379 if (!pci_is_pcie(pdev))
2382 /* Filter out flags not applicable to multifunction */
2383 if (pdev->multifunction)
2384 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
2385 PCI_ACS_EC | PCI_ACS_DT);
2387 if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM ||
2388 pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
2389 pdev->multifunction) {
2390 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2394 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2395 if ((ctrl & acs_flags) != acs_flags)
2403 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2404 * @start: starting downstream device
2405 * @end: ending upstream device or NULL to search to the root bus
2406 * @acs_flags: required flags
2408 * Walk up a device tree from start to end testing PCI ACS support. If
2409 * any step along the way does not support the required flags, return false.
2411 bool pci_acs_path_enabled(struct pci_dev *start,
2412 struct pci_dev *end, u16 acs_flags)
2414 struct pci_dev *pdev, *parent = start;
2419 if (!pci_acs_enabled(pdev, acs_flags))
2422 if (pci_is_root_bus(pdev->bus))
2423 return (end == NULL);
2425 parent = pdev->bus->self;
2426 } while (pdev != end);
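/*
 * Example (illustrative sketch, not part of the original file; kept out of
 * the build): before handing a device to an untrusted user, a caller might
 * require that every bridge between the device and the root bus validates
 * the source and redirects peer-to-peer requests and completions upstream.
 * The helper name is hypothetical.
 */
#if 0
static bool example_device_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	return pci_acs_path_enabled(pdev, NULL, flags);
}
#endif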
2432 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2433 * @dev: the PCI device
2434  * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2436 * Perform INTx swizzling for a device behind one level of bridge. This is
2437 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2438 * behind bridges on add-in cards. For devices with ARI enabled, the slot
2439 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2440 * the PCI Express Base Specification, Revision 2.1)
2442 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2446 if (pci_ari_enabled(dev->bus))
2449 slot = PCI_SLOT(dev->devfn);
2451 return (((pin - 1) + slot) % 4) + 1;
2455 pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2463 while (!pci_is_root_bus(dev->bus)) {
2464 pin = pci_swizzle_interrupt_pin(dev, pin);
2465 dev = dev->bus->self;
2472 * pci_common_swizzle - swizzle INTx all the way to root bridge
2473 * @dev: the PCI device
2474  * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2476 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
2477 * bridges all the way up to a PCI root bus.
2479 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2483 while (!pci_is_root_bus(dev->bus)) {
2484 pin = pci_swizzle_interrupt_pin(dev, pin);
2485 dev = dev->bus->self;
2488 return PCI_SLOT(dev->devfn);
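/*
 * Example (illustrative, not part of the original file; kept out of the
 * build): a device in slot 3 using INTB (pin 2) behind one bridge swizzles
 * to (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA at the bridge.  Interrupt
 * routing code lets pci_common_swizzle() repeat that step up to the root
 * bus and then maps the resulting slot/pin pair with its own, board
 * specific table; the lookup below is hypothetical.
 */
#if 0
static int example_route_intx(struct pci_dev *pdev)
{
	u8 pin, slot;

	pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (!pin)
		return -1;			/* device does not use INTx */

	slot = pci_common_swizzle(pdev, &pin);
	return slot * 4 + (pin - 1);		/* hypothetical per-board map */
}
#endif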
2492  * pci_release_region - Release a PCI BAR
2493 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2494 * @bar: BAR to release
2496 * Releases the PCI I/O and memory resources previously reserved by a
2497 * successful call to pci_request_region. Call this function only
2498 * after all use of the PCI regions has ceased.
2500 void pci_release_region(struct pci_dev *pdev, int bar)
2502 struct pci_devres *dr;
2504 if (pci_resource_len(pdev, bar) == 0)
2506 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2507 release_region(pci_resource_start(pdev, bar),
2508 pci_resource_len(pdev, bar));
2509 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2510 release_mem_region(pci_resource_start(pdev, bar),
2511 pci_resource_len(pdev, bar));
2513 dr = find_pci_dr(pdev);
2515 dr->region_mask &= ~(1 << bar);
2519  * __pci_request_region - Reserve PCI I/O and memory resource
2520 * @pdev: PCI device whose resources are to be reserved
2521 * @bar: BAR to be reserved
2522 * @res_name: Name to be associated with resource.
2523 * @exclusive: whether the region access is exclusive or not
2525  * Mark the PCI region associated with PCI device @pdev BAR @bar as
2526 * being reserved by owner @res_name. Do not access any
2527 * address inside the PCI regions unless this call returns
2530 * If @exclusive is set, then the region is marked so that userspace
2531 * is explicitly not allowed to map the resource via /dev/mem or
2532 * sysfs MMIO access.
2534 * Returns 0 on success, or %EBUSY on error. A warning
2535 * message is also printed on failure.
2537 static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2540 struct pci_devres *dr;
2542 if (pci_resource_len(pdev, bar) == 0)
2545 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2546 if (!request_region(pci_resource_start(pdev, bar),
2547 pci_resource_len(pdev, bar), res_name))
2550 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2551 if (!__request_mem_region(pci_resource_start(pdev, bar),
2552 pci_resource_len(pdev, bar), res_name,
2557 dr = find_pci_dr(pdev);
2559 dr->region_mask |= 1 << bar;
2564 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2565 &pdev->resource[bar]);
2570 * pci_request_region - Reserve PCI I/O and memory resource
2571 * @pdev: PCI device whose resources are to be reserved
2572 * @bar: BAR to be reserved
2573 * @res_name: Name to be associated with resource
2575 * Mark the PCI region associated with PCI device @pdev BAR @bar as
2576 * being reserved by owner @res_name. Do not access any
2577 * address inside the PCI regions unless this call returns
2580 * Returns 0 on success, or %EBUSY on error. A warning
2581 * message is also printed on failure.
2583 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2585 return __pci_request_region(pdev, bar, res_name, 0);
2589  * pci_request_region_exclusive - Reserve PCI I/O and memory resource
2590 * @pdev: PCI device whose resources are to be reserved
2591 * @bar: BAR to be reserved
2592 * @res_name: Name to be associated with resource.
2594  * Mark the PCI region associated with PCI device @pdev BAR @bar as
2595 * being reserved by owner @res_name. Do not access any
2596 * address inside the PCI regions unless this call returns
2599 * Returns 0 on success, or %EBUSY on error. A warning
2600 * message is also printed on failure.
2602  * The key difference that _exclusive makes is that userspace is
2603 * explicitly not allowed to map the resource via /dev/mem or
2606 int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2608 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2611 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2612 * @pdev: PCI device whose resources were previously reserved
2613 * @bars: Bitmask of BARs to be released
2615 * Release selected PCI I/O and memory resources previously reserved.
2616 * Call this function only after all use of the PCI regions has ceased.
2618 void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2622 for (i = 0; i < 6; i++)
2623 if (bars & (1 << i))
2624 pci_release_region(pdev, i);
2627 int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2628 const char *res_name, int excl)
2632 for (i = 0; i < 6; i++)
2633 if (bars & (1 << i))
2634 if (__pci_request_region(pdev, i, res_name, excl))
2640 if (bars & (1 << i))
2641 pci_release_region(pdev, i);
2648 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2649 * @pdev: PCI device whose resources are to be reserved
2650 * @bars: Bitmask of BARs to be requested
2651 * @res_name: Name to be associated with resource
2653 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2654 const char *res_name)
2656 return __pci_request_selected_regions(pdev, bars, res_name, 0);
2659 int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2660 int bars, const char *res_name)
2662 return __pci_request_selected_regions(pdev, bars, res_name,
2663 IORESOURCE_EXCLUSIVE);
2667 * pci_release_regions - Release reserved PCI I/O and memory resources
2668 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
2670 * Releases all PCI I/O and memory resources previously reserved by a
2671 * successful call to pci_request_regions. Call this function only
2672 * after all use of the PCI regions has ceased.
2675 void pci_release_regions(struct pci_dev *pdev)
2677 pci_release_selected_regions(pdev, (1 << 6) - 1);
2681  * pci_request_regions - Reserve PCI I/O and memory resources
2682 * @pdev: PCI device whose resources are to be reserved
2683 * @res_name: Name to be associated with resource.
2685 * Mark all PCI regions associated with PCI device @pdev as
2686 * being reserved by owner @res_name. Do not access any
2687 * address inside the PCI regions unless this call returns
2690 * Returns 0 on success, or %EBUSY on error. A warning
2691 * message is also printed on failure.
2693 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2695 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2699  * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2700 * @pdev: PCI device whose resources are to be reserved
2701 * @res_name: Name to be associated with resource.
2703 * Mark all PCI regions associated with PCI device @pdev as
2704 * being reserved by owner @res_name. Do not access any
2705 * address inside the PCI regions unless this call returns
2708 * pci_request_regions_exclusive() will mark the region so that
2709 * /dev/mem and the sysfs MMIO access will not be allowed.
2711 * Returns 0 on success, or %EBUSY on error. A warning
2712 * message is also printed on failure.
2714 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2716 return pci_request_selected_regions_exclusive(pdev,
2717 ((1 << 6) - 1), res_name);
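/*
 * Example (illustrative sketch, not part of the original file; kept out of
 * the build): the usual probe-time pattern is to enable the device, claim
 * all of its BARs under the driver's name, and release everything again on
 * the error path or at remove time.  The helper names and the "example"
 * owner string are made up.
 */
#if 0
static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	/* ... map BARs and initialize the device ... */
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif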
2720 static void __pci_set_master(struct pci_dev *dev, bool enable)
2724 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2726 cmd = old_cmd | PCI_COMMAND_MASTER;
2728 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2729 if (cmd != old_cmd) {
2730 dev_dbg(&dev->dev, "%s bus mastering\n",
2731 enable ? "enabling" : "disabling");
2732 pci_write_config_word(dev, PCI_COMMAND, cmd);
2734 dev->is_busmaster = enable;
2738 * pcibios_set_master - enable PCI bus-mastering for device dev
2739 * @dev: the PCI device to enable
2741 * Enables PCI bus-mastering for the device. This is the default
2742 * implementation. Architecture specific implementations can override
2743 * this if necessary.
2745 void __weak pcibios_set_master(struct pci_dev *dev)
2749 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2750 if (pci_is_pcie(dev))
2753 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2755 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2756 else if (lat > pcibios_max_latency)
2757 lat = pcibios_max_latency;
2760 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2761 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2765 * pci_set_master - enables bus-mastering for device dev
2766 * @dev: the PCI device to enable
2768 * Enables bus-mastering on the device and calls pcibios_set_master()
2769 * to do the needed arch specific settings.
2771 void pci_set_master(struct pci_dev *dev)
2773 __pci_set_master(dev, true);
2774 pcibios_set_master(dev);
2778 * pci_clear_master - disables bus-mastering for device dev
2779 * @dev: the PCI device to disable
2781 void pci_clear_master(struct pci_dev *dev)
2783 __pci_set_master(dev, false);
2787 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2788 * @dev: the PCI device for which MWI is to be enabled
2790 * Helper function for pci_set_mwi.
2791 * Originally copied from drivers/net/acenic.c.
2792 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2794 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2796 int pci_set_cacheline_size(struct pci_dev *dev)
2800 if (!pci_cache_line_size)
2803	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2804	   equal to or a multiple of the right value. */
2805 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2806 if (cacheline_size >= pci_cache_line_size &&
2807 (cacheline_size % pci_cache_line_size) == 0)
2810 /* Write the correct value. */
2811 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2813 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2814 if (cacheline_size == pci_cache_line_size)
2817 dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2818 "supported\n", pci_cache_line_size << 2);
2822 EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2824 #ifdef PCI_DISABLE_MWI
2825 int pci_set_mwi(struct pci_dev *dev)
2830 int pci_try_set_mwi(struct pci_dev *dev)
2835 void pci_clear_mwi(struct pci_dev *dev)
2842 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2843 * @dev: the PCI device for which MWI is enabled
2845 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2847 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2850 pci_set_mwi(struct pci_dev *dev)
2855 rc = pci_set_cacheline_size(dev);
2859 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2860 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
2861 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2862 cmd |= PCI_COMMAND_INVALIDATE;
2863 pci_write_config_word(dev, PCI_COMMAND, cmd);
2870 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2871 * @dev: the PCI device for which MWI is enabled
2873 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2874 * Callers are not required to check the return value.
2876 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2878 int pci_try_set_mwi(struct pci_dev *dev)
2880 int rc = pci_set_mwi(dev);
2885 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2886 * @dev: the PCI device to disable
2888 * Disables PCI Memory-Write-Invalidate transaction on the device
2891 pci_clear_mwi(struct pci_dev *dev)
2895 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2896 if (cmd & PCI_COMMAND_INVALIDATE) {
2897 cmd &= ~PCI_COMMAND_INVALIDATE;
2898 pci_write_config_word(dev, PCI_COMMAND, cmd);
2901 #endif /* ! PCI_DISABLE_MWI */
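/*
 * Example (illustrative sketch, not part of the original file; kept out of
 * the build): MWI is purely an optimization, so most callers enable bus
 * mastering and then use pci_try_set_mwi(), ignoring the result instead of
 * failing probe when the cache line size cannot be programmed.
 */
#if 0
static void example_enable_dma(struct pci_dev *pdev)
{
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best effort, return value ignored */
}
#endif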
2904 * pci_intx - enables/disables PCI INTx for device dev
2905 * @pdev: the PCI device to operate on
2906 * @enable: boolean: whether to enable or disable PCI INTx
2908 * Enables/disables PCI INTx for device dev
2911 pci_intx(struct pci_dev *pdev, int enable)
2913 u16 pci_command, new;
2915 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2918 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2920 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2923 if (new != pci_command) {
2924 struct pci_devres *dr;
2926 pci_write_config_word(pdev, PCI_COMMAND, new);
2928 dr = find_pci_dr(pdev);
2929 if (dr && !dr->restore_intx) {
2930 dr->restore_intx = 1;
2931 dr->orig_intx = !enable;
2937 * pci_intx_mask_supported - probe for INTx masking support
2938 * @dev: the PCI device to operate on
2940  * Check if the device dev supports INTx masking via the config space
2943 bool pci_intx_mask_supported(struct pci_dev *dev)
2945 bool mask_supported = false;
2948 pci_cfg_access_lock(dev);
2950 pci_read_config_word(dev, PCI_COMMAND, &orig);
2951 pci_write_config_word(dev, PCI_COMMAND,
2952 orig ^ PCI_COMMAND_INTX_DISABLE);
2953 pci_read_config_word(dev, PCI_COMMAND, &new);
2956 * There's no way to protect against hardware bugs or detect them
2957 * reliably, but as long as we know what the value should be, let's
2958 * go ahead and check it.
2960 if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2961 dev_err(&dev->dev, "Command register changed from "
2962 "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2963 } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2964 mask_supported = true;
2965 pci_write_config_word(dev, PCI_COMMAND, orig);
2968 pci_cfg_access_unlock(dev);
2969 return mask_supported;
2971 EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2973 static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2975 struct pci_bus *bus = dev->bus;
2976 bool mask_updated = true;
2977 u32 cmd_status_dword;
2978 u16 origcmd, newcmd;
2979 unsigned long flags;
2983 * We do a single dword read to retrieve both command and status.
2984 * Document assumptions that make this possible.
2986 BUILD_BUG_ON(PCI_COMMAND % 4);
2987 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2989 raw_spin_lock_irqsave(&pci_lock, flags);
2991 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2993 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2996 * Check interrupt status register to see whether our device
2997 * triggered the interrupt (when masking) or the next IRQ is
2998 * already pending (when unmasking).
3000 if (mask != irq_pending) {
3001 mask_updated = false;
3005 origcmd = cmd_status_dword;
3006 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3008 newcmd |= PCI_COMMAND_INTX_DISABLE;
3009 if (newcmd != origcmd)
3010 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3013 raw_spin_unlock_irqrestore(&pci_lock, flags);
3015 return mask_updated;
3019 * pci_check_and_mask_intx - mask INTx on pending interrupt
3020 * @dev: the PCI device to operate on
3022 * Check if the device dev has its INTx line asserted, mask it and
3023  * return true in that case. False is returned if no interrupt was
3026 bool pci_check_and_mask_intx(struct pci_dev *dev)
3028 return pci_check_and_set_intx_mask(dev, true);
3030 EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3033  * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3034 * @dev: the PCI device to operate on
3036 * Check if the device dev has its INTx line asserted, unmask it if not
3037 * and return true. False is returned and the mask remains active if
3038 * there was still an interrupt pending.
3040 bool pci_check_and_unmask_intx(struct pci_dev *dev)
3042 return pci_check_and_set_intx_mask(dev, false);
3044 EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
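/*
 * Example (illustrative sketch, not part of the original file; kept out of
 * the build): a handler for a device without a per-device interrupt status
 * register can rely on the command/status based masking above.  The line is
 * masked only if this device actually asserted it; whoever services the
 * device later calls pci_check_and_unmask_intx() to re-arm it.  The handler
 * name is hypothetical and assumes the device was registered with
 * request_irq() passing its pci_dev as the cookie.
 */
#if 0
static irqreturn_t example_intx_handler(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* interrupt was not raised by us */

	/* ... notify whoever services the device ... */
	return IRQ_HANDLED;
}
#endif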
3047 * pci_msi_off - disables any msi or msix capabilities
3048 * @dev: the PCI device to operate on
3050 * If you want to use msi see pci_enable_msi and friends.
3051 * This is a lower level primitive that allows us to disable
3052 * msi operation at the device level.
3054 void pci_msi_off(struct pci_dev *dev)
3059 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3061 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3062 control &= ~PCI_MSI_FLAGS_ENABLE;
3063 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3065 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3067 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3068 control &= ~PCI_MSIX_FLAGS_ENABLE;
3069 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3072 EXPORT_SYMBOL_GPL(pci_msi_off);
3074 int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3076 return dma_set_max_seg_size(&dev->dev, size);
3078 EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3080 int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3082 return dma_set_seg_boundary(&dev->dev, mask);
3084 EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3086 static int pcie_flr(struct pci_dev *dev, int probe)
3091 u16 status, control;
3093 pos = pci_pcie_cap(dev);
3097 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
3098 if (!(cap & PCI_EXP_DEVCAP_FLR))
3104	/* Wait for Transaction Pending bit to clear */
3105 for (i = 0; i < 4; i++) {
3107 msleep((1 << (i - 1)) * 100);
3109 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3110 if (!(status & PCI_EXP_DEVSTA_TRPND))
3114 dev_err(&dev->dev, "transaction is not cleared; "
3115 "proceeding with reset anyway\n");
3118 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3119 control |= PCI_EXP_DEVCTL_BCR_FLR;
3120 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3127 static int pci_af_flr(struct pci_dev *dev, int probe)
3134 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3138 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3139 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3145	/* Wait for Transaction Pending bit to clear */
3146 for (i = 0; i < 4; i++) {
3148 msleep((1 << (i - 1)) * 100);
3150 pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3151 if (!(status & PCI_AF_STATUS_TP))
3155 dev_err(&dev->dev, "transaction is not cleared; "
3156 "proceeding with reset anyway\n");
3159 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3166 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3167 * @dev: Device to reset.
3168 * @probe: If set, only check if the device can be reset this way.
3170 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3171 * unset, it will be reinitialized internally when going from PCI_D3hot to
3172 * PCI_D0. If that's the case and the device is not in a low-power state
3173 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3175 * NOTE: This causes the caller to sleep for twice the device power transition
3176 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3177  * by default (i.e. unless the @dev's d3_delay field has a different value).
3178 * Moreover, only devices in D0 can be reset by this function.
3180 static int pci_pm_reset(struct pci_dev *dev, int probe)
3187 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3188 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3194 if (dev->current_state != PCI_D0)
3197 csr &= ~PCI_PM_CTRL_STATE_MASK;
3199 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3200 pci_dev_d3_sleep(dev);
3202 csr &= ~PCI_PM_CTRL_STATE_MASK;
3204 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3205 pci_dev_d3_sleep(dev);
3210 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3213 struct pci_dev *pdev;
3215 if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3218 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3225 pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3226 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3227 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3230 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3231 pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3237 static int __pci_dev_reset(struct pci_dev *dev, int probe)
3243 rc = pci_dev_specific_reset(dev, probe);
3247 rc = pcie_flr(dev, probe);
3251 rc = pci_af_flr(dev, probe);
3255 rc = pci_pm_reset(dev, probe);
3259 rc = pci_parent_bus_reset(dev, probe);
3264 static int pci_dev_reset(struct pci_dev *dev, int probe)
3269 pci_cfg_access_lock(dev);
3270 /* block PM suspend, driver probe, etc. */
3271 device_lock(&dev->dev);
3274 rc = __pci_dev_reset(dev, probe);
3277 device_unlock(&dev->dev);
3278 pci_cfg_access_unlock(dev);
3283 * __pci_reset_function - reset a PCI device function
3284 * @dev: PCI device to reset
3286 * Some devices allow an individual function to be reset without affecting
3287 * other functions in the same device. The PCI device must be responsive
3288 * to PCI config space in order to use this function.
3290 * The device function is presumed to be unused when this function is called.
3291 * Resetting the device will make the contents of PCI configuration space
3292 * random, so any caller of this must be prepared to reinitialise the
3293 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3296 * Returns 0 if the device function was successfully reset or negative if the
3297 * device doesn't support resetting a single function.
3299 int __pci_reset_function(struct pci_dev *dev)
3301 return pci_dev_reset(dev, 0);
3303 EXPORT_SYMBOL_GPL(__pci_reset_function);
3306 * __pci_reset_function_locked - reset a PCI device function while holding
3307 * the @dev mutex lock.
3308 * @dev: PCI device to reset
3310 * Some devices allow an individual function to be reset without affecting
3311 * other functions in the same device. The PCI device must be responsive
3312 * to PCI config space in order to use this function.
3314 * The device function is presumed to be unused and the caller is holding
3315 * the device mutex lock when this function is called.
3316 * Resetting the device will make the contents of PCI configuration space
3317 * random, so any caller of this must be prepared to reinitialise the
3318 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3321 * Returns 0 if the device function was successfully reset or negative if the
3322 * device doesn't support resetting a single function.
3324 int __pci_reset_function_locked(struct pci_dev *dev)
3326 return __pci_dev_reset(dev, 0);
3328 EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3331 * pci_probe_reset_function - check whether the device can be safely reset
3332 * @dev: PCI device to reset
3334 * Some devices allow an individual function to be reset without affecting
3335 * other functions in the same device. The PCI device must be responsive
3336 * to PCI config space in order to use this function.
3338 * Returns 0 if the device function can be reset or negative if the
3339 * device doesn't support resetting a single function.
3341 int pci_probe_reset_function(struct pci_dev *dev)
3343 return pci_dev_reset(dev, 1);
3347 * pci_reset_function - quiesce and reset a PCI device function
3348 * @dev: PCI device to reset
3350 * Some devices allow an individual function to be reset without affecting
3351 * other functions in the same device. The PCI device must be responsive
3352 * to PCI config space in order to use this function.
3354 * This function does not just reset the PCI portion of a device, but
3355 * clears all the state associated with the device. This function differs
3356 * from __pci_reset_function in that it saves and restores device state
3359 * Returns 0 if the device function was successfully reset or negative if the
3360 * device doesn't support resetting a single function.
3362 int pci_reset_function(struct pci_dev *dev)
3366 rc = pci_dev_reset(dev, 1);
3370 pci_save_state(dev);
3373 * both INTx and MSI are disabled after the Interrupt Disable bit
3374 * is set and the Bus Master bit is cleared.
3376 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3378 rc = pci_dev_reset(dev, 0);
3380 pci_restore_state(dev);
3384 EXPORT_SYMBOL_GPL(pci_reset_function);
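/*
 * Example (illustrative sketch, not part of the original file; kept out of
 * the build): before re-purposing an idle function, a caller may ask for a
 * full function reset and treat failure as "no usable reset method".  The
 * helper name is hypothetical.
 */
#if 0
static int example_quiesce_function(struct pci_dev *pdev)
{
	int ret = pci_reset_function(pdev);

	if (ret)
		dev_warn(&pdev->dev, "function reset failed: %d\n", ret);
	return ret;
}
#endif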
3387 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3388 * @dev: PCI device to query
3390 * Returns mmrbc: maximum designed memory read count in bytes
3391 * or appropriate error value.
3393 int pcix_get_max_mmrbc(struct pci_dev *dev)
3398 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3402 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3405 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3407 EXPORT_SYMBOL(pcix_get_max_mmrbc);
3410 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3411 * @dev: PCI device to query
3413 * Returns mmrbc: maximum memory read count in bytes
3414 * or appropriate error value.
3416 int pcix_get_mmrbc(struct pci_dev *dev)
3421 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3425 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3428 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3430 EXPORT_SYMBOL(pcix_get_mmrbc);
3433 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3434 * @dev: PCI device to query
3435 * @mmrbc: maximum memory read count in bytes
3436 * valid values are 512, 1024, 2048, 4096
3438  * If possible sets the maximum memory read byte count; some bridges have
3439  * errata that prevent this.
3441 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3447 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3450 v = ffs(mmrbc) - 10;
3452 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3456 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3459 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3462 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3465 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3467 if (v > o && dev->bus &&
3468 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3471 cmd &= ~PCI_X_CMD_MAX_READ;
3473 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3478 EXPORT_SYMBOL(pcix_set_mmrbc);
3481 * pcie_get_readrq - get PCI Express read request size
3482 * @dev: PCI device to query
3484 * Returns maximum memory read request in bytes
3485 * or appropriate error value.
3487 int pcie_get_readrq(struct pci_dev *dev)
3492 cap = pci_pcie_cap(dev);
3496 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3498 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3502 EXPORT_SYMBOL(pcie_get_readrq);
3505 * pcie_set_readrq - set PCI Express maximum memory read request
3506 * @dev: PCI device to query
3507 * @rq: maximum memory read count in bytes
3508 * valid values are 128, 256, 512, 1024, 2048, 4096
3510 * If possible sets maximum memory read request in bytes
3512 int pcie_set_readrq(struct pci_dev *dev, int rq)
3514 int cap, err = -EINVAL;
3517 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3520 cap = pci_pcie_cap(dev);
3524 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3528 * If using the "performance" PCIe config, we clamp the
3529 * read rq size to the max packet size to prevent the
3530 * host bridge generating requests larger than we can
3533 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3534 int mps = pcie_get_mps(dev);
3542 v = (ffs(rq) - 8) << 12;
3544 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3545 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3547 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3553 EXPORT_SYMBOL(pcie_set_readrq);
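/*
 * Example (illustrative sketch, not part of the original file; kept out of
 * the build): a driver whose DMA engine works best with 512-byte reads can
 * lower the maximum read request size; the value must be a power of two
 * between 128 and 4096, and may be clamped further under the "performance"
 * bus tuning policy.
 */
#if 0
static void example_tune_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}
#endif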
3556 * pcie_get_mps - get PCI Express maximum payload size
3557 * @dev: PCI device to query
3559 * Returns maximum payload size in bytes
3560 * or appropriate error value.
3562 int pcie_get_mps(struct pci_dev *dev)
3567 cap = pci_pcie_cap(dev);
3571 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3573 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3579 * pcie_set_mps - set PCI Express maximum payload size
3580 * @dev: PCI device to query
3581 * @mps: maximum payload size in bytes
3582 * valid values are 128, 256, 512, 1024, 2048, 4096
3584 * If possible sets maximum payload size
3586 int pcie_set_mps(struct pci_dev *dev, int mps)
3588 int cap, err = -EINVAL;
3591 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3595 if (v > dev->pcie_mpss)
3599 cap = pci_pcie_cap(dev);
3603 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3607 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3608 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3610 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3617 * pci_select_bars - Make BAR mask from the type of resource
3618 * @dev: the PCI device for which BAR mask is made
3619 * @flags: resource type mask to be selected
3621  * This helper routine makes a BAR mask from the type of resource.
3623 int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3626 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3627 if (pci_resource_flags(dev, i) & flags)
3633 * pci_resource_bar - get position of the BAR associated with a resource
3634 * @dev: the PCI device
3635 * @resno: the resource number
3636 * @type: the BAR type to be filled in
3638 * Returns BAR position in config space, or 0 if the BAR is invalid.
3640 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3644 if (resno < PCI_ROM_RESOURCE) {
3645 *type = pci_bar_unknown;
3646 return PCI_BASE_ADDRESS_0 + 4 * resno;
3647 } else if (resno == PCI_ROM_RESOURCE) {
3648 *type = pci_bar_mem32;
3649 return dev->rom_base_reg;
3650 } else if (resno < PCI_BRIDGE_RESOURCES) {
3651 /* device specific resource */
3652 reg = pci_iov_resource_bar(dev, resno, type);
3657 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3661 /* Some architectures require additional programming to enable VGA */
3662 static arch_set_vga_state_t arch_set_vga_state;
3664 void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3666 arch_set_vga_state = func; /* NULL disables */
3669 static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3670 unsigned int command_bits, u32 flags)
3672 if (arch_set_vga_state)
3673 return arch_set_vga_state(dev, decode, command_bits,
3679 * pci_set_vga_state - set VGA decode state on device and parents if requested
3680 * @dev: the PCI device
3681 * @decode: true = enable decoding, false = disable decoding
3682 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3683 * @flags: traverse ancestors and change bridges
3684 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3686 int pci_set_vga_state(struct pci_dev *dev, bool decode,
3687 unsigned int command_bits, u32 flags)
3689 struct pci_bus *bus;
3690 struct pci_dev *bridge;
3694 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3696 /* ARCH specific VGA enables */
3697 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3701 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3702 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3704 cmd |= command_bits;
3706 cmd &= ~command_bits;
3707 pci_write_config_word(dev, PCI_COMMAND, cmd);
3710 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3717 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3720 cmd |= PCI_BRIDGE_CTL_VGA;
3722 cmd &= ~PCI_BRIDGE_CTL_VGA;
3723 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3731 #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3732 static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3733 static DEFINE_SPINLOCK(resource_alignment_lock);
3736 * pci_specified_resource_alignment - get resource alignment specified by user.
3737 * @dev: the PCI device to get
3739 * RETURNS: Resource alignment if it is specified.
3740 * Zero if it is not specified.
3742 resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3744 int seg, bus, slot, func, align_order, count;
3745 resource_size_t align = 0;
3748 spin_lock(&resource_alignment_lock);
3749 p = resource_alignment_param;
3752 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3758 if (sscanf(p, "%x:%x:%x.%x%n",
3759 &seg, &bus, &slot, &func, &count) != 4) {
3761 if (sscanf(p, "%x:%x.%x%n",
3762 &bus, &slot, &func, &count) != 3) {
3763 /* Invalid format */
3764 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3770 if (seg == pci_domain_nr(dev->bus) &&
3771 bus == dev->bus->number &&
3772 slot == PCI_SLOT(dev->devfn) &&
3773 func == PCI_FUNC(dev->devfn)) {
3774 if (align_order == -1) {
3777 align = 1 << align_order;
3782 if (*p != ';' && *p != ',') {
3783 /* End of param or invalid format */
3788 spin_unlock(&resource_alignment_lock);
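/*
 * Example (illustrative, not part of the original file): booting with
 *
 *	pci=resource_alignment=20@0000:00:1f.0
 *
 * requests 2^20 (1 MiB) alignment for the memory BARs of device
 * 0000:00:1f.0; entries can be chained with ';' (or ','), and omitting the
 * "<order>@" prefix falls back to page alignment.  The device address here
 * is made up.
 */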
3793  * pci_is_reassigndev - check if the specified PCI device is a target device to reassign
3794 * @dev: the PCI device to check
3796  * RETURNS: non-zero if the PCI device is a target device to reassign,
3799 int pci_is_reassigndev(struct pci_dev *dev)
3801 return (pci_specified_resource_alignment(dev) != 0);
3805 * This function disables memory decoding and releases memory resources
3806 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
3807 * It also rounds up size to specified alignment.
3808 * Later on, the kernel will assign page-aligned memory resource back
3811 void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3815 resource_size_t align, size;
3818 if (!pci_is_reassigndev(dev))
3821 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3822 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3824 "Can't reassign resources to host bridge.\n");
3829 "Disabling memory decoding and releasing memory resources.\n");
3830 pci_read_config_word(dev, PCI_COMMAND, &command);
3831 command &= ~PCI_COMMAND_MEMORY;
3832 pci_write_config_word(dev, PCI_COMMAND, command);
3834 align = pci_specified_resource_alignment(dev);
3835 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3836 r = &dev->resource[i];
3837 if (!(r->flags & IORESOURCE_MEM))
3839 size = resource_size(r);
3843 "Rounding up size of resource #%d to %#llx.\n",
3844 i, (unsigned long long)size);
3849 /* Need to disable bridge's resource window,
3850 * to enable the kernel to reassign new resource
3853 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3854 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3855 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3856 r = &dev->resource[i];
3857 if (!(r->flags & IORESOURCE_MEM))
3859 r->end = resource_size(r) - 1;
3862 pci_disable_bridge_window(dev);
3866 ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3868 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3869 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3870 spin_lock(&resource_alignment_lock);
3871 strncpy(resource_alignment_param, buf, count);
3872 resource_alignment_param[count] = '\0';
3873 spin_unlock(&resource_alignment_lock);
3877 ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3880 spin_lock(&resource_alignment_lock);
3881 count = snprintf(buf, size, "%s", resource_alignment_param);
3882 spin_unlock(&resource_alignment_lock);
3886 static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3888 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3891 static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3892 const char *buf, size_t count)
3894 return pci_set_resource_alignment_param(buf, count);
3897 BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3898 pci_resource_alignment_store);
3900 static int __init pci_resource_alignment_sysfs_init(void)
3902 return bus_create_file(&pci_bus_type,
3903 &bus_attr_resource_alignment);
3906 late_initcall(pci_resource_alignment_sysfs_init);
3908 static void __devinit pci_no_domains(void)
3910 #ifdef CONFIG_PCI_DOMAINS
3911 pci_domains_supported = 0;
3916  * pci_ext_cfg_avail - can we access extended PCI config space?
3917 * @dev: The PCI device of the root bridge.
3919 * Returns 1 if we can access PCI extended config space (offsets
3920 * greater than 0xff). This is the default implementation. Architecture
3921 * implementations can override this.
3923 int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3928 void __weak pci_fixup_cardbus(struct pci_bus *bus)
3931 EXPORT_SYMBOL(pci_fixup_cardbus);
3933 static int __init pci_setup(char *str)
3936 char *k = strchr(str, ',');
3939 if (*str && (str = pcibios_setup(str)) && *str) {
3940 if (!strcmp(str, "nomsi")) {
3942 } else if (!strcmp(str, "noaer")) {
3944 } else if (!strncmp(str, "realloc=", 8)) {
3945 pci_realloc_get_opt(str + 8);
3946 } else if (!strncmp(str, "realloc", 7)) {
3947 pci_realloc_get_opt("on");
3948 } else if (!strcmp(str, "nodomains")) {
3950 } else if (!strncmp(str, "noari", 5)) {
3951 pcie_ari_disabled = true;
3952 } else if (!strncmp(str, "cbiosize=", 9)) {
3953 pci_cardbus_io_size = memparse(str + 9, &str);
3954 } else if (!strncmp(str, "cbmemsize=", 10)) {
3955 pci_cardbus_mem_size = memparse(str + 10, &str);
3956 } else if (!strncmp(str, "resource_alignment=", 19)) {
3957 pci_set_resource_alignment_param(str + 19,
3959 } else if (!strncmp(str, "ecrc=", 5)) {
3960 pcie_ecrc_get_policy(str + 5);
3961 } else if (!strncmp(str, "hpiosize=", 9)) {
3962 pci_hotplug_io_size = memparse(str + 9, &str);
3963 } else if (!strncmp(str, "hpmemsize=", 10)) {
3964 pci_hotplug_mem_size = memparse(str + 10, &str);
3965 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3966 pcie_bus_config = PCIE_BUS_TUNE_OFF;
3967 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
3968 pcie_bus_config = PCIE_BUS_SAFE;
3969 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
3970 pcie_bus_config = PCIE_BUS_PERFORMANCE;
3971 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3972 pcie_bus_config = PCIE_BUS_PEER2PEER;
3973 } else if (!strncmp(str, "pcie_scan_all", 13)) {
3974 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3976 printk(KERN_ERR "PCI: Unknown option `%s'\n",
3984 early_param("pci", pci_setup);
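/*
 * Example (illustrative, not part of the original file): the options above
 * can be combined on the kernel command line, e.g.
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M
 *
 * which disables MSI, selects the conservative MPS tuning policy and
 * reserves 8 MB of memory space for each hotplug bridge.
 */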
3986 EXPORT_SYMBOL(pci_reenable_device);
3987 EXPORT_SYMBOL(pci_enable_device_io);
3988 EXPORT_SYMBOL(pci_enable_device_mem);
3989 EXPORT_SYMBOL(pci_enable_device);
3990 EXPORT_SYMBOL(pcim_enable_device);
3991 EXPORT_SYMBOL(pcim_pin_device);
3992 EXPORT_SYMBOL(pci_disable_device);
3993 EXPORT_SYMBOL(pci_find_capability);
3994 EXPORT_SYMBOL(pci_bus_find_capability);
3995 EXPORT_SYMBOL(pci_release_regions);
3996 EXPORT_SYMBOL(pci_request_regions);
3997 EXPORT_SYMBOL(pci_request_regions_exclusive);
3998 EXPORT_SYMBOL(pci_release_region);
3999 EXPORT_SYMBOL(pci_request_region);
4000 EXPORT_SYMBOL(pci_request_region_exclusive);
4001 EXPORT_SYMBOL(pci_release_selected_regions);
4002 EXPORT_SYMBOL(pci_request_selected_regions);
4003 EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4004 EXPORT_SYMBOL(pci_set_master);
4005 EXPORT_SYMBOL(pci_clear_master);
4006 EXPORT_SYMBOL(pci_set_mwi);
4007 EXPORT_SYMBOL(pci_try_set_mwi);
4008 EXPORT_SYMBOL(pci_clear_mwi);
4009 EXPORT_SYMBOL_GPL(pci_intx);
4010 EXPORT_SYMBOL(pci_assign_resource);
4011 EXPORT_SYMBOL(pci_find_parent_resource);
4012 EXPORT_SYMBOL(pci_select_bars);
4014 EXPORT_SYMBOL(pci_set_power_state);
4015 EXPORT_SYMBOL(pci_save_state);
4016 EXPORT_SYMBOL(pci_restore_state);
4017 EXPORT_SYMBOL(pci_pme_capable);
4018 EXPORT_SYMBOL(pci_pme_active);
4019 EXPORT_SYMBOL(pci_wake_from_d3);
4020 EXPORT_SYMBOL(pci_target_state);
4021 EXPORT_SYMBOL(pci_prepare_to_sleep);
4022 EXPORT_SYMBOL(pci_back_from_sleep);
4023 EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);