+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_DMA_MAPPING_H
#define _ALPHA_DMA_MAPPING_H
return dma_ops;
}
- #define dma_cache_sync(dev, va, size, dir) ((void)0)
-
#endif /* _ALPHA_DMA_MAPPING_H */
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_CRIS_DMA_MAPPING_H
#define _ASM_CRIS_DMA_MAPPING_H
}
#endif
- static inline void
- dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- }
-
#endif
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
return &frv_dma_ops;
}
- static inline
- void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- flush_write_buffers();
- }
-
#endif /* _ASM_DMA_MAPPING_H */
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H
return daddr;
}
- static inline void
- dma_cache_sync (struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
- {
- /*
- * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
- * ensure that dma_cache_sync() enforces order, hence the mb().
- */
- mb();
- }
-
#endif /* _ASM_IA64_DMA_MAPPING_H */
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_DMA_MAPPING_H
#define _ASM_M32R_DMA_MAPPING_H
return &dma_noop_ops;
}
- static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- }
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_DMA_MAPPING_H
#define _M68K_DMA_MAPPING_H
return &m68k_dma_ops;
}
- static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
- {
- /* we use coherent allocation, so not much to do here. */
- }
-
#endif /* _M68K_DMA_MAPPING_H */
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_METAG_DMA_MAPPING_H
#define _ASM_METAG_DMA_MAPPING_H
return &metag_dma_ops;
}
- /*
- * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to
- * do any flushing here.
- */
- static inline void
- dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- }
-
#endif
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009-2010 PetaLogix
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
+ #include <asm/cacheflush.h>
#define NOT_COHERENT_CACHE
#endif
}
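+ /* Flush the data cache for DMA_TO_DEVICE/DMA_BIDIRECTIONAL transfers,
+ * invalidate it for DMA_FROM_DEVICE, over a physical address range. */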
+ static inline void __dma_sync(unsigned long paddr,
+ size_t size, enum dma_data_direction direction)
+ {
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ flush_dcache_range(paddr, paddr + size);
+ break;
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range(paddr, paddr + size);
+ break;
+ default:
+ BUG();
+ }
+ }
+
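For context, a minimal sketch of how a cache-incoherent port's map_page hook would call the helper above before handing a buffer to the device; everything here except __dma_sync() itself (the function name, the attrs check, the no-IOMMU assumption) is illustrative, not taken from this patch:

static dma_addr_t sketch_dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction,
				      unsigned long attrs)
{
	unsigned long paddr = page_to_phys(page) + offset;

	/* Push CPU writes out (or drop stale lines) before DMA starts,
	 * unless the caller asked to skip the CPU sync. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(paddr, size, direction);

	return paddr;	/* no IOMMU: the device sees the physical address */
}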
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
static inline void dma_mark_clean(void *addr, size_t size) {}
- extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction);
-
#define arch_setup_dma_ops arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu,
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_DMA_MAPPING_H
#define _PARISC_DMA_MAPPING_H
return hppa_dma_ops;
}
- static inline void
- dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- if (hppa_dma_ops->sync_single_for_cpu)
- flush_kernel_dcache_range((unsigned long)vaddr, size);
- }
-
static inline void *
parisc_walk_tree(struct device *dev)
{
+// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
flush_kernel_vmap_range(sg_virt(sg), sg->length);
}
+ static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+ {
+ flush_kernel_dcache_range((unsigned long)vaddr, size);
+ }
+
const struct dma_map_ops pcxl_dma_ops = {
.dma_supported = pa11_dma_supported,
.alloc = pa11_dma_alloc,
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .cache_sync = pa11_dma_cache_sync,
};
static void *pcx_dma_alloc(struct device *dev, size_t size,
.sync_single_for_device = pa11_dma_sync_single_for_device,
.sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
.sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ .cache_sync = pa11_dma_cache_sync,
};
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2004 IBM
*
#define ARCH_HAS_DMA_MMAP_COHERENT
- static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- BUG_ON(direction == DMA_NONE);
- __dma_sync(vaddr, size, (int)direction);
- }
-
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_DMA_MAPPING_H
#define _ASM_S390_DMA_MAPPING_H
return &dma_noop_ops;
}
- static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
- {
- }
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
return dma_ops;
}
- void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir);
-
- /* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
unsigned long attrs);
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs);
+ void sh_sync_dma_for_device(void *vaddr, size_t size,
+ enum dma_data_direction dir);
+
#endif /* __ASM_SH_DMA_MAPPING_H */
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_DMA_MAPPING_H
#define ___ASM_SPARC_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/dma-debug.h>
- static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
- {
- /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
- * routine can be a nop.
- */
- }
-
extern const struct dma_map_ops *dma_ops;
extern const struct dma_map_ops pci32_dma_ops;
+/* SPDX-License-Identifier: GPL-2.0 */
/* asm/floppy.h: Sparc specific parts of the Floppy driver.
*
* Copyright (C) 1995 David S. Miller (davem@davemloft.net)
#define fd_set_dma_count(count) sun_fd_set_dma_count(count)
#define fd_enable_irq() /* nothing... */
#define fd_disable_irq() /* nothing... */
- #define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fd_request_irq()
#define fd_free_irq() /* nothing... */
#if 0 /* P3: added by Alain, these cause an MMU corruption. 19960524 XXX */
+/* SPDX-License-Identifier: GPL-2.0 */
/* floppy.h: Sparc specific parts of the Floppy driver.
*
* Copyright (C) 1996, 2007, 2008 David S. Miller (davem@davemloft.net)
#define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr)
#define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count)
#define get_dma_residue(x) sun_fdops.get_dma_residue()
- #define fd_cacheflush(addr, size) /* nothing... */
#define fd_request_irq() sun_fdops.fd_request_irq()
#define fd_free_irq() sun_fdops.fd_free_irq()
#define fd_eject(drive) sun_fdops.fd_eject(drive)
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H
}
#endif /* CONFIG_X86_DMA_REMAP */
- static inline void
- dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction dir)
- {
- flush_write_buffers();
- }
-
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
{
struct device_driver *drv = dev->driver;
- /*
- * Devices having power.ignore_children set may still be necessary for
- * suspending their children in the next phase of device suspend.
- */
- if (dev->power.ignore_children)
- pm_runtime_resume(dev);
-
if (drv && drv->pm && drv->pm->prepare) {
int error = drv->pm->prepare(dev);
- if (error)
+ if (error < 0)
return error;
+
+ if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
+ return 0;
}
return pci_dev_keep_suspended(to_pci_dev(dev));
}
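For DPM_FLAG_SMART_PREPARE to have an effect, the driver's ->prepare() callback has to report whether its device may stay suspended. A hypothetical callback (not part of this series) might be as simple as:

/* A positive return lets the PM core take the direct_complete path;
 * with DPM_FLAG_SMART_PREPARE set, pci_pm_prepare() above only
 * consults pci_dev_keep_suspended() when the driver agrees. */
static int sketch_prepare(struct device *dev)
{
	return pm_runtime_suspended(dev) ? 1 : 0;
}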
if (!pm) {
pci_pm_default_suspend(pci_dev);
- goto Fixup;
+ return 0;
}
/*
- * PCI devices suspended at run time need to be resumed at this point,
- * because in general it is necessary to reconfigure them for system
- * suspend. Namely, if the device is supposed to wake up the system
- * from the sleep state, we may need to reconfigure it for this purpose.
- * In turn, if the device is not supposed to wake up the system from the
- * sleep state, we'll have to prevent it from signaling wake-up.
+ * PCI devices suspended at run time may need to be resumed at this
+ * point, because in general it may be necessary to reconfigure them for
+ * system suspend. Namely, if the device is expected to wake up the
+ * system from the sleep state, it may have to be reconfigured for this
+ * purpose, or if the device is not expected to wake up the system from
+ * the sleep state, it should be prevented from signaling wakeup events
+ * going forward.
+ *
+ * Also if the driver of the device does not indicate that its system
+ * suspend callbacks can cope with runtime-suspended devices, it is
+ * better to resume the device from runtime suspend here.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ !pci_dev_keep_suspended(pci_dev))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->suspend) {
}
}
- Fixup:
- pci_fixup_device(pci_fixup_suspend, pci_dev);
-
return 0;
}
+static int pci_pm_suspend_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+ return pm_generic_suspend_late(dev);
+}
+
static int pci_pm_suspend_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
pci_prepare_to_sleep(pci_dev);
}
+ dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+ pci_power_name(pci_dev->current_state));
+
pci_pm_set_unknown_state(pci_dev);
/*
struct device_driver *drv = dev->driver;
int error = 0;
+ /*
+ * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend
+ * during system suspend, so update their runtime PM status to "active"
+ * as they are going to be put into D0 shortly.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
#else /* !CONFIG_SUSPEND */
#define pci_pm_suspend NULL
+#define pci_pm_suspend_late NULL
#define pci_pm_suspend_noirq NULL
#define pci_pm_resume NULL
#define pci_pm_resume_noirq NULL
* devices should not be touched during freeze/thaw transitions,
* however.
*/
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->freeze) {
return error;
}
- if (pcibios_pm_ops.freeze)
- return pcibios_pm_ops.freeze(dev);
-
return 0;
}
+static int pci_pm_freeze_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+ return pm_generic_freeze_late(dev);
+}
+
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
struct device_driver *drv = dev->driver;
int error = 0;
+ /*
+ * If the device is in runtime suspend, the code below may not work
+ * correctly with it, so skip that code and make the PM core skip all of
+ * the subsequent "thaw" callbacks for the device.
+ */
+ if (dev_pm_smart_suspend_and_suspended(dev)) {
+ dev->power.direct_complete = true;
+ return 0;
+ }
+
if (pcibios_pm_ops.thaw_noirq) {
error = pcibios_pm_ops.thaw_noirq(dev);
if (error)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
- if (pcibios_pm_ops.thaw) {
- error = pcibios_pm_ops.thaw(dev);
- if (error)
- return error;
- }
-
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
if (!pm) {
pci_pm_default_suspend(pci_dev);
- goto Fixup;
+ return 0;
}
/* The reason to do that is the same as in pci_pm_suspend(). */
- pm_runtime_resume(dev);
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+ !pci_dev_keep_suspended(pci_dev))
+ pm_runtime_resume(dev);
pci_dev->state_saved = false;
if (pm->poweroff) {
return error;
}
- Fixup:
- pci_fixup_device(pci_fixup_suspend, pci_dev);
+ return 0;
+}
- if (pcibios_pm_ops.poweroff)
- return pcibios_pm_ops.poweroff(dev);
+static int pci_pm_poweroff_late(struct device *dev)
+{
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
- return 0;
+ pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
+
+ return pm_generic_poweroff_late(dev);
}
static int pci_pm_poweroff_noirq(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
if (pci_has_legacy_pm_support(to_pci_dev(dev)))
return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
struct device_driver *drv = dev->driver;
int error = 0;
+ /* This is analogous to the pci_pm_resume_noirq() case. */
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ pm_runtime_set_active(dev);
+
if (pcibios_pm_ops.restore_noirq) {
error = pcibios_pm_ops.restore_noirq(dev);
if (error)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
- if (pcibios_pm_ops.restore) {
- error = pcibios_pm_ops.restore(dev);
- if (error)
- return error;
- }
-
/*
* This is necessary for the hibernation error path in which restore is
* called without restoring the standard config registers of the device.
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define pci_pm_freeze NULL
+#define pci_pm_freeze_late NULL
#define pci_pm_freeze_noirq NULL
#define pci_pm_thaw NULL
#define pci_pm_thaw_noirq NULL
#define pci_pm_poweroff NULL
+#define pci_pm_poweroff_late NULL
#define pci_pm_poweroff_noirq NULL
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
.prepare = pci_pm_prepare,
.complete = pci_pm_complete,
.suspend = pci_pm_suspend,
+ .suspend_late = pci_pm_suspend_late,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
+ .freeze_late = pci_pm_freeze_late,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
+ .poweroff_late = pci_pm_poweroff_late,
.restore = pci_pm_restore,
.suspend_noirq = pci_pm_suspend_noirq,
.resume_noirq = pci_pm_resume_noirq,
.drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
.num_vf = pci_bus_num_vf,
+ .force_dma = true,
};
EXPORT_SYMBOL(pci_bus_type);
* @p: The private data of the driver core, only the driver core can
* touch this.
* @lock_key: Lock class key for use by the lock validator
+ * @force_dma: Assume devices on this bus should be set up by dma_configure()
+ * even if DMA capability is not explicitly described by firmware.
*
* A bus is a channel between the processor and one or more devices. For the
* purposes of the device model, all devices are connected via a bus, even if
struct subsys_private *p;
struct lock_class_key lock_key;
+
+ bool force_dma;
};
extern int __must_check bus_register(struct bus_type *bus);
* @devnode: Callback to provide the devtmpfs.
* @class_release: Called to release this class.
* @dev_release: Called to release the device.
- * @suspend: Used to put the device to sleep mode, usually to a low power
- * state.
- * @resume: Used to bring the device from the sleep mode.
* @shutdown_pre: Called at shut-down time before driver shutdown.
* @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belonging to this class.
void (*class_release)(struct class *class);
void (*dev_release)(struct device *dev);
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
int (*shutdown_pre)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
#endif
}
+static inline void dev_pm_set_driver_flags(struct device *dev, u32 flags)
+{
+ dev->power.driver_flags = flags;
+}
+
+static inline bool dev_pm_test_driver_flags(struct device *dev, u32 flags)
+{
+ return !!(dev->power.driver_flags & flags);
+}
+
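These helpers are how drivers opt in to the new behavior; a hypothetical PCI driver probe() (only dev_pm_set_driver_flags() and the DPM_FLAG_* names come from this series) would set the flags once at bind time:

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Declare that this driver's system sleep callbacks cope with a
	 * runtime-suspended device, so it can be left suspended. */
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND);
	return 0;
}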
static inline void device_lock(struct device *dev)
{
mutex_lock(&dev->mutex);
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H
void (*sync_sg_for_device)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir);
+ void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
+ static inline void
+ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+ {
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->cache_sync)
+ ops->cache_sync(dev, vaddr, size, dir);
+ }
+
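A usage sketch for the wrapper above: dma_cache_sync() pairs with memory obtained via DMA_ATTR_NON_CONSISTENT, and becomes a no-op on architectures that leave .cache_sync unset (the surrounding driver code here is illustrative):

void *buf;
dma_addr_t handle;

buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL,
		      DMA_ATTR_NON_CONSISTENT);
if (buf) {
	memset(buf, 0, PAGE_SIZE);	/* CPU fills the buffer ... */
	dma_cache_sync(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);	/* ... then flushes it */
}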
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);