#define ASMARM_DEVICE_H
struct dev_archdata {
- const struct dma_map_ops *dma_ops;
#ifdef CONFIG_DMABOUNCE
struct dmabounce_device_info *dmabounce;
#endif
static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
return &arm_dma_ops;
}
static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
BUG_ON(!dev);
- dev->archdata.dma_ops = ops;
+ dev->dma_ops = ops;
}
#define HAVE_ARCH_DMA_SUPPORTED 1
#define __ASM_DEVICE_H
struct dev_archdata {
- const struct dma_map_ops *dma_ops;
#ifdef CONFIG_IOMMU_API
void *iommu; /* private IOMMU data */
#endif
static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
/*
* We expect no ISA devices, and all other DMA masters are expected to
return false;
}
- dev->archdata.dma_ops = &iommu_dma_ops;
+ dev->dma_ops = &iommu_dma_ops;
return true;
}
void arch_teardown_dma_ops(struct device *dev)
{
- dev->archdata.dma_ops = NULL;
+ dev->dma_ops = NULL;
}
#else
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- if (!dev->archdata.dma_ops)
- dev->archdata.dma_ops = &swiotlb_dma_ops;
+ if (!dev->dma_ops)
+ dev->dma_ops = &swiotlb_dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
* This file is released under the GPLv2
*/
struct dev_archdata {
- const struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
return &dma_noop_ops;
}
#ifndef _ASM_MIPS_DEVICE_H
#define _ASM_MIPS_DEVICE_H
-struct dma_map_ops;
-
struct dev_archdata {
- /* DMA operations on that device */
- const struct dma_map_ops *dma_ops;
-
#ifdef CONFIG_DMA_PERDEV_COHERENT
/* Non-zero if DMA is coherent with CPU caches */
bool dma_coherent;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
else
return mips_dma_map_ops;
}
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
}
- dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+ dev->dev.dma_ops = octeon_pci_dma_map_ops;
return 0;
}
#ifndef _ASM_POWERPC_DEVICE_H
#define _ASM_POWERPC_DEVICE_H
-struct dma_map_ops;
struct device_node;
#ifdef CONFIG_PPC64
struct pci_dn;
* drivers/macintosh/macio_asic.c
*/
struct dev_archdata {
- /* DMA operations on that device */
- const struct dma_map_ops *dma_ops;
-
/*
* These two used to be a union. However, with the hybrid ops we need
* both so here we store both a DMA offset for direct mappings and
if (unlikely(dev == NULL))
return NULL;
- return dev->archdata.dma_ops;
+ return dev->dma_ops;
}
static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
- dev->archdata.dma_ops = ops;
+ dev->dma_ops = ops;
}
/*
struct dev_archdata __maybe_unused *sd = &dev->archdata;
#ifdef CONFIG_SWIOTLB
- if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
+ if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif
return 0;
/* We use the PCI DMA ops */
- dev->archdata.dma_ops = get_pci_dma_ops();
+ dev->dma_ops = get_pci_dma_ops();
cell_dma_dev_setup(dev);
*/
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
- dev->dev.archdata.dma_ops = &dma_direct_ops;
+ dev->dev.dma_ops = &dma_direct_ops;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
return 0;
/* We use the direct ops for localbus */
- dev->archdata.dma_ops = &dma_direct_ops;
+ dev->dma_ops = &dma_direct_ops;
return 0;
}
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_IOC0:
- dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+ dev->core.dma_ops = &ps3_ioc0_dma_ops;
dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
break;
case PS3_DEVICE_TYPE_SB:
- dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+ dev->core.dma_ops = &ps3_sb_dma_ops;
dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
break;
return -ENOMEM;
dev->dev.bus = &ibmebus_bus_type;
- dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+ dev->dev.dma_ops = &ibmebus_dma_ops;
ret = of_device_add(dev);
if (ret)
* This file is released under the GPLv2
*/
struct dev_archdata {
- const struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
return &dma_noop_ops;
}
int i;
pdev->dev.groups = zpci_attr_groups;
- pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
+ pdev->dev.dma_ops = &s390_pci_dma_ops;
zpci_map_resources(pdev);
for (i = 0; i < PCI_BAR_COUNT; i++) {
#define _ASM_TILE_DEVICE_H
struct dev_archdata {
- /* DMA operations on that device */
- const struct dma_map_ops *dma_ops;
-
/* Offset of the DMA address from the PA. */
dma_addr_t dma_offset;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
else
return tile_dma_map_ops;
}
static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
- dev->archdata.dma_ops = ops;
+ dev->dma_ops = ops;
}
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
#define _ASM_X86_DEVICE_H
struct dev_archdata {
-#ifdef CONFIG_X86_DEV_DMA_OPS
- const struct dma_map_ops *dma_ops;
-#endif
#if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
void *iommu; /* hook for IOMMU specific extension */
#endif
#ifndef CONFIG_X86_DEV_DMA_OPS
return dma_ops;
#else
- if (unlikely(!dev) || !dev->archdata.dma_ops)
+ if (unlikely(!dev) || !dev->dma_ops)
return dma_ops;
else
- return dev->archdata.dma_ops;
+ return dev->dma_ops;
#endif
}
tbl = find_iommu_table(&dev->dev);
if (translation_enabled(tbl))
- dev->dev.archdata.dma_ops = &calgary_dma_ops;
+ dev->dev.dma_ops = &calgary_dma_ops;
}
return ret;
calgary_disable_translation(dev);
calgary_free_bus(dev);
pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
- dev->dev.archdata.dma_ops = NULL;
+ dev->dev.dma_ops = NULL;
} while (1);
return ret;
spin_lock(&dma_domain_list_lock);
list_for_each_entry(domain, &dma_domain_list, node) {
if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
- pdev->dev.archdata.dma_ops = domain->dma_ops;
+ pdev->dev.dma_ops = domain->dma_ops;
break;
}
}
return;
pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
- pdev->dev.archdata.dma_ops = &sta2x11_dma_ops;
+ pdev->dev.dma_ops = &sta2x11_dma_ops;
/* We must enable all devices as master, for audio DMA to work */
pci_set_master(pdev);
{
struct sta2x11_mapping *map;
- if (dev->archdata.dma_ops != &sta2x11_dma_ops) {
+ if (dev->dma_ops != &sta2x11_dma_ops) {
if (!dev->dma_mask)
return false;
return addr + size - 1 <= *dev->dma_mask;
*/
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
- if (dev->archdata.dma_ops != &sta2x11_dma_ops)
+ if (dev->dma_ops != &sta2x11_dma_ops)
return paddr;
return p2a(paddr, to_pci_dev(dev));
}
*/
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
- if (dev->archdata.dma_ops != &sta2x11_dma_ops)
+ if (dev->dma_ops != &sta2x11_dma_ops)
return daddr;
return a2p(daddr, to_pci_dev(dev));
}
#ifndef _ASM_XTENSA_DEVICE_H
#define _ASM_XTENSA_DEVICE_H
-struct dma_map_ops;
-
struct dev_archdata {
- /* DMA operations on that device */
- const struct dma_map_ops *dma_ops;
};
struct pdev_archdata {
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (dev && dev->archdata.dma_ops)
- return dev->archdata.dma_ops;
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
else
return &xtensa_dma_map_ops;
}
int i;
pr_debug("device = %p, device->dma_ops = %p\n", device,
- device->dma_ops);
+ device->dma_device->dma_ops);
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev)
iommu_group_remove_device(dev);
/* Remove dma-ops */
- dev->archdata.dma_ops = NULL;
+ dev->dma_ops = NULL;
/*
* We keep dev_data around for unplugged devices and reuse it when the
dev_name(dev));
iommu_ignore_device(dev);
- dev->archdata.dma_ops = &nommu_dma_ops;
+ dev->dma_ops = &nommu_dma_ops;
goto out;
}
init_iommu_group(dev);
if (domain->type == IOMMU_DOMAIN_IDENTITY)
dev_data->passthrough = true;
else
- dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ dev->dma_ops = &amd_iommu_dma_ops;
out:
iommu_completion_wait(iommu);
mbdev->dev.parent = pdev;
mbdev->id.device = id;
mbdev->id.vendor = MBUS_DEV_ANY_ID;
- mbdev->dev.archdata.dma_ops = dma_ops;
+ mbdev->dev.dma_ops = dma_ops;
mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
mbdev->dev.release = mbus_release_dev;
sdev->dev.parent = pdev;
sdev->id.device = id;
sdev->id.vendor = SCIF_DEV_ANY_ID;
- sdev->dev.archdata.dma_ops = dma_ops;
+ sdev->dev.dma_ops = dma_ops;
sdev->dev.release = scif_release_dev;
sdev->hw_ops = hw_ops;
sdev->dnode = dnode;
vdev->dev.parent = pdev;
vdev->id.device = id;
vdev->id.vendor = VOP_DEV_ANY_ID;
- vdev->dev.archdata.dma_ops = dma_ops;
+ vdev->dev.dma_ops = dma_ops;
vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
vdev->dev.release = vop_release_dev;
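All of the hunks above make the same substitution: code that previously reached into the architecture-private dev_archdata now reads and writes the dma_ops pointer on struct device, and the final hunk below adds that member to struct device itself. The following is a minimal sketch of the resulting idiom, not code from the patch; my_bus_dma_ops, my_arch_default_dma_ops, my_bus_setup_dev() and my_get_dma_ops() are hypothetical names used only for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical per-bus and architecture-default DMA ops tables (illustration only). */
static const struct dma_map_ops my_bus_dma_ops;
static const struct dma_map_ops my_arch_default_dma_ops;

static void my_bus_setup_dev(struct device *dev)
{
	/* Previously this would have been: dev->archdata.dma_ops = &my_bus_dma_ops; */
	dev->dma_ops = &my_bus_dma_ops;
}

static const struct dma_map_ops *my_get_dma_ops(struct device *dev)
{
	/* Same shape as the per-arch get_dma_ops() helpers in the hunks above:
	 * prefer the per-device pointer, fall back to the architecture default. */
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return &my_arch_default_dma_ops;
}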
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
+ const struct dma_map_ops *dma_ops;
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as