 	return virt_to_phys(ptr);
 }
+static int pci32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+			    int nents, int direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	WARN_ON(nents == 0 || sglist[0].length == 0);
+
+	for_each_sg(sglist, sg, nents, i) {
+		BUG_ON(!sg_page(sg));
+
+		sg->dma_address = sg_phys(sg);
+	}
+
+	flush_write_buffers();
+	return nents;
+}
+
 static const struct dma_mapping_ops pci32_dma_ops = {
 	.map_single = pci32_map_single,
 	.unmap_single = NULL,
+	.map_sg = pci32_dma_map_sg,
 };
 const struct dma_mapping_ops *dma_ops = &pci32_dma_ops;
 	dma_ops->unmap_single(dev, addr, size, direction);
 }
-
+static inline int
+dma_map_sg(struct device *hwdev, struct scatterlist *sg,
+	   int nents, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	return dma_ops->map_sg(hwdev, sg, nents, direction);
+}
 #endif
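
With the common inline above, every dma_map_sg() call now dispatches through dma_ops->map_sg(). As an illustration only (not part of the patch), a caller would look roughly like the sketch below; the device pointer mydev, the kmalloc'd buffers and the omitted error handling are assumptions made for the example.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only; not part of the patch. */
static void example_sg_mapping(struct device *mydev, void *buf1, void *buf2)
{
	struct scatterlist sgl[2], *sg;
	int i, nents;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], buf1, PAGE_SIZE);	/* buffers assumed kmalloc'd */
	sg_set_buf(&sgl[1], buf2, PAGE_SIZE);

	/* goes through dma_ops->map_sg(); pci32_dma_map_sg() on 32-bit */
	nents = dma_map_sg(mydev, sgl, 2, DMA_TO_DEVICE);

	for_each_sg(sgl, sg, nents, i)
		pr_info("sg %d -> bus 0x%llx, len %u\n", i,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

	/* unmap with the nents originally passed in, not the returned count */
	dma_unmap_sg(mydev, sgl, 2, DMA_TO_DEVICE);
}
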
 void dma_free_coherent(struct device *dev, size_t size,
 		       void *vaddr, dma_addr_t dma_handle);
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
-	   enum dma_data_direction direction)
-{
-	struct scatterlist *sg;
-	int i;
-
-	BUG_ON(!valid_dma_direction(direction));
-	WARN_ON(nents == 0 || sglist[0].length == 0);
-
-	for_each_sg(sglist, sg, nents, i) {
-		BUG_ON(!sg_page(sg));
-
-		sg->dma_address = sg_phys(sg);
-	}
-
-	flush_write_buffers();
-	return nents;
-}
-
 static inline dma_addr_t
 dma_map_page(struct device *dev, struct page *page, unsigned long offset,
 	     size_t size, enum dma_data_direction direction)
 	flush_write_buffers();
 }
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	return dma_ops->map_sg(hwdev, sg, nents, direction);
-}
-
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 	     int direction)
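
The 64-bit inline removed above was already a thin dispatch through dma_ops->map_sg(), which is why it can be dropped in favour of the shared wrapper: any backend that fills in the map_sg hook of struct dma_mapping_ops keeps working unchanged. A minimal, hypothetical sketch of such a backend follows; my_dma_map_sg and my_dma_ops are illustrative names, not from the patch.

/* Hypothetical backend registration; illustrative only. */
static int my_dma_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nents, int direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		/* a real IOMMU backend would set up an I/O mapping here;
		 * the physical address stands in for that step */
		sg->dma_address = sg_phys(sg);
	}
	return nents;
}

static const struct dma_mapping_ops my_dma_ops = {
	.map_sg = my_dma_map_sg,
};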