plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
-static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
+static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nents; i++, sg++) {
+ for_each_sg(sglist, sg, nents, i) {
if (!plat_device_is_coherent(dev))
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
sg->offset;
}

return nents;
}
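/*
 * Why for_each_sg(): a plain sg++ assumes every entry sits in one
 * flat array, which breaks once the scatterlist is chained.  As a
 * reference sketch (not part of this patch), the helper in
 * include/linux/scatterlist.h is essentially:
 *
 *	#define for_each_sg(sglist, sg, nr, __i)	\
 *		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 *
 * sg_next() knows how to hop across chain entries, so the converted
 * loop stays correct for chained lists.
 */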
-static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
int i;
+ struct scatterlist *sg;
- for (i = 0; i < nhwentries; i++, sg++) {
+ for_each_sg(sglist, sg, nhwentries, i) {
if (!plat_device_is_coherent(dev) &&
direction != DMA_TO_DEVICE)
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
}
}
static void mips_dma_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
- if (cpu_needs_post_dma_flush(dev))
- for (i = 0; i < nelems; i++, sg++)
+ if (cpu_needs_post_dma_flush(dev)) {
+ for_each_sg(sglist, sg, nelems, i) {
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+ }
+ }
plat_post_dma_flush(dev);
}
static void mips_dma_sync_sg_for_device(struct device *dev,
- struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
int i;
+ struct scatterlist *sg;
- if (!plat_device_is_coherent(dev))
- for (i = 0; i < nelems; i++, sg++)
+ if (!plat_device_is_coherent(dev)) {
+ for_each_sg(sglist, sg, nelems, i) {
__dma_sync(sg_page(sg), sg->offset, sg->length,
direction);
+ }
+ }
}
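/*
 * For context, a hedged driver-side sketch (dev, pages[] and the
 * buffer setup are placeholders) of how these ops are reached
 * through the generic DMA API:
 *
 *	struct scatterlist sgl[4];
 *	int i, count;
 *
 *	sg_init_table(sgl, ARRAY_SIZE(sgl));
 *	for (i = 0; i < ARRAY_SIZE(sgl); i++)
 *		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
 *
 *	count = dma_map_sg(dev, sgl, ARRAY_SIZE(sgl),
 *			   DMA_FROM_DEVICE);	reaches mips_dma_map_sg()
 *	... device writes into the buffers ...
 *	dma_sync_sg_for_cpu(dev, sgl, ARRAY_SIZE(sgl),
 *			    DMA_FROM_DEVICE);	reaches mips_dma_sync_sg_for_cpu()
 *	dma_unmap_sg(dev, sgl, ARRAY_SIZE(sgl),
 *		     DMA_FROM_DEVICE);		reaches mips_dma_unmap_sg()
 *
 * Note dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg().
 */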
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)