* because display controller, GPU, etc. are not coherent.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
- dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
* discard those writes.
*/
if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
- dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+ dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}
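The dma_{map,unmap}_sgtable() helpers (added in v5.8) operate on the whole sg_table: they map sgt->orig_nents entries, store the number of DMA segments actually created in sgt->nents, and return 0 or a negative errno, so the caller no longer has to juggle the two entry counts that dma_map_sg()/dma_unmap_sg() made easy to mix up. A minimal usage sketch with a hypothetical example_map() helper (not part of etnaviv):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper, for illustration only. */
static int example_map(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* Maps sgt->orig_nents entries, fills sgt->nents, returns 0/-errno. */
	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;

	/* ... program the device with the DMA addresses in sgt ... */

	/* Undone with the same table; there is no entry count to get wrong. */
	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	return 0;
}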
/* called with etnaviv_obj->lock held */
}
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
- dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents,
- etnaviv_op_to_dma_dir(op));
+ dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
+ etnaviv_op_to_dma_dir(op));
etnaviv_obj->last_cpu_prep_op = op;
}
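etnaviv_op_to_dma_dir() is not part of this excerpt; it translates the userspace ETNA_PREP_READ/ETNA_PREP_WRITE flags into a DMA direction, presumably along these lines (a sketch, not verbatim driver code):

#include <linux/dma-mapping.h>
#include <drm/etnaviv_drm.h>

/* Sketch of the direction-translation helper assumed above. */
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;		/* CPU reads what the GPU wrote */
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;		/* CPU writes data for the GPU */
	else
		return DMA_BIDIRECTIONAL;
}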
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
/* fini without a prep is almost certainly a userspace error */
WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
- dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
- etnaviv_obj->sgt->nents,
+ dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
etnaviv_obj->last_cpu_prep_op = 0;
}
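Together, cpu_prep and cpu_fini bracket CPU access to a cached BO: dma_sync_sgtable_for_cpu() transfers ownership of the pages to the CPU for the requested direction, and dma_sync_sgtable_for_device() hands it back using the direction remembered in last_cpu_prep_op. Both helpers sync sgt->orig_nents entries internally. The generic pattern, as an illustration rather than etnaviv's exact code path:

#include <linux/dma-mapping.h>

/* Hypothetical example of the prep/fini ownership dance for a mapped table. */
static void example_cpu_access(struct device *dev, struct sg_table *sgt,
			       enum dma_data_direction dir)
{
	/* Hand the pages to the CPU for this direction (the "prep" half). */
	dma_sync_sgtable_for_cpu(dev, sgt, dir);

	/* ... CPU reads or writes the buffer contents here ... */

	/* Hand them back before the device touches the buffer again ("fini"). */
	dma_sync_sgtable_for_device(dev, sgt, dir);
}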
struct sg_table *sgt, unsigned len, int prot)
{
struct scatterlist *sg;
unsigned int da = iova;
- unsigned int i, j;
+ unsigned int i;
int ret;
if (!context || !sgt)
return -EINVAL;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
u32 pa = sg_dma_address(sg) - sg->offset;
size_t bytes = sg_dma_len(sg) + sg->offset;
return 0;
fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg_dma_len(sg) + sg->offset;
-
- etnaviv_context_unmap(context, da, bytes);
- da += bytes;
- }
+ etnaviv_context_unmap(context, iova, da - iova);
return ret;
}
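The rollback no longer needs a second loop with its own counter: da advances by each segment's length as it is mapped, so when a map step fails, da - iova is exactly the number of bytes already inserted into the GPU address space, and a single etnaviv_context_unmap() removes them. Since the per-segment mapping call is elided from the hunk above, here is how the loop plausibly reads after the change (a reconstruction under that assumption, not verbatim driver code):

	for_each_sgtable_dma_sg(sgt, sg, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* da - iova == total bytes successfully mapped before the failure */
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;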
unsigned int da = iova;
int i;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ for_each_sgtable_dma_sg(sgt, sg, i) {
size_t bytes = sg_dma_len(sg) + sg->offset;
etnaviv_context_unmap(context, da, bytes);