for_each_sg(table->sgl, sg, table->nents, i) {
if (ion_buffer_cached(buffer))
- __dma_page_cpu_to_dev(sg_page(sg), 0, sg_dma_len(sg),
- DMA_BIDIRECTIONAL);
+ arm_dma_ops.sync_single_for_device(NULL,
+ pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
+ sg_dma_len(sg), DMA_BIDIRECTIONAL);
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg_dma_len(sg));
}
}
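/*
 * Illustration only, not part of this patch: when the pages already sit in
 * an sg_table, the generic streaming API can sync every entry in one call
 * instead of driving arm_dma_ops per page. A minimal sketch; passing a
 * NULL device makes the same ARM-only assumption as the patch above, and
 * it needs <linux/dma-mapping.h>.
 */
if (ion_buffer_cached(buffer))
	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);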
free_vm_area(vm_struct);
- __dma_page_cpu_to_dev(phys_to_page(heap_data->base), 0, heap_data->size,
- DMA_BIDIRECTIONAL);
+ arm_dma_ops.sync_single_for_device(NULL,
+ pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
+ heap_data->size, DMA_BIDIRECTIONAL);
gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
/*
 * This is only being used to flush the page for DMA. This API is not
 * really suitable for calling from a driver, but no better way to flush
 * a page for DMA exists at this time. (A portable alternative is
 * sketched after this function for illustration.)
 */
- __dma_page_cpu_to_dev(page, 0, PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
+ arm_dma_ops.sync_single_for_device(NULL,
+ pfn_to_dma(NULL, page_to_pfn(page)),
+ PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
return page;
}
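/*
 * Illustration only, not part of this patch: the flush above can also be
 * expressed through the generic DMA API on a one-entry scatterlist, which
 * is roughly what a driver-friendly helper might look like. A minimal
 * sketch: the helper name is hypothetical, and passing a NULL device plus
 * a physical address as the dma address makes the same ARM-specific
 * assumptions as the patch above. Needs <linux/scatterlist.h> and
 * <linux/dma-mapping.h>.
 */
static void ion_flush_page_for_dma(struct page *page, size_t size)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/* not a true device address; only valid while the dev is ignored */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);
}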
page = alloc_pages(gfp_flags, order);
if (!page)
return NULL;
- __dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ arm_dma_ops.sync_single_for_device(NULL,
+ pfn_to_dma(NULL, page_to_pfn(page)),
+ PAGE_SIZE << order, DMA_BIDIRECTIONAL);
}
if (!page)
return NULL;