{
}
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir)
+{
+ struct scatterlist sg;
+
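+ /* wrap the contiguous pages in a one-entry scatterlist so the generic dma_sync_sg_for_device() path can flush them */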
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, page, size, 0);
+ /*
+ * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+ * for the targeted device, but this works on the currently targeted
+ * hardware.
+ */
+ sg_dma_address(&sg) = page_to_phys(page);
+ dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
struct ion_vma_list {
struct list_head list;
struct vm_area_struct *vma;
struct page *page = buffer->pages[i];
if (ion_buffer_page_is_dirty(page))
- __dma_page_cpu_to_dev(page, 0, PAGE_SIZE, dir);
+ ion_pages_sync_for_device(dev, ion_buffer_page(page),
+ PAGE_SIZE, dir);
+
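+ /* the device copy is now up to date, so clear the page's dirty flag */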
ion_buffer_page_clean(buffer->pages + i);
}
list_for_each_entry(vma_list, &buffer->vmas, list) {
ion_heap_buffer_zero(buffer);
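+ /* for cached buffers, flush the freshly zeroed contents out of the cpu caches before the chunks are reused */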
+ if (ion_buffer_cached(buffer))
+ dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+ DMA_BIDIRECTIONAL);
+
for_each_sg(table->sgl, sg, table->nents, i) {
- if (ion_buffer_cached(buffer))
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(sg_page(sg))),
- sg_dma_len(sg), DMA_BIDIRECTIONAL);
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg_dma_len(sg));
}
pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL);
int i, ret;
-
chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
}
free_vm_area(vm_struct);
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(phys_to_page(heap_data->base))),
- heap_data->size, DMA_BIDIRECTIONAL);
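+ /* clean the caches over the whole heap region before any of it is handed out for dma */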
+ ion_pages_sync_for_device(NULL, pfn_to_page(PFN_DOWN(heap_data->base)),
+ heap_data->size, DMA_BIDIRECTIONAL);
+
gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
chunk_heap->heap.ops = &chunk_heap_ops;
chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
if (!page)
return NULL;
- /* this is only being used to flush the page for dma,
- this api is not really suitable for calling from a driver
- but no better way to flush a page for dma exist at this time */
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(page)),
- PAGE_SIZE << pool->order,
- DMA_BIDIRECTIONAL);
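+ /* flush the freshly allocated pages so they are coherent before being handed to a device */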
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+ DMA_BIDIRECTIONAL);
return page;
}
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int nr_to_scan);
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ * device
+ * @dev: the device the pages will be used with
+ * @page: the first page to be flushed
+ * @size: size in bytes of region to be flushed
+ * @dir: direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+ size_t size, enum dma_data_direction dir);
+
#endif /* _ION_PRIV_H */
page = ion_heap_alloc_pages(buffer, gfp_flags, order);
if (!page)
return 0;
- arm_dma_ops.sync_single_for_device(NULL,
- pfn_to_dma(NULL, page_to_pfn(page)),
- PAGE_SIZE << order, DMA_BIDIRECTIONAL);
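+ /* this allocation bypassed the page pool, so it has not been flushed yet; sync it before dma use */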
+ ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+ DMA_BIDIRECTIONAL);
}
if (!page)
return 0;