return NULL;
}
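+/*
+ * Allocate a small buffer for the IOMMU path: coherent devices can use a
+ * plain buffer allocation, while the non-coherent case must take memory
+ * from the non-cacheable atomic pool.
+ */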
-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
- dma_addr_t *handle)
+static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
+ dma_addr_t *handle, int coherent_flag)
{
struct page *page;
void *addr;
- addr = __alloc_from_pool(size, &page);
+ if (coherent_flag == COHERENT)
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else
+ addr = __alloc_from_pool(size, &page);
if (!addr)
return NULL;
*handle = __iommu_create_mapping(dev, &page, size);
if (*handle == DMA_ERROR_CODE)
goto err_mapping;
return addr;
err_mapping:
__free_from_pool(addr, size);
return NULL;
}
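+/*
+ * Free a buffer obtained via __iommu_alloc_simple(): tear down the IOMMU
+ * mapping first, then return the pages to the matching allocator.
+ */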
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
- dma_addr_t handle, size_t size)
+ dma_addr_t handle, size_t size, int coherent_flag)
{
__iommu_remove_mapping(dev, handle, size);
- __free_from_pool(cpu_addr, size);
+ if (coherent_flag == COHERENT)
+ __dma_free_buffer(virt_to_page(cpu_addr), size);
+ else
+ __free_from_pool(cpu_addr, size);
}
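+/*
+ * Common allocator: take the simple path for coherent devices and for
+ * callers that cannot block; otherwise allocate a page array and map it
+ * through the IOMMU.
+ */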
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+ int coherent_flag)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
void *addr = NULL;
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (!gfpflags_allow_blocking(gfp))
- return __iommu_alloc_atomic(dev, size, handle);
+ if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
+ return __iommu_alloc_simple(dev, size, gfp, handle,
+ coherent_flag);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
* with __GFP_COMP being passed to split_page() which cannot
* handle them. The real problem is that this flag probably
* should be 0 on ARM as it is not supported on this
* platform; see CONFIG_HUGETLBFS.
*/
gfp &= ~(__GFP_COMP);
- /* For now always consider we are in a non-coherent case */
- pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
if (!pages)
return NULL;
*handle = __iommu_create_mapping(dev, pages, size);
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return pages;
addr = __iommu_alloc_remap(pages, size, gfp, prot,
__builtin_return_address(0));
if (!addr)
goto err_mapping;
return addr;
err_mapping:
__iommu_remove_mapping(dev, *handle, size);
err_buffer:
__iommu_free_buffer(dev, pages, size, attrs);
return NULL;
}
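+/*
+ * Thin NORMAL/COHERENT wrappers; the dma_map_ops tables select one of
+ * these rather than passing coherent_flag around.
+ */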
-static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
+}
+
+static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
+}
+
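+/*
+ * The two mmap flavours share this helper and differ only in page
+ * protection: the non-coherent wrapper rewrites vma->vm_page_prot via
+ * __get_dma_pgprot(), the coherent one keeps cacheable attributes.
+ */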
+static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
if (!pages)
return -ENXIO;
if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
return -ENXIO;
pages += off;
do {
int ret = vm_insert_page(vma, uaddr, *pages++);
if (ret) {
pr_err("Remapping memory failed: %d\n", ret);
return ret;
}
uaddr += PAGE_SIZE;
} while (usize -= PAGE_SIZE);
return 0;
}
+static int arm_iommu_mmap_attrs(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+ return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static int arm_coherent_iommu_mmap_attrs(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
-void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
{
struct page **pages;
size = PAGE_ALIGN(size);
- if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, cpu_addr, handle, size);
+ if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
return;
}
pages = __iommu_get_pages(cpu_addr, attrs);
if (!pages) {
WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
return;
}
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
dma_common_free_remap(cpu_addr, size,
VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
__iommu_remove_mapping(dev, handle, size);
__iommu_free_buffer(dev, pages, size, attrs);
}
+void arm_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+ __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+}
+
+void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+ __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
+}
+
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, struct dma_attrs *attrs)
};
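+/*
+ * Ops for coherent masters: these pick the COHERENT variants, which skip
+ * the atomic pool, the non-cacheable remapping and the per-page cache
+ * maintenance used by the normal ops.
+ */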
struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_iommu_alloc_attrs,
- .free = arm_iommu_free_attrs,
- .mmap = arm_iommu_mmap_attrs,
+ .alloc = arm_coherent_iommu_alloc_attrs,
+ .free = arm_coherent_iommu_free_attrs,
+ .mmap = arm_coherent_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_coherent_iommu_map_page,