/* USERPTR related */
struct vm_area_struct *vma;
+ bool coherent;
/* DMABUF related */
struct dma_buf_attachment *db_attach;
struct sg_table *sgt = buf->dma_sgt;
/* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
+ if (!sgt || buf->db_attach || buf->coherent)
return;
dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
struct sg_table *sgt = buf->dma_sgt;
/* DMABUF exporter will flush the cache for us */
- if (!sgt || buf->db_attach)
+ if (!sgt || buf->db_attach || buf->coherent)
return;
dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
+#ifdef __arm__
+/*
+ * vma_is_coherent() - guess whether a userspace mapping is cache-coherent.
+ * @vma: the userspace VMA backing the buffer
+ *
+ * Returns true for I/O, non-cached, write-combined and dma-coherent
+ * mappings, for which CPU cache maintenance can be skipped.
+ *
+ * This is a hack to speed up cache management on coherent ARM mappings;
+ * other architectures rely on dma-mapping internal optimisations.
+ *
+ * NOTE: pgprot_t is a wrapped struct when STRICT_MM_TYPECHECKS is set,
+ * so the protection bits must be compared via pgprot_val() rather than
+ * with a direct '==' on pgprot_t values.
+ */
+static inline int vma_is_coherent(struct vm_area_struct *vma)
+{
+	pgprot_t prot = vma->vm_page_prot;
+
+	if (vma_is_io(vma) ||
+	    pgprot_val(prot) == pgprot_val(pgprot_noncached(prot)) ||
+	    pgprot_val(prot) == pgprot_val(pgprot_writecombine(prot)) ||
+	    pgprot_val(prot) == pgprot_val(pgprot_dmacoherent(prot)))
+		return true;
+
+	return false;
+}
+#else
+/* Non-ARM: let the dma-mapping layer optimise; never skip cache sync. */
+static inline int vma_is_coherent(struct vm_area_struct *vma)
+{
+	return false;
+}
+#endif
+
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
struct vm_area_struct *vma, unsigned long *res)
{
goto fail_pages;
}
+ buf->coherent = vma_is_coherent(vma);
+
/* extract page list from userspace mapping */
ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
if (ret) {