msm_gem_unlock(obj);
}
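+/*
+ * Pick the CPU pgprot for an object: WC and UNCACHED buffers are mapped
+ * write-combined, cached buffers keep the default (cached) protection.
+ */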
+static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
+{
+	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+		return pgprot_writecombine(prot);
+	return prot;
+}
+
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;
-
-	if (msm_obj->flags & MSM_BO_WC) {
-		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
-		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
-	} else {
-		/*
-		 * Shunt off cached objs to shmem file so they have their own
-		 * address_space (so unmap_mapping_range does what we want,
-		 * in particular in the case of mmap'd dmabufs)
-		 */
-		vma->vm_pgoff = 0;
-		vma_set_file(vma, obj->filp);
-
-		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-	}
+	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
	return 0;
}
	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;
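+	/* Cached-coherent buffers also need a cacheable IOMMU mapping */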
+	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
+		prot |= IOMMU_CACHE;
+
	GEM_WARN_ON(!msm_gem_is_locked(obj));
	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		goto fail;
}
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
if (msm_obj->vaddr == NULL) {
ret = -ENOMEM;
goto fail;
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
+	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
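+	/* MSM_BO_CACHED_COHERENT is only valid when the device has cached-coherent support */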
+	case MSM_BO_CACHED_COHERENT:
+		if (priv->has_cached_coherent)
+			break;
+		fallthrough;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));