Merge tag 'drm-intel-fixes-2022-11-10' of git://anongit.freedesktop.org/drm/drm-intel...
author Dave Airlie <airlied@redhat.com>
Fri, 11 Nov 2022 00:20:11 +0000 (10:20 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 11 Nov 2022 00:20:11 +0000 (10:20 +1000)
- Fix sg_table handling in map_dma_buf (Matthew Auld)
- Send PSR update also on invalidate (Jouni Högander)
- Do not set cache_dirty for DGFX (Niranjana Vishwanathapura)
- Restore userptr probe_range behaviour (Matthew Auld)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y2zCy5q85qE9W0J8@tursulin-desk
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c

drivers/gpu/drm/i915/display/intel_psr.c
index d4cce62..15c3e44 100644
@@ -2201,8 +2201,11 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
        if (intel_dp->psr.psr2_sel_fetch_enabled) {
                u32 val;
 
-               if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
+               if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
+                       /* Send one update otherwise lag is observed in screen */
+                       intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
                        return;
+               }
 
                val = man_trk_ctl_enable_bit_get(dev_priv) |
                      man_trk_ctl_partial_frame_bit_get(dev_priv) |
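
For reference, a condensed sketch of the patched invalidate path (not the full driver function; dp_to_i915() and the manual-tracking branch come from the surrounding code in intel_psr.c):

        static void _psr_invalidate_handle(struct intel_dp *intel_dp)
        {
                struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

                if (intel_dp->psr.psr2_sel_fetch_enabled &&
                    intel_dp->psr.psr2_sel_fetch_cff_enabled) {
                        /*
                         * Continuous full fetch is already on, but without
                         * one explicit update the screen visibly lags, so
                         * kick the hardware with a dummy CURSURFLIVE write.
                         */
                        intel_de_write(dev_priv,
                                       CURSURFLIVE(intel_dp->psr.pipe), 0);
                        return;
                }

                /* ... otherwise program PSR2 manual tracking as before ... */
        }
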
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index f5062d0..824971a 100644
@@ -40,13 +40,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
                goto err;
        }
 
-       ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+       ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
        if (ret)
                goto err_free;
 
        src = obj->mm.pages->sgl;
        dst = st->sgl;
-       for (i = 0; i < obj->mm.pages->nents; i++) {
+       for (i = 0; i < obj->mm.pages->orig_nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
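
The swap from nents to orig_nents matters because the two counts can diverge: sg_alloc_table() sets orig_nents to the number of CPU-side entries, while dma_map_sgtable() may later shrink nents when the IOMMU coalesces contiguous segments. The copy loop above walks CPU pages, so it must use orig_nents. A minimal sketch of the distinction (map_example(), dev and npages are illustrative placeholders, and the entries would need to be populated with pages before mapping):

        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>

        static int map_example(struct device *dev, struct sg_table *st,
                               unsigned int npages)
        {
                /* Sets st->orig_nents = npages; this count never changes. */
                int ret = sg_alloc_table(st, npages, GFP_KERNEL);

                if (ret)
                        return ret;

                /* ... fill each entry with a page via sg_set_page() ... */

                /*
                 * On success st->nents holds the DMA segment count, which
                 * may be smaller than st->orig_nents; any CPU-side walk of
                 * the list must therefore iterate orig_nents times.
                 */
                return dma_map_sgtable(dev, st, DMA_BIDIRECTIONAL, 0);
        }
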
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 11125c3..2f78044 100644
@@ -369,14 +369,14 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 
        __start_cpu_write(obj);
        /*
-        * On non-LLC platforms, force the flush-on-acquire if this is ever
+        * On non-LLC igfx platforms, force the flush-on-acquire if this is ever
         * swapped-in. Our async flush path is not trust worthy enough yet(and
         * happens in the wrong order), and with some tricks it's conceivable
         * for userspace to change the cache-level to I915_CACHE_NONE after the
         * pages are swapped-in, and since execbuf binds the object before doing
         * the async flush, we have a race window.
         */
-       if (!HAS_LLC(i915))
+       if (!HAS_LLC(i915) && !IS_DGFX(i915))
                obj->cache_dirty = true;
 }
 
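
The extra IS_DGFX() test narrows the workaround to where it can matter: the stale-cache race described in the comment involves CPU cache flushing on swap-in, which is a concern for integrated parts without a shared LLC, not for discrete cards. As a standalone predicate (a paraphrase of the hunk, not driver code):

        /* Force flush-on-acquire only on non-LLC integrated platforms. */
        bool force_flush = !HAS_LLC(i915) && !IS_DGFX(i915);
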
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index f34e01a..ba14b18 100644
@@ -428,9 +428,10 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 {
        VMA_ITERATOR(vmi, mm, addr);
        struct vm_area_struct *vma;
+       unsigned long end = addr + len;
 
        mmap_read_lock(mm);
-       for_each_vma_range(vmi, vma, addr + len) {
+       for_each_vma_range(vmi, vma, end) {
                /* Check for holes, note that we also update the addr below */
                if (vma->vm_start > addr)
                        break;
@@ -442,7 +443,7 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
        }
        mmap_read_unlock(mm);
 
-       if (vma)
+       if (vma || addr < end)
                return -EFAULT;
        return 0;
 }
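
probe_range() can fail in two ways: a hole between VMAs inside the range, which the vm_start check catches mid-loop and leaves vma non-NULL, and a hole at the tail, where the iterator runs out of VMAs and leaves vma NULL, so the old vma-only test wrongly reported success; the restored "addr < end" term covers that second case. A minimal sketch of the same hole-detection pattern (check_populated() is a hypothetical standalone helper, with the per-VMA flag checks of the real function elided):

        #include <linux/mm.h>

        static int check_populated(struct mm_struct *mm,
                                   unsigned long start, unsigned long len)
        {
                VMA_ITERATOR(vmi, mm, start);
                struct vm_area_struct *vma;
                unsigned long addr = start, end = start + len;

                mmap_read_lock(mm);
                for_each_vma_range(vmi, vma, end) {
                        /* A gap before this VMA is a hole in the middle. */
                        if (vma->vm_start > addr)
                                break;
                        addr = vma->vm_end;
                }
                mmap_read_unlock(mm);

                /* vma set: middle hole; addr < end: hole at the tail. */
                if (vma || addr < end)
                        return -EFAULT;
                return 0;
        }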