drm/i915: Recreate internal objects with single page segments if dmar fails
author Chris Wilson <chris@chris-wilson.co.uk>
Thu, 2 Feb 2017 13:27:21 +0000 (13:27 +0000)
committer Jani Nikula <jani.nikula@intel.com>
Thu, 16 Feb 2017 09:59:09 +0000 (11:59 +0200)
If we fail to dma-map the object, the most common cause is lack of space
inside the SW-IOTLB due to fragmentation. If we recreate the sg_table
using segments of PAGE_SIZE (and single page allocations), we may succeed
in remapping the scatterlist.
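
The fallback follows a standard retry-with-smaller-segments pattern; a
minimal sketch of its shape, where build_sg() is a hypothetical helper
standing in for the allocation loop in the patch below:

	max_order = MAX_ORDER;
create_st:
	st = build_sg(obj, max_order);	/* hypothetical: allocate st + pages */
	if (IS_ERR(st))
		return ERR_CAST(st);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Single page segments cannot be split further: give up */
		if (!get_order(st->sgl->length))
			goto err;
		internal_free_pages(st);
		max_order = 0;	/* restart with PAGE_SIZE segments */
		goto create_st;
	}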

This first became a significant problem for the mock selftests after commit
5584f1b1d73e ("drm/i915: fix i915 running as dom0 under Xen") increased
the max_order.

Fixes: 920cf4194954 ("drm/i915: Introduce an internal allocator for disposable private objects")
Fixes: 5584f1b1d73e ("drm/i915: fix i915 running as dom0 under Xen")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170202132721.12711-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: <stable@vger.kernel.org> # v4.10
(cherry picked from commit bb96dcf5830e5d81a1da2e2a14e6c0f7dfc64348)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
drivers/gpu/drm/i915/i915_gem_internal.c

index 17ce53d..628699a 100644
@@ -46,24 +46,12 @@ static struct sg_table *
 i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       unsigned int npages = obj->base.size / PAGE_SIZE;
        struct sg_table *st;
        struct scatterlist *sg;
+       unsigned int npages;
        int max_order;
        gfp_t gfp;
 
-       st = kmalloc(sizeof(*st), GFP_KERNEL);
-       if (!st)
-               return ERR_PTR(-ENOMEM);
-
-       if (sg_alloc_table(st, npages, GFP_KERNEL)) {
-               kfree(st);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       sg = st->sgl;
-       st->nents = 0;
-
        max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
        if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
@@ -77,6 +65,20 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
                gfp |= __GFP_DMA32;
        }
 
+create_st:
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return ERR_PTR(-ENOMEM);
+
+       npages = obj->base.size / PAGE_SIZE;
+       if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+               kfree(st);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       sg = st->sgl;
+       st->nents = 0;
+
        do {
                int order = min(fls(npages) - 1, max_order);
                struct page *page;
@@ -104,8 +106,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
                sg = __sg_next(sg);
        } while (1);
 
-       if (i915_gem_gtt_prepare_pages(obj, st))
+       if (i915_gem_gtt_prepare_pages(obj, st)) {
+               /* Failed to dma-map, try again with single page sg segments */
+               if (get_order(st->sgl->length)) {
+                       internal_free_pages(st);
+                       max_order = 0;
+                       goto create_st;
+               }
                goto err;
+       }
 
        /* Mark the pages as dontneed whilst they are still pinned. As soon
         * as they are unpinned they are allowed to be reaped by the shrinker,
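
Note the retry guard: get_order(st->sgl->length) is non-zero only while the
head segment spans multiple pages, so the fallback is bounded to a single
extra pass. Illustrative values, assuming 4 KiB pages:

	get_order(PAGE_SIZE);		/* 0: already single pages, fail for good */
	get_order(8 * PAGE_SIZE);	/* 3: large segment, retry with max_order = 0 */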