drm/i915: Remove allow_alloc from i915_gem_object_get_sg*
author     Jason Ekstrand <jason@jlekstrand.net>
           Wed, 14 Jul 2021 19:34:17 +0000 (14:34 -0500)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
           Fri, 16 Jul 2021 19:47:07 +0000 (21:47 +0200)
This reverts the rest of 0edbb9ba1bfe ("drm/i915: Move cmd parser
pinning to execbuffer").  Now that the only caller that passed
allow_alloc=false to i915_gem_object_get_sg() has been removed, we can
drop the parameter entirely.  This portion of the revert was split into
its own patch to aid review.

Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210714193419.1459723-4-jason@jlekstrand.net
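
For quick reference, the lookup helpers in i915_gem_object.h end up as
follows once the patch is applied (reconstructed from the
i915_gem_object.h hunk below; only the allow_alloc argument disappears,
behaviour for the remaining callers is unchanged):

	struct scatterlist *
	__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
				 struct i915_gem_object_page_iter *iter,
				 unsigned int n,
				 unsigned int *offset, bool dma);

	static inline struct scatterlist *
	i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			       unsigned int n, unsigned int *offset)
	{
		return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
	}

	static inline struct scatterlist *
	i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
				   unsigned int n, unsigned int *offset)
	{
		return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
	}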
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gt/intel_ggtt.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 8be4fad..f3ede43 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -342,22 +342,22 @@ struct scatterlist *
 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
-                        unsigned int *offset, bool allow_alloc, bool dma);
+                        unsigned int *offset, bool dma);
 
 static inline struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
-                      unsigned int *offset, bool allow_alloc)
+                      unsigned int *offset)
 {
-       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
 }
 
 static inline struct scatterlist *
 i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
                           unsigned int n,
-                          unsigned int *offset, bool allow_alloc)
+                          unsigned int *offset)
 {
-       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
 }
 
 struct page *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 0c9d284..8eb1c3a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -494,7 +494,7 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset,
-                        bool allow_alloc, bool dma)
+                        bool dma)
 {
        struct scatterlist *sg;
        unsigned int idx, count;
@@ -516,9 +516,6 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;
 
-       if (!allow_alloc)
-               goto manual_lookup;
-
        mutex_lock(&iter->lock);
 
        /* We prefer to reuse the last sg so that repeated lookup of this
@@ -568,16 +565,7 @@ scan:
        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;
 
-       goto manual_walk;
-
-manual_lookup:
-       idx = 0;
-       sg = obj->mm.pages->sgl;
-       count = __sg_page_count(sg);
-
-manual_walk:
-       /*
-        * In case we failed to insert the entry into the radixtree, we need
+       /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
@@ -624,7 +612,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
 
        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
-       sg = i915_gem_object_get_sg(obj, n, &offset, true);
+       sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
 }
 
@@ -650,7 +638,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
        struct scatterlist *sg;
        unsigned int offset;
 
-       sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);
+       sg = i915_gem_object_get_sg_dma(obj, n, &offset);
 
        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
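
The callers updated in i915_gem_pages.c all follow the same pattern:
resolve a page index n into a scatterlist element plus a page offset
within that element, then derive an address from the pair.  A minimal
sketch of such a caller (not part of the patch; it assumes the object's
pages are pinned and n is in range, and mirrors
i915_gem_object_get_dma_address_len() above):

	struct scatterlist *sg;
	unsigned int offset;
	dma_addr_t addr;

	/* n is a page index into the object's backing store */
	sg = i915_gem_object_get_sg_dma(obj, n, &offset);
	addr = sg_dma_address(sg) + (offset << PAGE_SHIFT);
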
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 6589411..f253b11 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -589,7 +589,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 
        GEM_WARN_ON(bo->ttm);
 
-       sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);
+       sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
 
        return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 20e46b8..9d445ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1494,7 +1494,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
        if (ret)
                goto err_sg_alloc;
 
-       iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
+       iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
        GEM_BUG_ON(!iter);
 
        sg = st->sgl;