drm/i915: Use 0 for the unordered context
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 19 Aug 2019 18:44:03 +0000 (19:44 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 19 Aug 2019 19:07:03 +0000 (20:07 +0100)
Since commit 078dec3326e2 ("dma-buf: add dma_fence_get_stub") the 0
fence context became an impossible match as it is used for an always
signaled fence. We can simplify our timeline tracking by knowing that 0
always means no match.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819184404.24200-1-chris@chris-wilson.co.uk
Link: https://patchwork.freedesktop.org/patch/msgid/20190819175109.5241-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/i915_gem_fence.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_request.c

index 7794495..835d675 100644 (file)
@@ -134,8 +134,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                dma_fence_init(&clflush->dma,
                               &i915_clflush_ops,
                               &clflush_lock,
-                              to_i915(obj->base.dev)->mm.unordered_timeline,
-                              0);
+                              0, 0);
                i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
 
                clflush->obj = i915_gem_object_get(obj);
index 2536d1f..61d0ca5 100644 (file)
@@ -267,7 +267,6 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
                                     struct i915_page_sizes *page_sizes,
                                     u32 value)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct clear_pages_work *work;
        struct i915_sleeve *sleeve;
        int err;
@@ -290,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
 
        init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
 
-       dma_fence_init(&work->dma,
-                      &clear_pages_work_ops,
-                      &fence_lock,
-                      i915->mm.unordered_timeline,
-                      0);
+       dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
        i915_sw_fence_init(&work->wait, clear_pages_work_notify);
 
        i915_gem_object_lock(obj);
index cf0439e..c788f86 100644 (file)
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
 
        i915_sw_fence_init(&stub->chain, stub_notify);
        dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-                      to_i915(obj->base.dev)->mm.unordered_timeline,
-                      0);
+                      0, 0);
 
        if (i915_sw_fence_await_reservation(&stub->chain,
                                            obj->base.resv, NULL,
index 1031fc1..63682cb 100644 (file)
@@ -748,8 +748,6 @@ struct i915_gem_mm {
         */
        struct workqueue_struct *userptr_wq;
 
-       u64 unordered_timeline;
-
        /** Bit 6 swizzling required for X tiling */
        u32 bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
index 85a1de5..71af43d 100644 (file)
@@ -1411,8 +1411,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                mkwrite_device_info(dev_priv)->page_sizes =
                        I915_GTT_PAGE_SIZE_4K;
 
-       dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
-
        intel_timelines_init(dev_priv);
 
        ret = i915_gem_init_userptr(dev_priv);
index eabf85b..ae3a60a 100644 (file)
@@ -915,7 +915,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
                        continue;
 
                /* Squash repeated waits to the same timelines */
-               if (fence->context != rq->i915->mm.unordered_timeline &&
+               if (fence->context &&
                    intel_timeline_sync_is_later(rq->timeline, fence))
                        continue;
 
@@ -929,7 +929,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
                        return ret;
 
                /* Record the latest fence used against each timeline */
-               if (fence->context != rq->i915->mm.unordered_timeline)
+               if (fence->context)
                        intel_timeline_sync_set(rq->timeline, fence);
        } while (--nchild);