From f33a8a51602c84cc7d5cadd2655835ba3b7d03f9 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 4 Oct 2019 14:40:04 +0100
Subject: [PATCH] drm/i915: Merge wait_for_timelines with retire_request

wait_for_timelines is essentially the same loop as retiring requests
(with an extra timeout), so merge the two into one routine.

v2: i915_retire_requests_timeout and keep VT'd w/a as !interruptible

Signed-off-by: Chris Wilson
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-10-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c          |  4 +-
 drivers/gpu/drm/i915/gem/i915_gem_pm.c            |  4 +-
 .../gpu/drm/i915/gem/selftests/i915_gem_context.c |  2 +-
 drivers/gpu/drm/i915/gt/intel_gt_pm.c             |  3 +-
 drivers/gpu/drm/i915/i915_debugfs.c               |  4 +-
 drivers/gpu/drm/i915/i915_drv.h                   |  3 +-
 drivers/gpu/drm/i915/i915_gem.c                   | 67 ++--------------------
 drivers/gpu/drm/i915/i915_gem_evict.c             | 12 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.c               |  4 +-
 drivers/gpu/drm/i915/i915_request.c               | 26 ++++-
 drivers/gpu/drm/i915/i915_request.h               |  7 ++-
 drivers/gpu/drm/i915/selftests/igt_flush_test.c   |  4 +-
 drivers/gpu/drm/i915/selftests/igt_live_test.c    |  4 +-
 drivers/gpu/drm/i915/selftests/mock_gem_device.c  |  2 +-
 14 files changed, 50 insertions(+), 96 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c19431d..418d0d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -432,9 +432,7 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj)
 
 	/* Attempt to reap some mmap space from dead objects */
 	do {
-		err = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 		if (err)
 			break;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 26f325b..90b2112 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -59,9 +59,7 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 {
 	bool result = !intel_gt_is_wedged(gt);
 
-	if (i915_gem_wait_for_idle(gt->i915,
-				   I915_WAIT_FOR_IDLE_BOOST,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/* XXX hide warning from gem_eio */
 		if (i915_modparams.reset) {
 			dev_err(gt->i915->drm.dev,
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index f5402aa..f902aee 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1137,7 +1137,7 @@ out:
 
 	if ((flags & TEST_IDLE) && ret == 0) {
 		ret = i915_gem_wait_for_idle(ce->engine->i915,
-					     0, MAX_SCHEDULE_TIMEOUT);
+					     MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index d4cefdd..bdb34f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -196,8 +196,7 @@ int intel_gt_resume(struct intel_gt *gt)
 
 static void wait_for_idle(struct intel_gt *gt)
 {
-	if (i915_gem_wait_for_idle(gt->i915, 0,
-				   I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+	if (i915_gem_wait_for_idle(gt->i915, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
 		/*
 		 * Forcibly cancel outstanding work and leave
 		 * the gpu quiet.
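
The conversions above and in the hunks that follow share one pattern: the flags
argument to i915_gem_wait_for_idle() is dropped and only a timeout is passed,
so the wait is always interruptible and I915_WAIT_FOR_IDLE_BOOST (and its
race-to-idle rps boost) goes away. A rough sketch of the new calling
convention, where example_flush() is illustrative only and not part of the
applied diff:

	static int example_flush(struct drm_i915_private *i915)
	{
		/* Interruptible wait, bounded only by MAX_SCHEDULE_TIMEOUT */
		return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
	}
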
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7c4bba2..5888a65 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3635,9 +3635,7 @@ i915_drop_caches_set(void *data, u64 val)
 		i915_retire_requests(i915);
 
 	if (val & (DROP_IDLE | DROP_ACTIVE)) {
-		ret = i915_gem_wait_for_idle(i915,
-					     I915_WAIT_INTERRUPTIBLE,
-					     MAX_SCHEDULE_TIMEOUT);
+		ret = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ad31852..44f3463 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2321,8 +2321,7 @@ void i915_gem_driver_register(struct drm_i915_private *i915);
 void i915_gem_driver_unregister(struct drm_i915_private *i915);
 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
-int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
-			   unsigned int flags, long timeout);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, long timeout);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 83c2c7b..12d21e5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -883,61 +883,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 	}
 }
 
-static long
-wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
-{
-	struct intel_gt_timelines *timelines = &gt->timelines;
-	struct intel_timeline *tl;
-	unsigned long flags;
-
-	spin_lock_irqsave(&timelines->lock, flags);
-	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct dma_fence *fence;
-
-		fence = i915_active_fence_get(&tl->last_request);
-		if (!fence)
-			continue;
-
-		spin_unlock_irqrestore(&timelines->lock, flags);
-
-		if (!dma_fence_is_i915(fence)) {
-			timeout = dma_fence_wait_timeout(fence,
-							 flags & I915_WAIT_INTERRUPTIBLE,
-							 timeout);
-		} else {
-			struct i915_request *rq = to_request(fence);
-
-			/*
-			 * "Race-to-idle".
-			 *
-			 * Switching to the kernel context is often used as
-			 * a synchronous step prior to idling, e.g. in suspend
-			 * for flushing all current operations to memory before
-			 * sleeping. These we want to complete as quickly as
-			 * possible to avoid prolonged stalls, so allow the gpu
-			 * to boost to maximum clocks.
-			 */
-			if (flags & I915_WAIT_FOR_IDLE_BOOST)
-				gen6_rps_boost(rq);
-
-			timeout = i915_request_wait(rq, flags, timeout);
-		}
-
-		dma_fence_put(fence);
-		if (timeout < 0)
-			return timeout;
-
-		/* restart after reacquiring the lock */
-		spin_lock_irqsave(&timelines->lock, flags);
-		tl = list_entry(&timelines->active_list, typeof(*tl), link);
-	}
-	spin_unlock_irqrestore(&timelines->lock, flags);
-
-	return timeout;
-}
-
-int i915_gem_wait_for_idle(struct drm_i915_private *i915,
-			   unsigned int flags, long timeout)
+int i915_gem_wait_for_idle(struct drm_i915_private *i915, long timeout)
 {
 	struct intel_gt *gt = &i915->gt;
 
@@ -945,18 +891,13 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
 	if (!intel_gt_pm_is_awake(gt))
 		return 0;
 
-	do {
-		timeout = wait_for_timelines(gt, flags, timeout);
-		if (timeout < 0)
-			return timeout;
-
+	while ((timeout = i915_retire_requests_timeout(i915, timeout)) > 0) {
 		cond_resched();
 		if (signal_pending(current))
 			return -EINTR;
+	}
 
-	} while (i915_retire_requests(i915));
-
-	return 0;
+	return timeout;
 }
 
 struct i915_vma *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0552bf9..0a412f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -46,9 +46,7 @@ static int ggtt_flush(struct drm_i915_private *i915)
 	 * the hopes that we can then remove contexts and the like only
 	 * bound by their active reference.
 	 */
-	return i915_gem_wait_for_idle(i915,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT);
+	return i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 }
 
 static bool
@@ -126,6 +124,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
 				    min_size, alignment, color,
 				    start, end, mode);
 
+	i915_retire_requests(vm->i915);
+
 search_again:
 	active = NULL;
 	INIT_LIST_HEAD(&eviction_list);
@@ -264,13 +264,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 
 	trace_i915_gem_evict_node(vm, target, flags);
 
-	/* Retire before we search the active list. Although we have
+	/*
+	 * Retire before we search the active list. Although we have
 	 * reasonable accuracy in our retirement lists, we may have
 	 * a stray pin (preventing eviction) that can only be resolved by
 	 * retiring.
 	 */
-	if (!(flags & PIN_NONBLOCK))
-		i915_retire_requests(vm->i915);
+	i915_retire_requests(vm->i915);
 
 	if (i915_vm_has_cache_coloring(vm)) {
 		/* Expand search to cover neighbouring guard pages (or lack!) */
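
The v2 note "keep VT'd w/a as !interruptible" is implemented by encoding the
wait mode in the sign of the timeout: i915_retire_requests_timeout() (see the
i915_request.c hunk further below) treats a negative value as an
uninterruptible wait of up to -timeout jiffies. The VT-d workaround in the
i915_gem_gtt.c hunk below relies on exactly that. A minimal sketch of the
convention, assuming nothing beyond what this patch adds (example_idle_for_vtd()
is illustrative only):

	static void example_idle_for_vtd(struct drm_i915_private *i915)
	{
		/* Negative timeout: wait uninterruptibly, mirroring the old
		 * non-interruptible i915_gem_wait_for_idle(i915, 0, ...) call.
		 */
		if (i915_retire_requests_timeout(i915, -MAX_SCHEDULE_TIMEOUT))
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
	}
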
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7462d87..082fcf9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2528,7 +2528,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (unlikely(ggtt->do_idle_maps)) {
-		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
+		/* XXX This does not prevent more requests being submitted! */
+		if (i915_retire_requests_timeout(dev_priv,
+						 -MAX_SCHEDULE_TIMEOUT)) {
 			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
 			/* Wait a bit, in hopes it avoids the hang */
 			udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 4ffe62a..52f7c4e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1508,13 +1508,19 @@ out:
 	return timeout;
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915)
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout)
 {
 	struct intel_gt_timelines *timelines = &i915->gt.timelines;
 	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
 	unsigned long flags;
+	bool interruptible;
 	LIST_HEAD(free);
 
+	interruptible = true;
+	if (timeout < 0)
+		timeout = -timeout, interruptible = false;
+
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
 		if (!mutex_trylock(&tl->mutex))
@@ -1525,13 +1531,27 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 		tl->active_count++; /* pin the list element */
 		spin_unlock_irqrestore(&timelines->lock, flags);
 
+		if (timeout > 0) {
+			struct dma_fence *fence;
+
+			fence = i915_active_fence_get(&tl->last_request);
+			if (fence) {
+				timeout = dma_fence_wait_timeout(fence,
+								 interruptible,
+								 timeout);
+				dma_fence_put(fence);
+			}
+		}
+
 		retire_requests(tl);
 
 		spin_lock_irqsave(&timelines->lock, flags);
 
 		/* Resume iteration after dropping lock */
 		list_safe_reset_next(tl, tn, link);
-		if (!--tl->active_count)
+		if (--tl->active_count)
+			active_count += !!rcu_access_pointer(tl->last_request.fence);
+		else
 			list_del(&tl->link);
 
 		mutex_unlock(&tl->mutex);
@@ -1547,7 +1567,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);
 
-	return !list_empty(&timelines->active_list);
+	return active_count ? timeout : 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
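
With the i915_request.h hunk below, i915_retire_requests() becomes a void
inline wrapper that calls the new function with a zero timeout. A zero timeout
never sleeps on a last_request fence and only retires work that has already
completed; a positive timeout additionally waits (interruptibly) on each active
timeline, and the function returns the remaining timeout while active timelines
remain, or 0 otherwise. A short usage sketch; the example_* helpers are
illustrative only:

	/* Retire completed work without blocking on outstanding requests. */
	static void example_retire_only(struct drm_i915_private *i915)
	{
		i915_retire_requests_timeout(i915, 0);
	}

	/* Interruptibly drain for up to HZ jiffies (about one second). */
	static long example_drain(struct drm_i915_private *i915)
	{
		return i915_retire_requests_timeout(i915, HZ);
	}
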
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 621fb33c..256b071 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -310,7 +310,6 @@ long i915_request_wait(struct i915_request *rq,
 #define I915_WAIT_INTERRUPTIBLE	BIT(0)
 #define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
 #define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
 
 static inline bool i915_request_signaled(const struct i915_request *rq)
 {
@@ -460,6 +459,10 @@ i915_request_active_timeline(struct i915_request *rq)
 					 lockdep_is_held(&rq->engine->active.lock));
 }
 
-bool i915_retire_requests(struct drm_i915_private *i915);
+long i915_retire_requests_timeout(struct drm_i915_private *i915, long timeout);
+static inline void i915_retire_requests(struct drm_i915_private *i915)
+{
+	i915_retire_requests_timeout(i915, 0);
+}
 
 #endif /* I915_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 2a5fbe4..ed496bd 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -18,8 +18,7 @@ int igt_flush_test(struct drm_i915_private *i915)
 
 	cond_resched();
 
-	i915_retire_requests(i915);
-	if (i915_gem_wait_for_idle(i915, 0, HZ / 5) == -ETIME) {
+	if (i915_gem_wait_for_idle(i915, HZ / 5) == -ETIME) {
 		pr_err("%pS timed out, cancelling all further testing.\n",
 		       __builtin_return_address(0));
 
@@ -30,7 +29,6 @@ int igt_flush_test(struct drm_i915_private *i915)
 		intel_gt_set_wedged(&i915->gt);
 		ret = -EIO;
 	}
-	i915_retire_requests(i915);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
index 04a6f88..eae90f9 100644
--- a/drivers/gpu/drm/i915/selftests/igt_live_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -23,9 +23,7 @@ int igt_live_test_begin(struct igt_live_test *t,
 	t->func = func;
 	t->name = name;
 
-	err = i915_gem_wait_for_idle(i915,
-				     I915_WAIT_INTERRUPTIBLE,
-				     MAX_SCHEDULE_TIMEOUT);
+	err = i915_gem_wait_for_idle(i915, MAX_SCHEDULE_TIMEOUT);
 	if (err) {
 		pr_err("%s(%s): failed to idle before, with err=%d!",
 		       func, name, err);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index a8be5da..3b589bb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -44,7 +44,7 @@ void mock_device_flush(struct drm_i915_private *i915)
 	do {
 		for_each_engine(engine, i915, id)
 			mock_engine_flush(engine);
-	} while (i915_retire_requests(i915));
+	} while (i915_retire_requests_timeout(i915, MAX_SCHEDULE_TIMEOUT));
 }
 
 static void mock_device_release(struct drm_device *dev)
-- 
2.7.4