From 5aea37bf4112896437176704049db2559efcb8a9 Mon Sep 17 00:00:00 2001
From: Jani Nikula
Date: Mon, 5 Sep 2022 18:00:52 +0300
Subject: [PATCH] drm/i915: un-inline i915_gem_drain_freed_objects()

I can't identify a single hot path that would require
i915_gem_drain_freed_objects() to be inline. Un-inline it.

Signed-off-by: Jani Nikula
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/6c289c55afee0d9a3067122db63277b8d60cf74f.1662390010.git.jani.nikula@intel.com
---
 drivers/gpu/drm/i915/i915_drv.h | 17 +----------------
 drivers/gpu/drm/i915/i915_gem.c | 15 +++++++++++++++
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4ee463e..497a0c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -979,22 +979,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 
-static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
-{
-	/*
-	 * A single pass should suffice to release all the freed objects (along
-	 * most call paths) , but be a little more paranoid in that freeing
-	 * the objects does take a little amount of time, during which the rcu
-	 * callbacks could have added new objects into the freed list, and
-	 * armed the work again.
-	 */
-	while (atomic_read(&i915->mm.free_count)) {
-		flush_work(&i915->mm.free_work);
-		flush_delayed_work(&i915->bdev.wq);
-		rcu_barrier();
-	}
-}
-
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
 void i915_gem_drain_workqueue(struct drm_i915_private *i915);
 
 struct i915_vma * __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c89b33..0f49ec9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1086,6 +1086,21 @@ out:
 }
 
 /*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid in that freeing the objects does
+ * take a little amount of time, during which the rcu callbacks could have added
+ * new objects into the freed list, and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+	while (atomic_read(&i915->mm.free_count)) {
+		flush_work(&i915->mm.free_work);
+		flush_delayed_work(&i915->bdev.wq);
+		rcu_barrier();
+	}
+}
+
+/*
  * Similar to objects above (see i915_gem_drain_freed-objects), in general we
  * have workers that are armed by RCU and then rearm themselves in their
  * callbacks. To be paranoid, we need to drain the workqueue a second time after
-- 
2.7.4