drm/i915: un-inline i915_gem_drain_freed_objects()
author    Jani Nikula <jani.nikula@intel.com>
          Mon, 5 Sep 2022 15:00:52 +0000 (18:00 +0300)
committer Jani Nikula <jani.nikula@intel.com>
          Tue, 6 Sep 2022 13:06:21 +0000 (16:06 +0300)
I can't identify a single hot path that would require
i915_gem_drain_freed_objects() to be inline. Un-inline it.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/6c289c55afee0d9a3067122db63277b8d60cf74f.1662390010.git.jani.nikula@intel.com
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

index 4ee463e..497a0c0 100644
@@ -979,22 +979,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 
-static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
-{
-       /*
-        * A single pass should suffice to release all the freed objects (along
-        * most call paths) , but be a little more paranoid in that freeing
-        * the objects does take a little amount of time, during which the rcu
-        * callbacks could have added new objects into the freed list, and
-        * armed the work again.
-        */
-       while (atomic_read(&i915->mm.free_count)) {
-               flush_work(&i915->mm.free_work);
-               flush_delayed_work(&i915->bdev.wq);
-               rcu_barrier();
-       }
-}
-
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915);
 void i915_gem_drain_workqueue(struct drm_i915_private *i915);
 
 struct i915_vma * __must_check
index 4c89b33..0f49ec9 100644
@@ -1086,6 +1086,21 @@ out:
 }
 
 /*
+ * A single pass should suffice to release all the freed objects (along most
+ * call paths), but be a little more paranoid in that freeing the objects does
+ * take a small amount of time, during which the RCU callbacks could have added
+ * new objects into the freed list, and armed the work again.
+ */
+void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+       while (atomic_read(&i915->mm.free_count)) {
+               flush_work(&i915->mm.free_work);
+               flush_delayed_work(&i915->bdev.wq);
+               rcu_barrier();
+       }
+}
+
+/*
 * Similar to objects above (see i915_gem_drain_freed_objects), in general we
  * have workers that are armed by RCU and then rearm themselves in their
  * callbacks. To be paranoid, we need to drain the workqueue a second time after
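
For illustration: the loop in the moved function exists because the deferred-free
work can be re-armed while a flush is in flight. Below is a minimal userspace C
sketch of that pattern, assuming hypothetical names (obj_pool,
pool_flush_free_work, pool_drain) rather than i915 API; a plain mutex and C11
atomics stand in for the kernel's workqueue and RCU machinery.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	struct obj *next;
};

/* Hypothetical stand-in for i915->mm: a deferred-free list plus counter. */
struct obj_pool {
	pthread_mutex_t lock;
	struct obj *freed;      /* objects queued for deferred freeing */
	atomic_int free_count;  /* plays the role of i915->mm.free_count */
};

/* Stand-in for flush_work(): run the deferred free to completion. */
static void pool_flush_free_work(struct obj_pool *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->freed) {
		struct obj *o = p->freed;

		p->freed = o->next;
		pthread_mutex_unlock(&p->lock);
		free(o);        /* freeing takes time; new objects may queue */
		atomic_fetch_sub(&p->free_count, 1);
		pthread_mutex_lock(&p->lock);
	}
	pthread_mutex_unlock(&p->lock);
}

/*
 * Stand-in for i915_gem_drain_freed_objects(): one flush usually empties
 * the list, but anything queued concurrently re-arms the work, hence the
 * loop on the counter rather than a single flush.
 */
static void pool_drain(struct obj_pool *p)
{
	while (atomic_load(&p->free_count))
		pool_flush_free_work(p);
}

int main(void)
{
	struct obj_pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	/* Queue a few objects for deferred freeing, then drain. */
	for (i = 0; i < 4; i++) {
		struct obj *o = malloc(sizeof(*o));

		o->next = p.freed;
		p.freed = o;
		atomic_fetch_add(&p.free_count, 1);
	}
	pool_drain(&p);
	printf("free_count after drain: %d\n", atomic_load(&p.free_count));
	return 0;
}

The kernel version additionally needs rcu_barrier(), since the re-arming there
happens from RCU callbacks that a mutex cannot model; flush_work() and
flush_delayed_work() are the real counterparts of the flush helper sketched
above.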