drm/i915: add a dedicated workqueue inside drm_i915_private
author     Luca Coelho <luciano.coelho@intel.com>
           Thu, 8 Jun 2023 13:35:45 +0000 (16:35 +0300)
committer  Jani Nikula <jani.nikula@intel.com>
           Sat, 10 Jun 2023 03:33:11 +0000 (06:33 +0300)
In order to avoid flush_scheduled_work() usage, add a dedicated
workqueue to the drm_i915_private structure.  This way, we no longer
need to use the system workqueue.
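
The pattern applied throughout the series is small and worth seeing in
isolation: allocate a driver-owned workqueue at init time, queue work
on it instead of on the system workqueue, and flush and destroy it on
teardown.  A minimal sketch follows, where my_driver, some_work and
"my-unordered" are hypothetical stand-ins for drm_i915_private and the
various i915 work items:

  #include <linux/container_of.h>
  #include <linux/errno.h>
  #include <linux/printk.h>
  #include <linux/workqueue.h>

  /* Hypothetical driver-private struct standing in for drm_i915_private. */
  struct my_driver {
          struct workqueue_struct *unordered_wq;
          struct work_struct some_work;
  };

  static void some_work_fn(struct work_struct *work)
  {
          struct my_driver *drv = container_of(work, struct my_driver,
                                               some_work);

          pr_debug("running deferred work for %p\n", drv);
  }

  static int my_driver_init(struct my_driver *drv)
  {
          /* Driver-owned replacement for the system workqueue. */
          drv->unordered_wq = alloc_workqueue("my-unordered", 0, 0);
          if (!drv->unordered_wq)
                  return -ENOMEM;

          INIT_WORK(&drv->some_work, some_work_fn);

          /* What used to be schedule_work(&drv->some_work): */
          queue_work(drv->unordered_wq, &drv->some_work);

          return 0;
  }

  static void my_driver_fini(struct my_driver *drv)
  {
          /* What used to be the deprecated flush_scheduled_work(): */
          flush_workqueue(drv->unordered_wq);
          destroy_workqueue(drv->unordered_wq);
  }

Passing 0 for both flags and max_active gives a bound, per-cpu
workqueue with default concurrency, i.e. the same semantics as
system_wq, but with flushes scoped to the driver instance.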

This change is mostly mechanical and based on Tetsuo's original
patch[1].

v6 by Jani:
- Also create unordered_wq for mock device

Link: https://patchwork.freedesktop.org/series/114608/
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/c816ebe17ef08d363981942a096a586a7658a65e.1686231190.git.jani.nikula@intel.com
25 files changed:
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_driver.c
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_drrs.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hotplug.c
drivers/gpu/drm/i915/display/intel_opregion.c
drivers/gpu/drm/i915/display/intel_pps.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
drivers/gpu/drm/i915/gt/intel_gt_irq.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/intel_wakeref.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c

drivers/gpu/drm/i915/display/intel_display.c
index 5c7fdc8..d853360 100644
@@ -7180,11 +7180,12 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                break;
        case FENCE_FREE:
                {
+                       struct drm_i915_private *i915 = to_i915(state->base.dev);
                        struct intel_atomic_helper *helper =
-                               &to_i915(state->base.dev)->display.atomic_helper;
+                               &i915->display.atomic_helper;
 
                        if (llist_add(&state->freed, &helper->free_list))
-                               schedule_work(&helper->free_work);
+                               queue_work(i915->unordered_wq, &helper->free_work);
                        break;
                }
        }
drivers/gpu/drm/i915/display/intel_display_driver.c
index dc8de86..b909814 100644
@@ -442,7 +442,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
        intel_unregister_dsm_handler();
 
        /* flush any delayed tasks or pending work */
-       flush_scheduled_work();
+       flush_workqueue(i915->unordered_wq);
 
        intel_hdcp_component_fini(i915);
 
drivers/gpu/drm/i915/display/intel_dmc.c
index 8a88de6..5f479f3 100644
@@ -1057,7 +1057,7 @@ void intel_dmc_init(struct drm_i915_private *i915)
        i915->display.dmc.dmc = dmc;
 
        drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
-       schedule_work(&dmc->work);
+       queue_work(i915->unordered_wq, &dmc->work);
 
        return;
 
drivers/gpu/drm/i915/display/intel_dp.c
index f4192fd..09dc6c8 100644
@@ -5251,7 +5251,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
        spin_lock_irq(&i915->irq_lock);
        i915->display.hotplug.event_bits |= BIT(encoder->hpd_pin);
        spin_unlock_irq(&i915->irq_lock);
-       queue_delayed_work(system_wq, &i915->display.hotplug.hotplug_work, 0);
+       queue_delayed_work(i915->unordered_wq, &i915->display.hotplug.hotplug_work, 0);
 }
 
 static const struct drm_connector_funcs intel_dp_connector_funcs = {
drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 176b610..a263773 100644
@@ -1064,6 +1064,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
                                                     const struct intel_crtc_state *crtc_state)
 {
        struct intel_connector *intel_connector = intel_dp->attached_connector;
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
        if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
                lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1081,7 +1082,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
        }
 
        /* Schedule a Hotplug Uevent to userspace to start modeset */
-       schedule_work(&intel_connector->modeset_retry_work);
+       queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
 }
 
 /* Perform the link training on all LTTPRs and the DPRX on a link. */
drivers/gpu/drm/i915/display/intel_drrs.c
index 760e63c..0d35b6b 100644
@@ -111,7 +111,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc,
 
 static void intel_drrs_schedule_work(struct intel_crtc *crtc)
 {
-       mod_delayed_work(system_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
+       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+       mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000));
 }
 
 static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state)
drivers/gpu/drm/i915/display/intel_fbc.c
index 29aa029..7f8b2d7 100644
@@ -1600,7 +1600,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc)
        if (READ_ONCE(fbc->underrun_detected))
                return;
 
-       schedule_work(&fbc->underrun_work);
+       queue_work(fbc->i915->unordered_wq, &fbc->underrun_work);
 }
 
 /**
drivers/gpu/drm/i915/display/intel_fbdev.c
index 2c3f7be..4d6209c 100644
@@ -694,7 +694,8 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
                        /* Don't block our own workqueue as this can
                         * be run in parallel with other i915.ko tasks.
                         */
-                       schedule_work(&dev_priv->display.fbdev.suspend_work);
+                       queue_work(dev_priv->unordered_wq,
+                                  &dev_priv->display.fbdev.suspend_work);
                        return;
                }
        }
drivers/gpu/drm/i915/display/intel_hdcp.c
index 17542c2..5ed4501 100644
@@ -983,6 +983,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
        struct drm_device *dev = connector->base.dev;
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
        drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
 
@@ -1001,7 +1002,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector,
        hdcp->value = value;
        if (update_property) {
                drm_connector_get(&connector->base);
-               schedule_work(&hdcp->prop_work);
+               queue_work(i915->unordered_wq, &hdcp->prop_work);
        }
 }
 
@@ -2090,16 +2091,17 @@ static void intel_hdcp_check_work(struct work_struct *work)
                                               struct intel_hdcp,
                                               check_work);
        struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
        if (drm_connector_is_unregistered(&connector->base))
                return;
 
        if (!intel_hdcp2_check_link(connector))
-               schedule_delayed_work(&hdcp->check_work,
-                                     DRM_HDCP2_CHECK_PERIOD_MS);
+               queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+                                  DRM_HDCP2_CHECK_PERIOD_MS);
        else if (!intel_hdcp_check_link(connector))
-               schedule_delayed_work(&hdcp->check_work,
-                                     DRM_HDCP_CHECK_PERIOD_MS);
+               queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+                                  DRM_HDCP_CHECK_PERIOD_MS);
 }
 
 static int i915_hdcp_component_bind(struct device *i915_kdev,
@@ -2398,7 +2400,8 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
        }
 
        if (!ret) {
-               schedule_delayed_work(&hdcp->check_work, check_link_interval);
+               queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
+                                  check_link_interval);
                intel_hdcp_update_value(connector,
                                        DRM_MODE_CONTENT_PROTECTION_ENABLED,
                                        true);
@@ -2447,6 +2450,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                                to_intel_connector(conn_state->connector);
        struct intel_hdcp *hdcp = &connector->hdcp;
        bool content_protection_type_changed, desired_and_not_enabled = false;
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
        if (!connector->hdcp.shim)
                return;
@@ -2473,7 +2477,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                mutex_lock(&hdcp->mutex);
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
                drm_connector_get(&connector->base);
-               schedule_work(&hdcp->prop_work);
+               queue_work(i915->unordered_wq, &hdcp->prop_work);
                mutex_unlock(&hdcp->mutex);
        }
 
@@ -2490,7 +2494,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                 */
                if (!desired_and_not_enabled && !content_protection_type_changed) {
                        drm_connector_get(&connector->base);
-                       schedule_work(&hdcp->prop_work);
+                       queue_work(i915->unordered_wq, &hdcp->prop_work);
                }
        }
 
@@ -2602,6 +2606,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
 {
        struct intel_hdcp *hdcp = &connector->hdcp;
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
 
        if (!hdcp->shim)
                return;
@@ -2609,5 +2614,5 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
        atomic_inc(&connector->hdcp.cp_irq_count);
        wake_up_all(&connector->hdcp.cp_irq_queue);
 
-       schedule_delayed_work(&hdcp->check_work, 0);
+       queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
 }
drivers/gpu/drm/i915/display/intel_hotplug.c
index 23a5e1a..1160fa2 100644
@@ -212,7 +212,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
                drm_kms_helper_poll_enable(&dev_priv->drm);
-               mod_delayed_work(system_wq, &dev_priv->display.hotplug.reenable_work,
+               mod_delayed_work(dev_priv->unordered_wq,
+                                &dev_priv->display.hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
 }
@@ -339,7 +340,8 @@ static void i915_digport_work_func(struct work_struct *work)
                spin_lock_irq(&dev_priv->irq_lock);
                dev_priv->display.hotplug.event_bits |= old_bits;
                spin_unlock_irq(&dev_priv->irq_lock);
-               queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+               queue_delayed_work(dev_priv->unordered_wq,
+                                  &dev_priv->display.hotplug.hotplug_work, 0);
        }
 }
 
@@ -446,7 +448,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
                dev_priv->display.hotplug.retry_bits |= retry;
                spin_unlock_irq(&dev_priv->irq_lock);
 
-               mod_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work,
+               mod_delayed_work(dev_priv->unordered_wq,
+                                &dev_priv->display.hotplug.hotplug_work,
                                 msecs_to_jiffies(HPD_RETRY_DELAY));
        }
 }
@@ -577,7 +580,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
        if (queue_dig)
                queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work);
        if (queue_hp)
-               queue_delayed_work(system_wq, &dev_priv->display.hotplug.hotplug_work, 0);
+               queue_delayed_work(dev_priv->unordered_wq,
+                                  &dev_priv->display.hotplug.hotplug_work, 0);
 }
 
 /**
@@ -687,7 +691,8 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
         * As well, there's no issue if we race here since we always reschedule
         * this worker anyway
         */
-       schedule_work(&dev_priv->display.hotplug.poll_init_work);
+       queue_work(dev_priv->unordered_wq,
+                  &dev_priv->display.hotplug.poll_init_work);
 }
 
 /**
@@ -715,7 +720,8 @@ void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
                return;
 
        WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false);
-       schedule_work(&dev_priv->display.hotplug.poll_init_work);
+       queue_work(dev_priv->unordered_wq,
+                  &dev_priv->display.hotplug.poll_init_work);
 }
 
 void intel_hpd_init_early(struct drm_i915_private *i915)
drivers/gpu/drm/i915/display/intel_opregion.c
index b7973a0..84078fb 100644
@@ -635,7 +635,8 @@ static void asle_work(struct work_struct *work)
 void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
 {
        if (dev_priv->display.opregion.asle)
-               schedule_work(&dev_priv->display.opregion.asle_work);
+               queue_work(dev_priv->unordered_wq,
+                          &dev_priv->display.opregion.asle_work);
 }
 
 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
drivers/gpu/drm/i915/display/intel_pps.c
index 5e7ba59..73f0f17 100644
@@ -867,6 +867,7 @@ static void edp_panel_vdd_work(struct work_struct *__work)
 
 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        unsigned long delay;
 
        /*
@@ -882,7 +883,8 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
         * operations.
         */
        delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5);
-       schedule_delayed_work(&intel_dp->pps.panel_vdd_work, delay);
+       queue_delayed_work(i915->unordered_wq,
+                          &intel_dp->pps.panel_vdd_work, delay);
 }
 
 /*
drivers/gpu/drm/i915/display/intel_psr.c
index ea0389c..d58ed9b 100644
@@ -341,7 +341,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
                 */
                intel_de_rmw(dev_priv, imr_reg, 0, psr_irq_psr_error_bit_get(intel_dp));
 
-               schedule_work(&intel_dp->psr.work);
+               queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
        }
 }
 
@@ -2440,6 +2440,8 @@ static void
 tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
                       enum fb_op_origin origin)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
        if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
            !intel_dp->psr.active)
                return;
@@ -2453,7 +2455,7 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
                return;
 
        tgl_psr2_enable_dc3co(intel_dp);
-       mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+       mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
                         intel_dp->psr.dc3co_exit_delay);
 }
 
@@ -2493,7 +2495,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
                psr_force_hw_tracking_exit(intel_dp);
 
                if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
-                       schedule_work(&intel_dp->psr.work);
+                       queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
        }
 }
 
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 7503264..2ebd937 100644
@@ -2327,6 +2327,7 @@ static u32 active_ccid(struct intel_engine_cs *engine)
 
 static void execlists_capture(struct intel_engine_cs *engine)
 {
+       struct drm_i915_private *i915 = engine->i915;
        struct execlists_capture *cap;
 
        if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
@@ -2375,7 +2376,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
                goto err_rq;
 
        INIT_WORK(&cap->work, execlists_capture_work);
-       schedule_work(&cap->work);
+       queue_work(i915->unordered_wq, &cap->work);
        return;
 
 err_rq:
@@ -3680,7 +3681,7 @@ static void virtual_context_destroy(struct kref *kref)
         * lock, we can delegate the free of the engine to an RCU worker.
         */
        INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
-       queue_rcu_work(system_wq, &ve->rcu);
+       queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu);
 }
 
 static void virtual_engine_initial_hint(struct virtual_engine *ve)
drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index cadfd85..86b5a9b 100644
@@ -88,10 +88,11 @@ static void pool_free_work(struct work_struct *wrk)
 {
        struct intel_gt_buffer_pool *pool =
                container_of(wrk, typeof(*pool), work.work);
+       struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
 
        if (pool_free_older_than(pool, HZ))
-               schedule_delayed_work(&pool->work,
-                                     round_jiffies_up_relative(HZ));
+               queue_delayed_work(gt->i915->unordered_wq, &pool->work,
+                                  round_jiffies_up_relative(HZ));
 }
 
 static void pool_retire(struct i915_active *ref)
@@ -99,6 +100,7 @@ static void pool_retire(struct i915_active *ref)
        struct intel_gt_buffer_pool_node *node =
                container_of(ref, typeof(*node), active);
        struct intel_gt_buffer_pool *pool = node->pool;
+       struct intel_gt *gt = container_of(pool, struct intel_gt, buffer_pool);
        struct list_head *list = bucket_for_size(pool, node->obj->base.size);
        unsigned long flags;
 
@@ -116,8 +118,8 @@ static void pool_retire(struct i915_active *ref)
        WRITE_ONCE(node->age, jiffies ?: 1); /* 0 reserved for active nodes */
        spin_unlock_irqrestore(&pool->lock, flags);
 
-       schedule_delayed_work(&pool->work,
-                             round_jiffies_up_relative(HZ));
+       queue_delayed_work(gt->i915->unordered_wq, &pool->work,
+                          round_jiffies_up_relative(HZ));
 }
 
 void intel_gt_buffer_pool_mark_used(struct intel_gt_buffer_pool_node *node)
drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 8f888d3..62fd00c 100644
@@ -376,7 +376,7 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
        if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
                gt->i915->l3_parity.which_slice |= 1 << 0;
 
-       schedule_work(&gt->i915->l3_parity.error_work);
+       queue_work(gt->i915->unordered_wq, &gt->i915->l3_parity.error_work);
 }
 
 void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 1dfd016..d1a382d 100644
@@ -116,7 +116,7 @@ void intel_engine_add_retire(struct intel_engine_cs *engine,
        GEM_BUG_ON(intel_engine_is_virtual(engine));
 
        if (add_retire(engine, tl))
-               schedule_work(&engine->retire_work);
+               queue_work(engine->i915->unordered_wq, &engine->retire_work);
 }
 
 void intel_engine_init_retire(struct intel_engine_cs *engine)
@@ -207,8 +207,8 @@ static void retire_work_handler(struct work_struct *work)
        struct intel_gt *gt =
                container_of(work, typeof(*gt), requests.retire_work.work);
 
-       schedule_delayed_work(&gt->requests.retire_work,
-                             round_jiffies_up_relative(HZ));
+       queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,
+                          round_jiffies_up_relative(HZ));
        intel_gt_retire_requests(gt);
 }
 
@@ -224,8 +224,8 @@ void intel_gt_park_requests(struct intel_gt *gt)
 
 void intel_gt_unpark_requests(struct intel_gt *gt)
 {
-       schedule_delayed_work(&gt->requests.retire_work,
-                             round_jiffies_up_relative(HZ));
+       queue_delayed_work(gt->i915->unordered_wq, &gt->requests.retire_work,
+                          round_jiffies_up_relative(HZ));
 }
 
 void intel_gt_fini_requests(struct intel_gt *gt)
drivers/gpu/drm/i915/gt/intel_reset.c
index 195ff72..e2152f7 100644
@@ -1625,7 +1625,7 @@ void __intel_init_wedge(struct intel_wedge_me *w,
        w->name = name;
 
        INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
-       schedule_delayed_work(&w->work, timeout);
+       queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
 }
 
 void __intel_fini_wedge(struct intel_wedge_me *w)
drivers/gpu/drm/i915/gt/intel_rps.c
index e68a992..e92e626 100644
@@ -73,13 +73,14 @@ static void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val)
 static void rps_timer(struct timer_list *t)
 {
        struct intel_rps *rps = from_timer(rps, t, timer);
+       struct intel_gt *gt = rps_to_gt(rps);
        struct intel_engine_cs *engine;
        ktime_t dt, last, timestamp;
        enum intel_engine_id id;
        s64 max_busy[3] = {};
 
        timestamp = 0;
-       for_each_engine(engine, rps_to_gt(rps), id) {
+       for_each_engine(engine, gt, id) {
                s64 busy;
                int i;
 
@@ -123,7 +124,7 @@ static void rps_timer(struct timer_list *t)
 
                        busy += div_u64(max_busy[i], 1 << i);
                }
-               GT_TRACE(rps_to_gt(rps),
+               GT_TRACE(gt,
                         "busy:%lld [%d%%], max:[%lld, %lld, %lld], interval:%d\n",
                         busy, (int)div64_u64(100 * busy, dt),
                         max_busy[0], max_busy[1], max_busy[2],
@@ -133,12 +134,12 @@ static void rps_timer(struct timer_list *t)
                    rps->cur_freq < rps->max_freq_softlimit) {
                        rps->pm_iir |= GEN6_PM_RP_UP_THRESHOLD;
                        rps->pm_interval = 1;
-                       schedule_work(&rps->work);
+                       queue_work(gt->i915->unordered_wq, &rps->work);
                } else if (100 * busy < rps->power.down_threshold * dt &&
                           rps->cur_freq > rps->min_freq_softlimit) {
                        rps->pm_iir |= GEN6_PM_RP_DOWN_THRESHOLD;
                        rps->pm_interval = 1;
-                       schedule_work(&rps->work);
+                       queue_work(gt->i915->unordered_wq, &rps->work);
                } else {
                        rps->last_adj = 0;
                }
@@ -973,7 +974,7 @@ static int rps_set_boost_freq(struct intel_rps *rps, u32 val)
        }
        mutex_unlock(&rps->lock);
        if (boost)
-               schedule_work(&rps->work);
+               queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
 
        return 0;
 }
@@ -1025,7 +1026,8 @@ void intel_rps_boost(struct i915_request *rq)
                        if (!atomic_fetch_inc(&slpc->num_waiters)) {
                                GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n",
                                         rq->fence.context, rq->fence.seqno);
-                               schedule_work(&slpc->boost_work);
+                               queue_work(rps_to_gt(rps)->i915->unordered_wq,
+                                          &slpc->boost_work);
                        }
 
                        return;
@@ -1041,7 +1043,7 @@ void intel_rps_boost(struct i915_request *rq)
                         rq->fence.context, rq->fence.seqno);
 
                if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
-                       schedule_work(&rps->work);
+                       queue_work(rps_to_gt(rps)->i915->unordered_wq, &rps->work);
 
                WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */
        }
@@ -1900,7 +1902,7 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
        gen6_gt_pm_mask_irq(gt, events);
 
        rps->pm_iir |= events;
-       schedule_work(&rps->work);
+       queue_work(gt->i915->unordered_wq, &rps->work);
 }
 
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
@@ -1917,7 +1919,7 @@ void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
                gen6_gt_pm_mask_irq(gt, events);
                rps->pm_iir |= events;
 
-               schedule_work(&rps->work);
+               queue_work(gt->i915->unordered_wq, &rps->work);
                spin_unlock(gt->irq_lock);
        }
 
drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 542ce6d..78cdfc6 100644
@@ -27,7 +27,7 @@ static void perf_begin(struct intel_gt *gt)
 
        /* Boost gpufreq to max [waitboost] and keep it fixed */
        atomic_inc(&gt->rps.num_waiters);
-       schedule_work(&gt->rps.work);
+       queue_work(gt->i915->unordered_wq, &gt->rps.work);
        flush_work(&gt->rps.work);
 }
 
drivers/gpu/drm/i915/i915_driver.c
index 4483be1..401f59c 100644
@@ -132,8 +132,20 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
        if (dev_priv->display.hotplug.dp_wq == NULL)
                goto out_free_wq;
 
+       /*
+        * The unordered i915 workqueue should be used for all work
+        * scheduling that does not require running in order, which
+        * used to be scheduled on the system_wq before moving to a
+        * driver instance due to the deprecation of flush_scheduled_work().
+        */
+       dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
+       if (dev_priv->unordered_wq == NULL)
+               goto out_free_dp_wq;
+
        return 0;
 
+out_free_dp_wq:
+       destroy_workqueue(dev_priv->display.hotplug.dp_wq);
 out_free_wq:
        destroy_workqueue(dev_priv->wq);
 out_err:
@@ -144,6 +156,7 @@ out_err:
 
 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
 {
+       destroy_workqueue(dev_priv->unordered_wq);
        destroy_workqueue(dev_priv->display.hotplug.dp_wq);
        destroy_workqueue(dev_priv->wq);
 }
drivers/gpu/drm/i915/i915_drv.h
index f1205ed..f125f04 100644
@@ -260,6 +260,16 @@ struct drm_i915_private {
         */
        struct workqueue_struct *wq;
 
+       /**
+        * unordered_wq - internal workqueue for unordered work
+        *
+        * This workqueue should be used for all unordered work
+        * scheduling within i915, which used to be scheduled on the
+        * system_wq before moving to a driver instance due to the
+        * deprecation of flush_scheduled_work().
+        */
+       struct workqueue_struct *unordered_wq;
+
        /* pm private clock gating functions */
        const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
 
drivers/gpu/drm/i915/i915_request.c
index 630a732..894068b 100644
@@ -290,7 +290,7 @@ static enum hrtimer_restart __rq_watchdog_expired(struct hrtimer *hrtimer)
 
        if (!i915_request_completed(rq)) {
                if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
-                       schedule_work(&gt->watchdog.work);
+                       queue_work(gt->i915->unordered_wq, &gt->watchdog.work);
        } else {
                i915_request_put(rq);
        }
drivers/gpu/drm/i915/intel_wakeref.c
index 40aafe6..718f2f1 100644
@@ -75,7 +75,7 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 
        /* Assume we are not in process context and so cannot sleep. */
        if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
-               mod_delayed_work(system_wq, &wf->work,
+               mod_delayed_work(wf->i915->unordered_wq, &wf->work,
                                 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
                return;
        }
drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0eda8b4..09d4bbc 100644
@@ -69,6 +69,7 @@ static void mock_device_release(struct drm_device *dev)
        i915_gem_drain_workqueue(i915);
 
        mock_fini_ggtt(to_gt(i915)->ggtt);
+       destroy_workqueue(i915->unordered_wq);
        destroy_workqueue(i915->wq);
 
        intel_region_ttm_device_fini(i915);
@@ -208,6 +209,10 @@ struct drm_i915_private *mock_gem_device(void)
        if (!i915->wq)
                goto err_drv;
 
+       i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
+       if (!i915->unordered_wq)
+               goto err_wq;
+
        mock_init_contexts(i915);
 
        /* allocate the ggtt */
@@ -239,6 +244,8 @@ struct drm_i915_private *mock_gem_device(void)
 err_context:
        intel_gt_driver_remove(to_gt(i915));
 err_unlock:
+       destroy_workqueue(i915->unordered_wq);
+err_wq:
        destroy_workqueue(i915->wq);
 err_drv:
        intel_region_ttm_device_fini(i915);