Currently we call .hpd_irq_setup() directly just before the display
resume, and follow it with another call via intel_hpd_init()
just afterwards. Assuming the hpd pins are marked as enabled
during the open-coded call, these two do exactly the same
thing (i.e. enable HPD interrupts). Which even makes sense,
since we definitely need working HPD interrupts for MST sideband
during the display resume.
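
For reference, here's a condensed sketch of what intel_hpd_init()
boils down to at this point (pieced together from the hunks below,
with the other bookkeeping elided; not the verbatim function):

  void intel_hpd_init(struct drm_i915_private *dev_priv)
  {
          enum hpd_pin pin;

          /* mark all hpd pins as enabled ... */
          for_each_hpd_pin(pin)
                  dev_priv->hotplug.stats[pin].state = HPD_ENABLED;

          /*
           * ... and program the hardware, which is exactly what the
           * open-coded .hpd_irq_setup() call just before the display
           * resume does as well.
           */
          spin_lock_irq(&dev_priv->irq_lock);
          if (dev_priv->display.hpd_irq_setup)
                  dev_priv->display.hpd_irq_setup(dev_priv);
          spin_unlock_irq(&dev_priv->irq_lock);
  }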
So let's nuke the open-coded call and move the intel_hpd_init()
call earlier. However, we need to leave the poll_init_work part
behind until after the display resume, as it would trigger display
detection while we're still resuming. We don't want that trampling
over the display resume process. To make this a bit more symmetric
we turn this into an intel_hpd_poll_{enable,disable}() pair.
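
For reference, the poll_init_work worker is roughly the following
(heavily paraphrased, not the verbatim body; only the parts relevant
here are shown):

  static void i915_hpd_poll_init_work(struct work_struct *work)
  {
          struct drm_i915_private *dev_priv =
                  container_of(work, struct drm_i915_private,
                               hotplug.poll_init_work);
          struct drm_device *dev = &dev_priv->drm;
          bool enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);

          /* flip connector->polled for the relevant connectors ... */

          if (enabled)
                  drm_kms_helper_poll_enable(dev);

          /*
           * ... and when polling is being turned off, kick off a full
           * detection cycle to catch any hotplugs we might have
           * missed. That detection cycle is what we don't want
           * running in the middle of the display resume.
           */
          if (!enabled)
                  drm_helper_hpd_irq_event(dev);
  }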
So we end up with the following transformation:
intel_hpd_poll_init() -> intel_hpd_poll_enable()
lone intel_hpd_init() -> intel_hpd_init()+intel_hpd_poll_disable()
.hpd_irq_setup()+resume+intel_hpd_init() -> intel_hpd_init()+resume+intel_hpd_poll_disable()
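
In the resume path that boils down to the following ordering
(condensed from the resume path hunk further below):

          intel_hpd_init(dev_priv);

          /* MST sideband requires HPD interrupts enabled */
          intel_dp_mst_resume(dev_priv);

          intel_display_resume(dev);

          intel_hpd_poll_disable(dev_priv);
          drm_kms_helper_poll_enable(dev);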
If we really would like to prevent all *long* HPD processing during
the display resume, we'd need some kind of software mechanism to
simply ignore all long HPDs. Currently we appear to have that just
for fbdev via ifbdev->hpd_suspended. Since we aren't exploding left
and right all the time, I guess that's mostly sufficient.
For a bit of history on this, we first got a mechanism to block
hotplug processing during suspend in commit 15239099d7a7 ("drm/i915:
enable irqs earlier when resuming") on account of moving the irq
enable earlier. This then got removed in commit 50c3dc970a09
("drm/fb-helper: Fix hpd vs. initial config races") because the fbdev
initial config got pushed to a later point. The second ad-hoc
hpd_irq_setup() for resume was added in commit 0e32b39ceed6
("drm/i915: add DP 1.2 MST support (v0.7)") to be able to do MST
sideband during the resume. And finally we got a partial resurrection
of the hpd blocking mechanism in commit e8a8fedd57fd ("drm/i915:
Block fbdev HPD processing during suspend"), but this time it only
prevents fbdev from handling hpd while resuming.
v2: Leave the poll_init_work behind
v3: Remove the extra intel_hpd_poll_disable() from display reset (Lyude)
Add the missing intel_hpd_poll_disable() to display init (Imre)
Cc: Lyude Paul <lyude@redhat.com>
Cc: Imre Deak <imre.deak@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201013181137.30560-1-ville.syrjala@linux.intel.com
Reviewed-by: Imre Deak <imre.deak@intel.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
-
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ intel_hpd_init(dev_priv);
ret = __intel_display_resume(dev, state, ctx);
if (ret)
drm_err(&dev_priv->drm,
"Restoring old state failed with %i\n", ret);
- intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
}
drm_atomic_state_put(state);
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(i915);
+ intel_hpd_poll_disable(i915);
intel_init_ipc(i915);
return;
intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
/* Re-enable the ADPA, if we have one */
for_each_intel_encoder(&dev_priv->drm, encoder) {
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
- intel_hpd_poll_init(dev_priv);
+ intel_hpd_poll_enable(dev_priv);
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
* This is a separate step from interrupt enabling to simplify the locking rules
* in the driver load and resume code.
*
- * Also see: intel_hpd_poll_init(), which enables connector polling
+ * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable().
*/
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
}
- WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
- schedule_work(&dev_priv->hotplug.poll_init_work);
-
/*
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
}
/**
- * intel_hpd_poll_init - enables/disables polling for connectors with hpd
+ * intel_hpd_poll_enable - enable polling for connectors with hpd
* @dev_priv: i915 device instance
*
- * This function enables polling for all connectors, regardless of whether or
- * not they support hotplug detection. Under certain conditions HPD may not be
- * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * This function enables polling for all connectors which support HPD.
+ * Under certain conditions HPD may not be functional. On most Intel GPUs,
+ * this happens when we enter runtime suspend.
* On Valleyview and Cherryview systems, this also happens when we shut off all
* of the powerwells.
*
* dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
* worker.
*
- * Also see: intel_hpd_init(), which restores hpd handling.
+ * Also see: intel_hpd_init() and intel_hpd_poll_disable().
*/
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+void intel_hpd_poll_enable(struct drm_i915_private *dev_priv)
{
WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
schedule_work(&dev_priv->hotplug.poll_init_work);
}
+/**
+ * intel_hpd_poll_disable - disable polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ *
+ * This function disables polling for all connectors which support HPD.
+ * Under certain conditions HPD may not be functional. On most Intel GPUs,
+ * this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug disabling in a separate
+ * worker.
+ *
+ * Also used during driver init to initialize connector->polled
+ * appropriately for all connectors.
+ *
+ * Also see: intel_hpd_init() and intel_hpd_poll_enable().
+ */
+void intel_hpd_poll_disable(struct drm_i915_private *dev_priv)
+{
+ WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+ schedule_work(&dev_priv->hotplug.poll_init_work);
+}
+
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
struct intel_encoder;
enum port;
-void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
+void intel_hpd_poll_enable(struct drm_i915_private *dev_priv);
+void intel_hpd_poll_disable(struct drm_i915_private *dev_priv);
enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
+ intel_hpd_init(dev_priv);
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
-
+ /* MST sideband requires HPD interrupts enabled */
intel_dp_mst_resume(dev_priv);
-
intel_display_resume(dev);
+ intel_hpd_poll_disable(dev_priv);
drm_kms_helper_poll_enable(dev);
- /*
- * ... but also need to make sure that hotplug processing
- * doesn't cause havoc. Like in the driver load code we don't
- * bother with the tiny race here where we might lose hotplug
- * notifications.
- * */
- intel_hpd_init(dev_priv);
-
intel_opregion_resume(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- intel_hpd_poll_init(dev_priv);
+ intel_hpd_poll_enable(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
return 0;
* power well, so hpd is reinitialized from there. For
* everyone else do it here.
*/
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
+ }
intel_enable_ipc(dev_priv);