mutex_lock(&dev_priv->pc8.lock);
seq_printf(m, "Requirements met: %s\n",
yesno(dev_priv->pc8.requirements_met));
- seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
seq_printf(m, "IRQs disabled: %s\n",
yesno(dev_priv->pc8.irqs_disabled));
* Ideally every piece of our code that needs PC8+ disabled would call
* hsw_disable_package_c8, which would increment disable_count and prevent the
* system from reaching PC8+. But we don't have a symmetric way to do this for
- * everything, so we have the requirements_met and gpu_idle variables. When we
- * switch requirements_met or gpu_idle to true we decrease disable_count, and
- * increase it in the opposite case. The requirements_met variable is true when
- * all the CRTCs, encoders and the power well are disabled. The gpu_idle
- * variable is true when the GPU is idle.
+ * everything, so we have the requirements_met variable. When requirements_met
+ * switches to true we decrement disable_count, and increment it again when it
+ * switches back to false. The requirements_met variable is true when all the
+ * CRTCs, encoders and the power well are disabled.
*
* In addition to everything, we only actually enable PC8+ if disable_count
* stays at zero for at least some seconds. This is implemented with the
* enable_work delayed work.
*/
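
For reviewers who haven't followed the pc8 work, the scheme the comment above describes reduces to a small reference-count pattern. The sketch below is illustrative only, not driver code: pc8_state, forbid_pc8(), allow_pc8() and update_requirements() are hypothetical names standing in for hsw_disable_package_c8(), hsw_enable_package_c8() and the requirements_met handling, and the pc8.lock locking plus the enable_work delay are deliberately left out.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the disable_count scheme; not the i915 code. */
struct pc8_state {
	bool requirements_met;	/* CRTCs, encoders and power well all off */
	int disable_count;	/* PC8+ may only be entered while this is 0 */
};

/* Symmetric users take a reference to keep PC8+ disabled... */
static void forbid_pc8(struct pc8_state *s)
{
	s->disable_count++;
}

/* ...and drop it when done; the real code schedules enable_work here
 * instead of enabling PC8+ immediately. */
static void allow_pc8(struct pc8_state *s)
{
	if (--s->disable_count == 0)
		printf("disable_count hit 0, PC8+ may be enabled\n");
}

/* Paths without a symmetric get/put toggle requirements_met instead,
 * adjusting disable_count once per transition. */
static void update_requirements(struct pc8_state *s, bool met)
{
	if (met == s->requirements_met)
		return;
	s->requirements_met = met;
	if (met)
		allow_pc8(s);
	else
		forbid_pc8(s);
}

int main(void)
{
	/* Mirrors the init at the end of this patch: the count starts at 1,
	 * held on behalf of requirements_met. */
	struct pc8_state s = { .requirements_met = false, .disable_count = 1 };

	forbid_pc8(&s);			/* e.g. the GPU becomes busy */
	update_requirements(&s, true);	/* display side fully disabled */
	allow_pc8(&s);			/* GPU idle again, count reaches 0 */
	return 0;
}

With gpu_idle removed by this patch, the GPU busy/idle transitions below simply take and drop the reference via hsw_disable_package_c8()/hsw_enable_package_c8(), leaving requirements_met as the only remaining asymmetric case.
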
struct i915_package_c8 {
bool requirements_met;
- bool gpu_idle;
bool irqs_disabled;
/* Only true after the delayed work task actually enables it. */
bool enabled;
mutex_unlock(&dev_priv->pc8.lock);
}
-static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- if (!dev_priv->pc8.gpu_idle) {
- dev_priv->pc8.gpu_idle = true;
- __hsw_enable_package_c8(dev_priv);
- }
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
-static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
-{
- if (!HAS_PC8(dev_priv->dev))
- return;
-
- mutex_lock(&dev_priv->pc8.lock);
- if (dev_priv->pc8.gpu_idle) {
- dev_priv->pc8.gpu_idle = false;
- __hsw_disable_package_c8(dev_priv);
- }
- mutex_unlock(&dev_priv->pc8.lock);
-}
-
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
if (dev_priv->mm.busy)
return;
- hsw_package_c8_gpu_busy(dev_priv);
+ hsw_disable_package_c8(dev_priv);
i915_update_gfx_val(dev_priv);
dev_priv->mm.busy = true;
}
gen6_rps_idle(dev->dev_private);
out:
- hsw_package_c8_gpu_idle(dev_priv);
+ hsw_enable_package_c8(dev_priv);
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
mutex_init(&dev_priv->pc8.lock);
dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
dev_priv->pc8.irqs_disabled = false;
dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ dev_priv->pc8.disable_count = 1; /* requirements_met */
INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);