return ret;
}
-static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
+ bool enable)
{
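+	/* The caller must hold power_gate->vcn_gate_lock */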
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

- mutex_lock(&power_gate->vcn_gate_lock);
-
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
-		goto out;
+		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

-out:
+ return ret;
+}
+
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ mutex_lock(&power_gate->vcn_gate_lock);
+
+ ret = smu_dpm_set_vcn_enable_locked(smu, enable);
+
	mutex_unlock(&power_gate->vcn_gate_lock);

	return ret;
}
-static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
+ bool enable)
{
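+	/* The caller must hold power_gate->jpeg_gate_lock */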
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

- mutex_lock(&power_gate->jpeg_gate_lock);
-
	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
-		goto out;
+		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

-out:
+ return ret;
+}
+
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int ret = 0;
+
+ mutex_lock(&power_gate->jpeg_gate_lock);
+
+ ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
+
mutex_unlock(&power_gate->jpeg_gate_lock);
return ret;
return smu_set_funcs(adev);
}
+static int smu_set_default_dpm_table(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+ struct smu_power_gate *power_gate = &smu_power->power_gate;
+ int vcn_gate, jpeg_gate;
+ int ret = 0;
+
+ if (!smu->ppt_funcs->set_default_dpm_table)
+ return 0;
+
+ mutex_lock(&power_gate->vcn_gate_lock);
+ mutex_lock(&power_gate->jpeg_gate_lock);
+
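+	/* Record the current gating status so it can be restored afterwards */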
+ vcn_gate = atomic_read(&power_gate->vcn_gated);
+ jpeg_gate = atomic_read(&power_gate->jpeg_gated);
+
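+	/* Force VCN/JPEG dpm on for the duration of the default dpm table setup */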
+ ret = smu_dpm_set_vcn_enable_locked(smu, true);
+ if (ret)
+ goto err0_out;
+
+ ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+ if (ret)
+ goto err1_out;
+
+ ret = smu->ppt_funcs->set_default_dpm_table(smu);
+ if (ret)
+ dev_err(smu->adev->dev,
+ "Failed to setup default dpm clock tables!\n");
+
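+	/* Restore the original gating status */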
+ smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
+err1_out:
+ smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
+err0_out:
+ mutex_unlock(&power_gate->jpeg_gate_lock);
+ mutex_unlock(&power_gate->vcn_gate_lock);
+
+ return ret;
+}
+
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

--- a/...
+++ b/...
@@ ... @@
#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
-#define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
#define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu)
#define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu)