ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
if (ret || val > max_freq || val < min_freq)
return -EINVAL;
- ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
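+ /* Pin SCLK to the requested value by using it as both the soft min and max */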
+ ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val);
+ } else {
+ return 0;
}
pm_runtime_mark_last_busy(adev->ddev->dev);
return ret;
}
-int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max, bool lock_needed)
+int smu_set_soft_freq_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
{
int ret = 0;
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
- if (lock_needed)
- mutex_lock(&smu->mutex);
- ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
- if (lock_needed)
- mutex_unlock(&smu->mutex);
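+ /* Locking is now handled here unconditionally rather than via a caller flag */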
+ mutex_lock(&smu->mutex);
+
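+ /* Dispatch to the ASIC-specific handler, if one is implemented */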
+ if (smu->ppt_funcs->set_soft_freq_limited_range)
+ ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
+ clk_type,
+ min,
+ max);
+
+ mutex_unlock(&smu->mutex);
return ret;
}
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max, bool lock_needed);
+ uint32_t min, uint32_t max);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min_value, uint32_t *max_value);
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu);
if (ret)
return size;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
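+ /* smu->mutex is already held on this path (lock_needed used to be false), so call the smu_v11_0 helper directly */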
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
return size;
break;
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
if (ret)
return ret;
}
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
return ret;
}
return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
if (ret)
return ret;
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
if (ret)
return ret;
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
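+ /* Likewise already under smu->mutex; use the smu_v12_0 helper directly */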
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
if (ret)
return ret;
}
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
return ret;
}
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
if (ret)
return ret;
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+ ret = smu_v12_0_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
if (ret)
return ret;
return size;
}
+static int sienna_cichlid_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min, uint32_t max)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
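+ /* Temporarily disable gfxoff while the GFXCLK soft limits are updated */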
+ if (clk_type == SMU_GFXCLK)
+ amdgpu_gfx_off_ctrl(adev, false);
+ ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min, max);
+ if (clk_type == SMU_GFXCLK)
+ amdgpu_gfx_off_ctrl(adev, true);
+
+ return ret;
+}
+
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
if (ret)
goto forec_level_out;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
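+ /* Route through the gfxoff-aware wrapper defined above */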
+ ret = sienna_cichlid_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
goto forec_level_out;
break;
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
+ ret = sienna_cichlid_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
if (ret)
return ret;
}
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
+ ret = sienna_cichlid_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
if (ret)
return ret;
}
return sienna_cichlid_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
+ ret = sienna_cichlid_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
+ ret = sienna_cichlid_set_soft_freq_limited_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
if (ret)
return ret;
return ret;
}
-static int sienna_cichlid_set_soft_freq_limited_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
- ret = smu_v11_0_set_soft_freq_limited_range(smu, clk_type, min, max);
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
- return ret;
-}
-
static bool sienna_cichlid_is_baco_supported(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;