The supported features should be retrieved right after the EnableAllDpmFeatures message
completes. Checking whether a DPM feature is supported is only needed when deciding to
enable or disable it; for runtime decisions, check whether the feature is currently
enabled instead (a minimal sketch of this split follows the sign-offs).
Signed-off-by: Evan Quan <evan.quan@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
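For context, the intended split can be illustrated with the minimal, self-contained
userspace sketch below. This is not driver code: the struct, the query_enabled_mask()
helper and the feature bits are hypothetical stand-ins that only loosely mirror the
real smu_feature bookkeeping and smu_cmn_* helpers. The point is that the supported
mask is captured once, right after all DPM features have been enabled, while runtime
paths afterwards consult only the enabled state.

/* Userspace illustration only; all names below are hypothetical stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMU_FEATURE_DPM_DCEFCLK_BIT  1
#define SMU_FEATURE_DPM_SOCCLK_BIT   2

struct feature_state {
	uint64_t supported;	/* snapshot taken once during hw setup */
	uint64_t enabled;	/* what runtime checks actually consult */
};

/* hypothetical stand-in for smu_feature_get_enabled_mask() */
static uint64_t query_enabled_mask(void)
{
	return (1ULL << SMU_FEATURE_DPM_DCEFCLK_BIT) |
	       (1ULL << SMU_FEATURE_DPM_SOCCLK_BIT);
}

static bool feature_is_enabled(const struct feature_state *f, int bit)
{
	return f->enabled & (1ULL << bit);
}

int main(void)
{
	struct feature_state f = { 0, 0 };

	/* hw setup: capture the mask right after EnableAllDpmFeatures completes */
	f.supported = query_enabled_mask();
	f.enabled = f.supported;

	/* runtime paths (display config, deep sleep, etc.) only ask "is it enabled?" */
	printf("DCEFCLK DPM enabled: %d\n",
	       feature_is_enabled(&f, SMU_FEATURE_DPM_DCEFCLK_BIT));
	printf("SOCCLK DPM enabled: %d\n",
	       feature_is_enabled(&f, SMU_FEATURE_DPM_SOCCLK_BIT));

	return 0;
}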
static int smu_smc_hw_setup(struct smu_context *smu)
{
+ struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
uint32_t pcie_gen = 0, pcie_width = 0;
+ uint64_t features_supported;
int ret = 0;
if (adev->in_suspend && smu_is_dpm_running(smu)) {
return ret;
}
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
if (!smu_is_dpm_running(smu))
dev_info(adev->dev, "dpm has been disabled\n");
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
#if 0
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
int ret = 0;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
return ret;
bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
if (en) {
ret = smu_cmn_get_enabled_mask(smu, &feature_mask);
bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
}
return ret;
RLC_STATUS_OFF, NULL);
bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
if (!en)
return ret;
bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
return 0;
}
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
/* the allow message will be sent after the enable message on Vangogh */
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
if (ret) {
return ret;
bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
if (en) {
ret = smu_cmn_get_enabled_mask(smu, &feature_mask);
bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
}
return ret;
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
if (!en)
return ret;
bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
return 0;
}