drm/amd/powerplay: add the hw manager for vega12 (v4)
[platform/kernel/linux-rpi.git] drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "vega12_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega12_powertune.h"
37 #include "vega12_inc.h"
38 #include "pp_soc15.h"
39 #include "pppcielanes.h"
40 #include "vega12_hwmgr.h"
41 #include "vega12_processpptables.h"
42 #include "vega12_pptable.h"
43 #include "vega12_thermal.h"
44 #include "vega12_ppsmc.h"
45 #include "pp_debug.h"
46 #include "amd_pcie_helpers.h"
47 #include "cgs_linux.h"
48 #include "ppinterrupt.h"
49 #include "pp_overdriver.h"
50 #include "pp_thermal.h"
51
52 static const ULONG PhwVega12_Magic = (ULONG)(PHM_VIslands_Magic);
53
54 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
55                 enum pp_clock_type type, uint32_t mask);
56 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
57                 uint32_t *clock,
58                 PPCLK_e clock_select,
59                 bool max);
60
61 struct vega12_power_state *cast_phw_vega12_power_state(
62                                   struct pp_hw_power_state *hw_ps)
63 {
64         PP_ASSERT_WITH_CODE((PhwVega12_Magic == hw_ps->magic),
65                                 "Invalid Powerstate Type!",
66                                  return NULL;);
67
68         return (struct vega12_power_state *)hw_ps;
69 }
70
71 const struct vega12_power_state *cast_const_phw_vega12_power_state(
72                                  const struct pp_hw_power_state *hw_ps)
73 {
74         PP_ASSERT_WITH_CODE((PhwVega12_Magic == hw_ps->magic),
75                                 "Invalid Powerstate Type!",
76                                  return NULL;);
77
78         return (const struct vega12_power_state *)hw_ps;
79 }
80
81 static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
82 {
83         struct vega12_hwmgr *data =
84                         (struct vega12_hwmgr *)(hwmgr->backend);
85
86         data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
87         data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
88         data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
89         data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
90         data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
91
92         data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
93         data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
94         data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
95         data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
96         data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
97         data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
98         data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
99         data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
100         data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
101         data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
102         data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
103         data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
104         data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
105
106         data->registry_data.disallowed_features = 0x0;
107         data->registry_data.od_state_in_dc_support = 0;
108         data->registry_data.skip_baco_hardware = 0;
109
110         data->registry_data.log_avfs_param = 0;
111         data->registry_data.sclk_throttle_low_notification = 1;
112         data->registry_data.force_dpm_high = 0;
113         data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
114
115         data->registry_data.didt_support = 0;
116         if (data->registry_data.didt_support) {
117                 data->registry_data.didt_mode = 6;
118                 data->registry_data.sq_ramping_support = 1;
119                 data->registry_data.db_ramping_support = 0;
120                 data->registry_data.td_ramping_support = 0;
121                 data->registry_data.tcp_ramping_support = 0;
122                 data->registry_data.dbr_ramping_support = 0;
123                 data->registry_data.edc_didt_support = 1;
124                 data->registry_data.gc_didt_support = 0;
125                 data->registry_data.psm_didt_support = 0;
126         }
127
128         data->registry_data.pcie_lane_override = 0xff;
129         data->registry_data.pcie_speed_override = 0xff;
130         data->registry_data.pcie_clock_override = 0xffffffff;
131         data->registry_data.regulator_hot_gpio_support = 1;
132         data->registry_data.ac_dc_switch_gpio_support = 0;
133         data->registry_data.quick_transition_support = 0;
134         data->registry_data.zrpm_start_temp = 0xffff;
135         data->registry_data.zrpm_stop_temp = 0xffff;
136         data->registry_data.odn_feature_enable = 1;
137         data->registry_data.disable_water_mark = 0;
138         data->registry_data.disable_pp_tuning = 0;
139         data->registry_data.disable_xlpp_tuning = 0;
140         data->registry_data.disable_workload_policy = 0;
141         data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
142         data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
143         data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
144         data->registry_data.force_workload_policy_mask = 0;
145         data->registry_data.disable_3d_fs_detection = 0;
146         data->registry_data.fps_support = 1;
147         data->registry_data.disable_auto_wattman = 1;
148         data->registry_data.auto_wattman_debug = 0;
149         data->registry_data.auto_wattman_sample_period = 100;
150         data->registry_data.auto_wattman_threshold = 50;
151 }
152
153 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
154 {
155         struct vega12_hwmgr *data =
156                         (struct vega12_hwmgr *)(hwmgr->backend);
157         struct amdgpu_device *adev = hwmgr->adev;
158
159         if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
160                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
161                                 PHM_PlatformCaps_ControlVDDCI);
162
163         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
164                         PHM_PlatformCaps_TablelessHardwareInterface);
165
166         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
167                         PHM_PlatformCaps_EnableSMU7ThermalManagement);
168
169         if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
170                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
171                                 PHM_PlatformCaps_UVDPowerGating);
172                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
173                                 PHM_PlatformCaps_UVDDynamicPowerGating);
174         }
175
176         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
177                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
178                                 PHM_PlatformCaps_VCEPowerGating);
179
180         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
181                         PHM_PlatformCaps_UnTabledHardwareInterface);
182
183         if (data->registry_data.odn_feature_enable)
184                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
185                                 PHM_PlatformCaps_ODNinACSupport);
186         else {
187                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
188                                 PHM_PlatformCaps_OD6inACSupport);
189                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
190                                 PHM_PlatformCaps_OD6PlusinACSupport);
191         }
192
193         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194                         PHM_PlatformCaps_ActivityReporting);
195         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
196                         PHM_PlatformCaps_FanSpeedInTableIsRPM);
197
198         if (data->registry_data.od_state_in_dc_support) {
199                 if (data->registry_data.odn_feature_enable)
200                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
201                                         PHM_PlatformCaps_ODNinDCSupport);
202                 else {
203                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204                                         PHM_PlatformCaps_OD6inDCSupport);
205                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
206                                         PHM_PlatformCaps_OD6PlusinDCSupport);
207                 }
208         }
209
210         if (data->registry_data.thermal_support
211                         && data->registry_data.fuzzy_fan_control_support
212                         && hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
213                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
214                                 PHM_PlatformCaps_ODFuzzyFanControlSupport);
215
216         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
217                                 PHM_PlatformCaps_DynamicPowerManagement);
218         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219                         PHM_PlatformCaps_SMC);
220         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
221                         PHM_PlatformCaps_ThermalPolicyDelay);
222
223         if (data->registry_data.force_dpm_high)
224                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225                                 PHM_PlatformCaps_ExclusiveModeAlwaysHigh);
226
227         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228                         PHM_PlatformCaps_DynamicUVDState);
229
230         if (data->registry_data.sclk_throttle_low_notification)
231                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
232                                 PHM_PlatformCaps_SclkThrottleLowNotification);
233
234         /* power tune caps */
235         /* assume disabled */
236         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
237                         PHM_PlatformCaps_PowerContainment);
238         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
239                         PHM_PlatformCaps_DiDtSupport);
240         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
241                         PHM_PlatformCaps_SQRamping);
242         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
243                         PHM_PlatformCaps_DBRamping);
244         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
245                         PHM_PlatformCaps_TDRamping);
246         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
247                         PHM_PlatformCaps_TCPRamping);
248         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
249                         PHM_PlatformCaps_DBRRamping);
250         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
251                         PHM_PlatformCaps_DiDtEDCEnable);
252         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253                         PHM_PlatformCaps_GCEDC);
254         phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255                         PHM_PlatformCaps_PSM);
256
257         if (data->registry_data.didt_support) {
258                 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
259                 if (data->registry_data.sq_ramping_support)
260                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
261                 if (data->registry_data.db_ramping_support)
262                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
263                 if (data->registry_data.td_ramping_support)
264                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
265                 if (data->registry_data.tcp_ramping_support)
266                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
267                 if (data->registry_data.dbr_ramping_support)
268                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
269                 if (data->registry_data.edc_didt_support)
270                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
271                 if (data->registry_data.gc_didt_support)
272                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
273                 if (data->registry_data.psm_didt_support)
274                         phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
275         }
276
277         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
278                         PHM_PlatformCaps_RegulatorHot);
279
280         if (data->registry_data.ac_dc_switch_gpio_support) {
281                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
282                                 PHM_PlatformCaps_AutomaticDCTransition);
283                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
284                                 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
285         }
286
287         if (data->registry_data.quick_transition_support) {
288                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
289                                 PHM_PlatformCaps_AutomaticDCTransition);
290                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
291                                 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
292                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
293                                 PHM_PlatformCaps_Falcon_QuickTransition);
294         }
295
296         if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
297                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
298                                 PHM_PlatformCaps_LowestUclkReservedForUlv);
299                 if (data->lowest_uclk_reserved_for_ulv == 1)
300                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
301                                         PHM_PlatformCaps_LowestUclkReservedForUlv);
302         }
303
304         if (data->registry_data.custom_fan_support)
305                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
306                                 PHM_PlatformCaps_CustomFanControlSupport);
307
308         return 0;
309 }
310
311 static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
312 {
313         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
314         int i;
315
316         data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
317                         FEATURE_DPM_PREFETCHER_BIT;
318         data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
319                         FEATURE_DPM_GFXCLK_BIT;
320         data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
321                         FEATURE_DPM_UCLK_BIT;
322         data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
323                         FEATURE_DPM_SOCCLK_BIT;
324         data->smu_features[GNLD_DPM_UVD].smu_feature_id =
325                         FEATURE_DPM_UVD_BIT;
326         data->smu_features[GNLD_DPM_VCE].smu_feature_id =
327                         FEATURE_DPM_VCE_BIT;
328         data->smu_features[GNLD_ULV].smu_feature_id =
329                         FEATURE_ULV_BIT;
330         data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
331                         FEATURE_DPM_MP0CLK_BIT;
332         data->smu_features[GNLD_DPM_LINK].smu_feature_id =
333                         FEATURE_DPM_LINK_BIT;
334         data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
335                         FEATURE_DPM_DCEFCLK_BIT;
336         data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
337                         FEATURE_DS_GFXCLK_BIT;
338         data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
339                         FEATURE_DS_SOCCLK_BIT;
340         data->smu_features[GNLD_DS_LCLK].smu_feature_id =
341                         FEATURE_DS_LCLK_BIT;
342         data->smu_features[GNLD_PPT].smu_feature_id =
343                         FEATURE_PPT_BIT;
344         data->smu_features[GNLD_TDC].smu_feature_id =
345                         FEATURE_TDC_BIT;
346         data->smu_features[GNLD_THERMAL].smu_feature_id =
347                         FEATURE_THERMAL_BIT;
348         data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
349                         FEATURE_GFX_PER_CU_CG_BIT;
350         data->smu_features[GNLD_RM].smu_feature_id =
351                         FEATURE_RM_BIT;
352         data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
353                         FEATURE_DS_DCEFCLK_BIT;
354         data->smu_features[GNLD_ACDC].smu_feature_id =
355                         FEATURE_ACDC_BIT;
356         data->smu_features[GNLD_VR0HOT].smu_feature_id =
357                         FEATURE_VR0HOT_BIT;
358         data->smu_features[GNLD_VR1HOT].smu_feature_id =
359                         FEATURE_VR1HOT_BIT;
360         data->smu_features[GNLD_FW_CTF].smu_feature_id =
361                         FEATURE_FW_CTF_BIT;
362         data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
363                         FEATURE_LED_DISPLAY_BIT;
364         data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
365                         FEATURE_FAN_CONTROL_BIT;
366         data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
367         data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
368         data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
369         data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
370
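            /*
             * Build each feature's SMU bitmask from its feature ID, and mark
             * the feature as allowed unless its bit (indexed by the GNLD_*
             * position) is set in the disallowed_features registry mask.
             */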
371         for (i = 0; i < GNLD_FEATURES_MAX; i++) {
372                 data->smu_features[i].smu_feature_bitmap =
373                         (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
374                 data->smu_features[i].allowed =
375                         ((data->registry_data.disallowed_features >> i) & 1) ?
376                         false : true;
377         }
378 }
379
380 static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
381 {
382         return 0;
383 }
384
385 static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
386 {
387         kfree(hwmgr->backend);
388         hwmgr->backend = NULL;
389
390         return 0;
391 }
392
393 static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
394 {
395         int result = 0;
396         struct vega12_hwmgr *data;
397         struct amdgpu_device *adev = hwmgr->adev;
398
399         data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
400         if (data == NULL)
401                 return -ENOMEM;
402
403         hwmgr->backend = data;
404
405         vega12_set_default_registry_data(hwmgr);
406
407         data->disable_dpm_mask = 0xff;
408         data->workload_mask = 0xff;
409
410         /* need to set voltage control types before EVV patching */
411         data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
412         data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
413         data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
414
415         data->water_marks_bitmap = 0;
416         data->avfs_exist = false;
417
418         vega12_set_features_platform_caps(hwmgr);
419
420         vega12_init_dpm_defaults(hwmgr);
421
422         /* Parse pptable data read from VBIOS */
423         vega12_set_private_data_based_on_pptable(hwmgr);
424
425         data->is_tlu_enabled = false;
426
427         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
428                         VEGA12_MAX_HARDWARE_POWERLEVELS;
429         hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
430         hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
431
432         hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
433         /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
434         hwmgr->platform_descriptor.clockStep.engineClock = 500;
435         hwmgr->platform_descriptor.clockStep.memoryClock = 500;
436
437         data->total_active_cus = adev->gfx.cu_info.number;
438         /* Setup default Overdrive Fan control settings */
439         data->odn_fan_table.target_fan_speed =
440                         hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
441         data->odn_fan_table.target_temperature =
442                         hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
443         data->odn_fan_table.min_performance_clock =
444                         hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
445         data->odn_fan_table.min_fan_limit =
446                         hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
447                         hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
448
449         return result;
450 }
451
452 static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
453 {
454         struct vega12_hwmgr *data =
455                         (struct vega12_hwmgr *)(hwmgr->backend);
456
457         data->low_sclk_interrupt_threshold = 0;
458
459         return 0;
460 }
461
462 static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
463 {
464         PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
465                         "Failed to init sclk threshold!",
466                         return -EINVAL);
467
468         return 0;
469 }
470
471 /*
472  * @fn vega12_init_dpm_state
473  * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
474  *
475  * @param    dpm_state - the address of the DPM Table to initialize.
476  * @return   None.
477  */
478 static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
479 {
480         dpm_state->soft_min_level = 0xff;
481         dpm_state->soft_max_level = 0xff;
482         dpm_state->hard_min_level = 0xff;
483         dpm_state->hard_max_level = 0xff;
484 }
485
486 /*
487  * This function initializes all DPM state tables
488  * for the SMU based on the dependency table.
489  * The dynamic state patching function will then trim these
490  * state tables to the allowed range based
491  * on the power policy or external client requests,
492  * such as UVD requests, etc.
493  */
494 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
495 {
496         struct vega12_hwmgr *data =
497                         (struct vega12_hwmgr *)(hwmgr->backend);
498         struct vega12_single_dpm_table *dpm_table;
499
500         memset(&data->dpm_table, 0, sizeof(data->dpm_table));
501
502         /* Initialize Sclk DPM table based on allowed Sclk values */
503         dpm_table = &(data->dpm_table.soc_table);
504         vega12_init_dpm_state(&(dpm_table->dpm_state));
505
506         dpm_table = &(data->dpm_table.gfx_table);
507         vega12_init_dpm_state(&(dpm_table->dpm_state));
508
509         /* Initialize Mclk DPM table based on allowed Mclk values */
510         dpm_table = &(data->dpm_table.mem_table);
511         vega12_init_dpm_state(&(dpm_table->dpm_state));
512
513         dpm_table = &(data->dpm_table.eclk_table);
514         vega12_init_dpm_state(&(dpm_table->dpm_state));
515
516         dpm_table = &(data->dpm_table.vclk_table);
517         vega12_init_dpm_state(&(dpm_table->dpm_state));
518
519         dpm_table = &(data->dpm_table.dclk_table);
520         vega12_init_dpm_state(&(dpm_table->dpm_state));
521
522         /* Assume there is no headless Vega12 for now */
523         dpm_table = &(data->dpm_table.dcef_table);
524         vega12_init_dpm_state(&(dpm_table->dpm_state));
525
526         dpm_table = &(data->dpm_table.pixel_table);
527         vega12_init_dpm_state(&(dpm_table->dpm_state));
528
529         dpm_table = &(data->dpm_table.display_table);
530         vega12_init_dpm_state(&(dpm_table->dpm_state));
531
532         dpm_table = &(data->dpm_table.phy_table);
533         vega12_init_dpm_state(&(dpm_table->dpm_state));
534
535         /* save a copy of the default DPM table */
536         memcpy(&(data->golden_dpm_table), &(data->dpm_table),
537                         sizeof(struct vega12_dpm_table));
538
539         return 0;
540 }
541
542 #if 0
543 static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
544 {
545         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
546         struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
547         uint32_t min_level;
548
549         hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
550         hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
551
552         /* Optimize compute power profile: Use only highest
553          * 2 power levels (if more than 2 are available)
554          */
555         if (dpm_table->count > 2)
556                 min_level = dpm_table->count - 2;
557         else if (dpm_table->count == 2)
558                 min_level = 1;
559         else
560                 min_level = 0;
561
562         hwmgr->default_compute_power_profile.min_sclk =
563                         dpm_table->dpm_levels[min_level].value;
564
565         hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
566         hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
567
568         return 0;
569 }
570 #endif
571
572 /**
573 * Initializes the SMC table and uploads it to the SMC.
574 *
575 * @param    hwmgr  the address of the powerplay hardware manager.
576 * @return   0 on success; error code otherwise.
578 */
579 static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
580 {
581         int result;
582         struct vega12_hwmgr *data =
583                         (struct vega12_hwmgr *)(hwmgr->backend);
584         PPTable_t *pp_table = &(data->smc_state_table.pp_table);
585         struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
586         struct phm_ppt_v3_information *pptable_information =
587                 (struct phm_ppt_v3_information *)hwmgr->pptable;
588
589         result = vega12_setup_default_dpm_tables(hwmgr);
590         PP_ASSERT_WITH_CODE(!result,
591                         "Failed to setup default DPM tables!",
592                         return result);
593
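            /*
             * Cache the VBIOS boot-up state, lock the SOC voltage floor to the
             * boot VDDC when one is reported, and program the minimum
             * deep-sleep DCEF clock on the SMU.
             */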
594         result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
595         if (!result) {
596                 data->vbios_boot_state.vddc     = boot_up_values.usVddc;
597                 data->vbios_boot_state.vddci    = boot_up_values.usVddci;
598                 data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
599                 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
600                 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
601                 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
602                 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
603                 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
604                 if (0 != boot_up_values.usVddc) {
605                         smum_send_msg_to_smc_with_parameter(hwmgr,
606                                                 PPSMC_MSG_SetFloorSocVoltage,
607                                                 (boot_up_values.usVddc * 4));
608                         data->vbios_boot_state.bsoc_vddc_lock = true;
609                 } else {
610                         data->vbios_boot_state.bsoc_vddc_lock = false;
611                 }
612                 smum_send_msg_to_smc_with_parameter(hwmgr,
613                                 PPSMC_MSG_SetMinDeepSleepDcefclk,
614                         (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
615         }
616
617         memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
618
619         result = vega12_copy_table_to_smc(hwmgr,
620                         (uint8_t *)pp_table, TABLE_PPTABLE);
621         PP_ASSERT_WITH_CODE(!result,
622                         "Failed to upload PPtable!", return result);
623
624         return 0;
625 }
626
627 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
628 {
629         struct vega12_hwmgr *data =
630                         (struct vega12_hwmgr *)(hwmgr->backend);
631         int i;
632         uint32_t allowed_features_low = 0, allowed_features_high = 0;
633
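            /*
             * The SMU accepts the 64-bit allowed-features mask as two 32-bit
             * words: features whose ID is above 31 contribute to the high
             * word, all others to the low word.
             */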
634         for (i = 0; i < GNLD_FEATURES_MAX; i++)
635                 if (data->smu_features[i].allowed)
636                         data->smu_features[i].smu_feature_id > 31 ?
637                                 (allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
638                                 (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
639
640         PP_ASSERT_WITH_CODE(
641                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
642                 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
643                 return -1);
644
645         PP_ASSERT_WITH_CODE(
646                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
647                 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
648                 return -1);
649
650         return 0;
651 }
652
653 static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
654 {
655         struct vega12_hwmgr *data =
656                         (struct vega12_hwmgr *)(hwmgr->backend);
657         uint64_t features_enabled;
658         int i;
659         bool enabled;
660
661         PP_ASSERT_WITH_CODE(
662                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
663                 "[EnableAllSMUFeatures] Failed to enable all smu features!",
664                 return -1);
665
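            /*
             * Read back which features the SMU actually enabled and cache the
             * per-feature enabled/supported state.
             */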
666         if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
667                 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
668                         enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
669                         data->smu_features[i].enabled = enabled;
670                         data->smu_features[i].supported = enabled;
671                         PP_ASSERT(
672                                 !data->smu_features[i].allowed || enabled,
673                                 "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
674                 }
675         }
676
677         return 0;
678 }
679
680 static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
681 {
682         struct vega12_hwmgr *data =
683                         (struct vega12_hwmgr *)(hwmgr->backend);
684         uint64_t features_enabled;
685         int i;
686         bool enabled;
687
688         PP_ASSERT_WITH_CODE(
689                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
690                 "[DisableAllSMUFeatures] Failed to disable all smu features!",
691                 return -1);
692
693         if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
694                 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
695                         enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
696                         data->smu_features[i].enabled = enabled;
697                         data->smu_features[i].supported = enabled;
698                 }
699         }
700
701         return 0;
702 }
703
704 static int vega12_odn_initialize_default_settings(
705                 struct pp_hwmgr *hwmgr)
706 {
707         return 0;
708 }
709
710 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
711 {
712         int tmp_result, result = 0;
713
714         smum_send_msg_to_smc_with_parameter(hwmgr,
715                         PPSMC_MSG_NumOfDisplays, 0);
716
717         result = vega12_set_allowed_featuresmask(hwmgr);
718         PP_ASSERT_WITH_CODE(result == 0,
719                         "[EnableDPMTasks] Failed to set allowed features mask!",
720                         return result);
721
722         tmp_result = vega12_init_smc_table(hwmgr);
723         PP_ASSERT_WITH_CODE(!tmp_result,
724                         "Failed to initialize SMC table!",
725                         result = tmp_result);
726
727         result = vega12_enable_all_smu_features(hwmgr);
728         PP_ASSERT_WITH_CODE(!result,
729                         "Failed to enable all smu features!",
730                         return result);
731
732         tmp_result = vega12_power_control_set_level(hwmgr);
733         PP_ASSERT_WITH_CODE(!tmp_result,
734                         "Failed to set power control level!",
735                         result = tmp_result);
736
737         result = vega12_odn_initialize_default_settings(hwmgr);
738         PP_ASSERT_WITH_CODE(!result,
739                         "Failed to initialize ODN default settings!",
740                         return result);
741
742         return result;
743 }
744
745 static int vega12_get_power_state_size(struct pp_hwmgr *hwmgr)
746 {
747         return sizeof(struct vega12_power_state);
748 }
749
750 static int vega12_get_number_of_pp_table_entries(struct pp_hwmgr *hwmgr)
751 {
752         return 0;
753 }
754
755 static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
756              struct pp_hw_power_state *hw_ps)
757 {
758         return 0;
759 }
760
761 static int vega12_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
762                                 struct pp_power_state  *request_ps,
763                         const struct pp_power_state *current_ps)
764 {
765         struct vega12_power_state *vega12_ps =
766                                 cast_phw_vega12_power_state(&request_ps->hardware);
767         uint32_t sclk;
768         uint32_t mclk;
769         struct PP_Clocks minimum_clocks = {0};
770         bool disable_mclk_switching;
771         bool disable_mclk_switching_for_frame_lock;
772         bool disable_mclk_switching_for_vr;
773         bool force_mclk_high;
774         struct cgs_display_info info = {0};
775         const struct phm_clock_and_voltage_limits *max_limits;
776         uint32_t i;
777         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
778         struct phm_ppt_v2_information *table_info =
779                         (struct phm_ppt_v2_information *)(hwmgr->pptable);
780         int32_t count;
781         uint32_t stable_pstate_sclk_dpm_percentage;
782         uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
783         uint32_t latency;
784
785         data->battery_state = (PP_StateUILabel_Battery ==
786                         request_ps->classification.ui_label);
787
788         if (vega12_ps->performance_level_count != 2)
789                 pr_info("Vega12 power states should always have 2 performance levels");
790
791         max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
792                         &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
793                         &(hwmgr->dyn_state.max_clock_voltage_on_dc);
794
795         /* Cap clock DPM tables at DC MAX if it is in DC. */
796         if (PP_PowerSource_DC == hwmgr->power_source) {
797                 for (i = 0; i < vega12_ps->performance_level_count; i++) {
798                         if (vega12_ps->performance_levels[i].mem_clock >
799                                 max_limits->mclk)
800                                 vega12_ps->performance_levels[i].mem_clock =
801                                                 max_limits->mclk;
802                         if (vega12_ps->performance_levels[i].gfx_clock >
803                                 max_limits->sclk)
804                                 vega12_ps->performance_levels[i].gfx_clock =
805                                                 max_limits->sclk;
806                 }
807         }
808
809         cgs_get_active_displays_info(hwmgr->device, &info);
810
811         /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
812         minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
813         minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
814
815         if (PP_CAP(PHM_PlatformCaps_StablePState)) {
                stable_pstate_sclk_dpm_percentage =
                        data->registry_data.stable_pstate_sclk_dpm_percentage;
816                 PP_ASSERT_WITH_CODE(
817                         data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
818                         data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
819                         "percent sclk value must range from 1% to 100%, setting default value",
820                         stable_pstate_sclk_dpm_percentage = 75);
821
822                 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
823                 stable_pstate_sclk = (max_limits->sclk *
824                                 stable_pstate_sclk_dpm_percentage) / 100;
825
826                 for (count = table_info->vdd_dep_on_sclk->count - 1;
827                                 count >= 0; count--) {
828                         if (stable_pstate_sclk >=
829                                         table_info->vdd_dep_on_sclk->entries[count].clk) {
830                                 stable_pstate_sclk =
831                                                 table_info->vdd_dep_on_sclk->entries[count].clk;
832                                 break;
833                         }
834                 }
835
836                 if (count < 0)
837                         stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
838
839                 stable_pstate_mclk = max_limits->mclk;
840
841                 minimum_clocks.engineClock = stable_pstate_sclk;
842                 minimum_clocks.memoryClock = stable_pstate_mclk;
843         }
844
845         disable_mclk_switching_for_frame_lock = phm_cap_enabled(
846                                     hwmgr->platform_descriptor.platformCaps,
847                                     PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
848         disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
849         force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
850
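            /*
             * Mclk switching is disabled when more than one display is active,
             * or when frame lock, VR, or forced-high mclk requires a stable
             * memory clock while a display is active.
             */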
851         if (info.display_count == 0)
852                 disable_mclk_switching = false;
853         else
854                 disable_mclk_switching = (info.display_count > 1) ||
855                         disable_mclk_switching_for_frame_lock ||
856                         disable_mclk_switching_for_vr ||
857                         force_mclk_high;
858
859         sclk = vega12_ps->performance_levels[0].gfx_clock;
860         mclk = vega12_ps->performance_levels[0].mem_clock;
861
862         if (sclk < minimum_clocks.engineClock)
863                 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
864                                 max_limits->sclk : minimum_clocks.engineClock;
865
866         if (mclk < minimum_clocks.memoryClock)
867                 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
868                                 max_limits->mclk : minimum_clocks.memoryClock;
869
870         vega12_ps->performance_levels[0].gfx_clock = sclk;
871         vega12_ps->performance_levels[0].mem_clock = mclk;
872
873         if (vega12_ps->performance_levels[1].gfx_clock <
874                         vega12_ps->performance_levels[0].gfx_clock)
875                 vega12_ps->performance_levels[0].gfx_clock =
876                                 vega12_ps->performance_levels[1].gfx_clock;
877
878         if (disable_mclk_switching) {
879                 /* Set Mclk the max of level 0 and level 1 */
880                 if (mclk < vega12_ps->performance_levels[1].mem_clock)
881                         mclk = vega12_ps->performance_levels[1].mem_clock;
882                 /* Find the lowest MCLK frequency that is within
883                  * the tolerable latency defined in DAL
884                  */
885                 latency = 0;
886                 for (i = 0; i < data->mclk_latency_table.count; i++) {
887                         if ((data->mclk_latency_table.entries[i].latency <= latency) &&
888                                 (data->mclk_latency_table.entries[i].frequency >=
889                                                 vega12_ps->performance_levels[0].mem_clock) &&
890                                 (data->mclk_latency_table.entries[i].frequency <=
891                                                 vega12_ps->performance_levels[1].mem_clock))
892                                 mclk = data->mclk_latency_table.entries[i].frequency;
893                 }
894                 vega12_ps->performance_levels[0].mem_clock = mclk;
895         } else {
896                 if (vega12_ps->performance_levels[1].mem_clock <
897                                 vega12_ps->performance_levels[0].mem_clock)
898                         vega12_ps->performance_levels[0].mem_clock =
899                                         vega12_ps->performance_levels[1].mem_clock;
900         }
901
902         if (PP_CAP(PHM_PlatformCaps_StablePState)) {
903                 for (i = 0; i < vega12_ps->performance_level_count; i++) {
904                         vega12_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
905                         vega12_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
906                 }
907         }
908
909         return 0;
910 }
911
912 static int vega12_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
913 {
914         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
915         struct PP_Clocks min_clocks = {0};
916         struct cgs_display_info info = {0};
917
918         data->need_update_dpm_table = 0;
919
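            /*
             * Flag an SCLK DPM table update when the minimum engine clock
             * required for display self-refresh changes.
             */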
920         min_clocks.engineClockInSR = hwmgr->display_config.min_core_set_clock_in_sr;
921         if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
922                         (min_clocks.engineClockInSR >= VEGA12_MINIMUM_ENGINE_CLOCK ||
923                          data->display_timing.min_clock_in_sr >= VEGA12_MINIMUM_ENGINE_CLOCK))
924                 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
925
926         cgs_get_active_displays_info(hwmgr->device, &info);
927         if (data->display_timing.num_existing_displays != info.display_count)
928                 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
929
930         return 0;
931 }
932
933 static int vega12_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
934                 struct vega12_single_dpm_table *dpm_table,
935                 uint32_t low_limit, uint32_t high_limit)
936 {
937         uint32_t i;
938
939         for (i = 0; i < dpm_table->count; i++) {
940                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
941                     (dpm_table->dpm_levels[i].value > high_limit))
942                         dpm_table->dpm_levels[i].enabled = false;
943                 else
944                         dpm_table->dpm_levels[i].enabled = true;
945         }
946         return 0;
947 }
948
949 static int vega12_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
950                 struct vega12_single_dpm_table *dpm_table,
951                 uint32_t low_limit, uint32_t high_limit,
952                 uint32_t disable_dpm_mask)
953 {
954         uint32_t i;
955
956         for (i = 0; i < dpm_table->count; i++) {
957                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
958                     (dpm_table->dpm_levels[i].value > high_limit))
959                         dpm_table->dpm_levels[i].enabled = false;
960                 else if ((!((1 << i) & disable_dpm_mask)) &&
961                                 !(low_limit == high_limit))
962                         dpm_table->dpm_levels[i].enabled = false;
963                 else
964                         dpm_table->dpm_levels[i].enabled = true;
965         }
966         return 0;
967 }
968
969 static int vega12_trim_dpm_states(struct pp_hwmgr *hwmgr,
970                 const struct vega12_power_state *vega12_ps)
971 {
972         struct vega12_hwmgr *data =
973                         (struct vega12_hwmgr *)(hwmgr->backend);
974         uint32_t high_limit_count;
975
976         PP_ASSERT_WITH_CODE((vega12_ps->performance_level_count >= 1),
977                         "power state did not have any performance level",
978                         return -1);
979
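            /* Level 1 is the upper clock bound when two performance levels exist; otherwise clamp to level 0 */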
980         high_limit_count = (vega12_ps->performance_level_count == 1) ? 0 : 1;
981
982         vega12_trim_single_dpm_states(hwmgr,
983                         &(data->dpm_table.soc_table),
984                         vega12_ps->performance_levels[0].soc_clock,
985                         vega12_ps->performance_levels[high_limit_count].soc_clock);
986
987         vega12_trim_single_dpm_states_with_mask(hwmgr,
988                         &(data->dpm_table.gfx_table),
989                         vega12_ps->performance_levels[0].gfx_clock,
990                         vega12_ps->performance_levels[high_limit_count].gfx_clock,
991                         data->disable_dpm_mask);
992
993         vega12_trim_single_dpm_states(hwmgr,
994                         &(data->dpm_table.mem_table),
995                         vega12_ps->performance_levels[0].mem_clock,
996                         vega12_ps->performance_levels[high_limit_count].mem_clock);
997
998         return 0;
999 }
1000
1001 static uint32_t vega12_find_lowest_dpm_level(
1002                 struct vega12_single_dpm_table *table)
1003 {
1004         uint32_t i;
1005
1006         for (i = 0; i < table->count; i++) {
1007                 if (table->dpm_levels[i].enabled)
1008                         break;
1009         }
1010
1011         return i;
1012 }
1013
1014 static uint32_t vega12_find_highest_dpm_level(
1015                 struct vega12_single_dpm_table *table)
1016 {
1017         uint32_t i = 0;
1018
1019         if (table->count <= MAX_REGULAR_DPM_NUMBER) {
1020                 for (i = table->count; i > 0; i--) {
1021                         if (table->dpm_levels[i - 1].enabled)
1022                                 return i - 1;
1023                 }
1024         } else {
1025                 pr_info("DPM Table Has Too Many Entries!");
1026                 return MAX_REGULAR_DPM_NUMBER - 1;
1027         }
1028
1029         return i;
1030 }
1031
1032 static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
1033 {
1034         return 0;
1035 }
1036
1037 static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
1038 {
1039         return 0;
1040 }
1041
1042 static int vega12_generate_dpm_level_enable_mask(
1043                 struct pp_hwmgr *hwmgr, const void *input)
1044 {
1045         struct vega12_hwmgr *data =
1046                         (struct vega12_hwmgr *)(hwmgr->backend);
1047         const struct phm_set_power_state_input *states =
1048                         (const struct phm_set_power_state_input *)input;
1049         const struct vega12_power_state *vega12_ps =
1050                         cast_const_phw_vega12_power_state(states->pnew_state);
1051         int i;
1052
1053         PP_ASSERT_WITH_CODE(!vega12_trim_dpm_states(hwmgr, vega12_ps),
1054                         "Attempt to Trim DPM States Failed!",
1055                         return -1);
1056
1057         data->smc_state_table.gfx_boot_level =
1058                         vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1059         data->smc_state_table.gfx_max_level =
1060                         vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1061         data->smc_state_table.mem_boot_level =
1062                         vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1063         data->smc_state_table.mem_max_level =
1064                         vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1065
1066         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1067                         "Attempt to upload DPM Bootup Levels Failed!",
1068                         return -1);
1069         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1070                         "Attempt to upload DPM Max Levels Failed!",
1071                         return -1);
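            /* Ensure every DPM level from the boot level up to the max level is marked enabled */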
1072         for (i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
1073                 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
1074
1075
1076         for (i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
1077                 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
1078
1079         return 0;
1080 }
1081
1082 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1083 {
1084         struct vega12_hwmgr *data =
1085                         (struct vega12_hwmgr *)(hwmgr->backend);
1086
1087         if (data->smu_features[GNLD_DPM_VCE].supported) {
1088                 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1089                                 enable,
1090                                 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
1091                                 "Attempt to Enable/Disable DPM VCE Failed!",
1092                                 return -1);
1093                 data->smu_features[GNLD_DPM_VCE].enabled = enable;
1094         }
1095
1096         return 0;
1097 }
1098
1099 static int vega12_update_sclk_threshold(struct pp_hwmgr *hwmgr)
1100 {
1101         return 0;
1102 }
1103
1104 static int vega12_set_power_state_tasks(struct pp_hwmgr *hwmgr,
1105                 const void *input)
1106 {
1107         int tmp_result, result = 0;
1108         struct vega12_hwmgr *data =
1109                         (struct vega12_hwmgr *)(hwmgr->backend);
1110         PPTable_t *pp_table = &(data->smc_state_table.pp_table);
1111
1112         tmp_result = vega12_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
1113         PP_ASSERT_WITH_CODE(!tmp_result,
1114                         "Failed to find DPM states clocks in DPM table!",
1115                         result = tmp_result);
1116
1117         tmp_result = vega12_generate_dpm_level_enable_mask(hwmgr, input);
1118         PP_ASSERT_WITH_CODE(!tmp_result,
1119                         "Failed to generate DPM level enabled mask!",
1120                         result = tmp_result);
1121
1122         tmp_result = vega12_update_sclk_threshold(hwmgr);
1123         PP_ASSERT_WITH_CODE(!tmp_result,
1124                         "Failed to update SCLK threshold!",
1125                         result = tmp_result);
1126
1127         result = vega12_copy_table_to_smc(hwmgr,
1128                         (uint8_t *)pp_table, TABLE_PPTABLE);
1129         PP_ASSERT_WITH_CODE(!result,
1130                         "Failed to upload PPtable!", return result);
1131
1132         data->apply_optimized_settings = false;
1133         data->apply_overdrive_next_settings_mask = 0;
1134
1135         return 0;
1136 }
1137
1138 static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1139 {
1140         struct vega12_hwmgr *data =
1141                         (struct vega12_hwmgr *)(hwmgr->backend);
1142         uint32_t gfx_clk;
1143
1144         if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1145                 return -1;
1146
1147         if (low)
1148                 PP_ASSERT_WITH_CODE(
1149                         vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
1150                         "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1151                         return -1);
1152         else
1153                 PP_ASSERT_WITH_CODE(
1154                         vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
1155                         "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1156                         return -1);
1157
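            /* The SMU reports the clock in MHz; scale to the 10 kHz units used by the powerplay interface */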
1158         return (gfx_clk * 100);
1159 }
1160
1161 static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1162 {
1163         struct vega12_hwmgr *data =
1164                         (struct vega12_hwmgr *)(hwmgr->backend);
1165         uint32_t mem_clk;
1166
1167         if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1168                 return -1;
1169
1170         if (low)
1171                 PP_ASSERT_WITH_CODE(
1172                         vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
1173                         "[GetMclks]: fail to get min PPCLK_UCLK\n",
1174                         return -1);
1175         else
1176                 PP_ASSERT_WITH_CODE(
1177                         vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
1178                         "[GetMclks]: fail to get max PPCLK_UCLK\n",
1179                         return -1);
1180
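            /* Same MHz -> 10 kHz unit conversion as for the gfx clock */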
1181         return (mem_clk * 100);
1182 }
1183
1184 static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr,
1185                 struct pp_gpu_power *query)
1186 {
1187 #if 0
1188         uint32_t value;
1189
1190         PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
1191                         PPSMC_MSG_GetCurrPkgPwr),
1192                         "Failed to get current package power!",
1193                         return -EINVAL);
1194
1195         vega12_read_arg_from_smc(hwmgr, &value);
1196         /* power value is an integer */
1197         query->average_gpu_power = value << 8;
1198 #endif
1199         return 0;
1200 }
1201
1202 static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
1203 {
1204         uint32_t gfx_clk = 0;
1205
1206         *gfx_freq = 0;
1207
1208         PP_ASSERT_WITH_CODE(
1209                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
1210                         "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1211                         return -1);
1212         PP_ASSERT_WITH_CODE(
1213                         vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
1214                         "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
1215                         return -1);
1216
1217         *gfx_freq = gfx_clk * 100;
1218
1219         return 0;
1220 }
1221
1222 static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
1223 {
1224         uint32_t mem_clk = 0;
1225
1226         *mclk_freq = 0;
1227
1228         PP_ASSERT_WITH_CODE(
1229                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
1230                         "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1231                         return -1);
1232         PP_ASSERT_WITH_CODE(
1233                         vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
1234                         "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
1235                         return -1);
1236
1237         *mclk_freq = mem_clk * 100;
1238
1239         return 0;
1240 }
1241
1242 static int vega12_get_current_activity_percent(
1243                 struct pp_hwmgr *hwmgr,
1244                 uint32_t *activity_percent)
1245 {
1246         int ret = 0;
1247         uint32_t current_activity = 50;
1248
1249 #if 0
1250         ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
1251         if (!ret) {
1252                 ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
1253                 if (!ret) {
1254                         if (current_activity > 100) {
1255                                 PP_ASSERT(false,
1256                                         "[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
1257                                 current_activity = 100;
1258                         }
1259                 } else
1260                         PP_ASSERT(false,
1261                                 "[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
1262         } else
1263                 PP_ASSERT(false,
1264                         "[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
1265 #endif
1266         *activity_percent = current_activity;
1267
1268         return ret;
1269 }
1270
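/*
 * Sensor read dispatcher: current clocks are queried live from the SMU,
 * GPU load returns a fixed placeholder until the SMU query is enabled,
 * temperature comes from the thermal helper, and the UVD/VCE "power"
 * sensors simply reflect the cached power gating state.
 */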
1271 static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1272                               void *value, int *size)
1273 {
1274         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1275         int ret = 0;
1276
1277         switch (idx) {
1278         case AMDGPU_PP_SENSOR_GFX_SCLK:
1279                 ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
1280                 if (!ret)
1281                         *size = 4;
1282                 break;
1283         case AMDGPU_PP_SENSOR_GFX_MCLK:
1284                 ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
1285                 if (!ret)
1286                         *size = 4;
1287                 break;
1288         case AMDGPU_PP_SENSOR_GPU_LOAD:
1289                 ret = vega12_get_current_activity_percent(hwmgr, (uint32_t *)value);
1290                 if (!ret)
1291                         *size = 4;
1292                 break;
1293         case AMDGPU_PP_SENSOR_GPU_TEMP:
1294                 *((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
1295                 *size = 4;
1296                 break;
1297         case AMDGPU_PP_SENSOR_UVD_POWER:
1298                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1299                 *size = 4;
1300                 break;
1301         case AMDGPU_PP_SENSOR_VCE_POWER:
1302                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1303                 *size = 4;
1304                 break;
1305         case AMDGPU_PP_SENSOR_GPU_POWER:
1306                 if (*size < sizeof(struct pp_gpu_power))
1307                         ret = -EINVAL;
1308                 else {
1309                         *size = sizeof(struct pp_gpu_power);
1310                         ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
1311                 }
1312                 break;
1313         default:
1314                 ret = -EINVAL;
1315                 break;
1316         }
1317         return ret;
1318 }
1319
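/*
 * Let the SMU know whether UCLK fast switching can be used; the message
 * parameter is 1 only when no display is active (has_disp == false).
 */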
1320 static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1321                 bool has_disp)
1322 {
1323         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1324
1325         if (data->smu_features[GNLD_DPM_UCLK].enabled)
1326                 return smum_send_msg_to_smc_with_parameter(hwmgr,
1327                         PPSMC_MSG_SetUclkFastSwitch,
1328                         has_disp ? 0 : 1);
1329
1330         return 0;
1331 }
1332
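/*
 * Translate a display clock request into a PPSMC_MSG_SetHardMinByFreq
 * message: the clock id goes in the upper 16 bits and the frequency in
 * the lower 16 bits, e.g. a 600000 kHz DISPCLK request is sent as
 * (PPCLK_DISPCLK << 16) | 600.
 */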
1333 int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1334                 struct pp_display_clock_request *clock_req)
1335 {
1336         int result = 0;
1337         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1338         enum amd_pp_clock_type clk_type = clock_req->clock_type;
1339         uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1340         PPCLK_e clk_select = 0;
1341         uint32_t clk_request = 0;
1342
1343         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1344                 switch (clk_type) {
1345                 case amd_pp_dcef_clock:
1346                         clk_freq = clock_req->clock_freq_in_khz / 100;
1347                         clk_select = PPCLK_DCEFCLK;
1348                         break;
1349                 case amd_pp_disp_clock:
1350                         clk_select = PPCLK_DISPCLK;
1351                         break;
1352                 case amd_pp_pixel_clock:
1353                         clk_select = PPCLK_PIXCLK;
1354                         break;
1355                 case amd_pp_phy_clock:
1356                         clk_select = PPCLK_PHYCLK;
1357                         break;
1358                 default:
1359                         pr_info("[DisplayClockVoltageRequest] Invalid Clock Type!");
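/*
 * After a power state adjustment, re-apply the display-driven minimums:
 * refresh the UCLK fast switch setting for the current display count,
 * request a hard minimum DCEFCLK (plus a deep sleep DCEFCLK floor when
 * supported), and set the UCLK hard minimum for the display config.
 */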
1360                         result = -1;
1361                         break;
1362                 }
1363
1364                 if (!result) {
1365                         clk_request = (clk_select << 16) | clk_freq;
1366                         result = smum_send_msg_to_smc_with_parameter(hwmgr,
1367                                         PPSMC_MSG_SetHardMinByFreq,
1368                                         clk_request);
1369                 }
1370         }
1371
1372         return result;
1373 }
1374
1375 static int vega12_notify_smc_display_config_after_ps_adjustment(
1376                 struct pp_hwmgr *hwmgr)
1377 {
1378         struct vega12_hwmgr *data =
1379                         (struct vega12_hwmgr *)(hwmgr->backend);
1380         uint32_t num_active_disps = 0;
1381         struct cgs_display_info info = {0};
1382         struct PP_Clocks min_clocks = {0};
1383         struct pp_display_clock_request clock_req;
1384         uint32_t clk_request;
1385
1386         info.mode_info = NULL;
1387         cgs_get_active_displays_info(hwmgr->device, &info);
1388         num_active_disps = info.display_count;
1389         if (num_active_disps > 1)
1390                 vega12_notify_smc_display_change(hwmgr, false);
1391         else
1392                 vega12_notify_smc_display_change(hwmgr, true);
1393
1394         min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
1395         min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
1396         min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
1397
1398         if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
1399                 clock_req.clock_type = amd_pp_dcef_clock;
1400                 clock_req.clock_freq_in_khz = min_clocks.dcefClock;
1401                 if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
1402                         if (data->smu_features[GNLD_DS_DCEFCLK].supported)
1403                                 PP_ASSERT_WITH_CODE(
1404                                         !smum_send_msg_to_smc_with_parameter(
1405                                         hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
1406                                         min_clocks.dcefClockInSR / 100),
1407                                         "Attempt to set divider for DCEFCLK Failed!",
1408                                         return -1);
1409                 } else {
1410                         pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1411                 }
1412         }
1413
1414         if (data->smu_features[GNLD_DPM_UCLK].enabled) {
1415                 clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
1416                 PP_ASSERT_WITH_CODE(
1417                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
1418                         "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
1419                         return -1);
1420                 data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
1421         }
1422
1423         return 0;
1424 }
1425
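/*
 * Pin both GFXCLK and UCLK to their highest enabled DPM level by setting
 * the boot (minimum) and maximum levels to the same top index and
 * uploading them to the SMU.
 */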
1426 static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
1427 {
1428         struct vega12_hwmgr *data =
1429                         (struct vega12_hwmgr *)(hwmgr->backend);
1430
1431         data->smc_state_table.gfx_boot_level =
1432         data->smc_state_table.gfx_max_level =
1433                         vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1434         data->smc_state_table.mem_boot_level =
1435         data->smc_state_table.mem_max_level =
1436                         vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1437
1438         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1439                         "Failed to upload boot level to highest!",
1440                         return -1);
1441
1442         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1443                         "Failed to upload dpm max level to highest!",
1444                         return -1);
1445
1446         return 0;
1447 }
1448
1449 static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1450 {
1451         struct vega12_hwmgr *data =
1452                         (struct vega12_hwmgr *)(hwmgr->backend);
1453
1454         data->smc_state_table.gfx_boot_level =
1455         data->smc_state_table.gfx_max_level =
1456                         vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1457         data->smc_state_table.mem_boot_level =
1458         data->smc_state_table.mem_max_level =
1459                         vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1460
1461         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1462                         "Failed to upload boot level to lowest!",
1463                         return -1);
1464
1465         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1466                         "Failed to upload dpm max level to lowest!",
1467                         return -1);
1468
1469         return 0;
1470
1471 }
1472
1473 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1474 {
1475         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1476
1477         data->smc_state_table.gfx_boot_level =
1478                         vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1479         data->smc_state_table.gfx_max_level =
1480                         vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1481         data->smc_state_table.mem_boot_level =
1482                         vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1483         data->smc_state_table.mem_max_level =
1484                         vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1485
1486         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1487                         "Failed to upload DPM Bootup Levels!",
1488                         return -1);
1489
1490         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1491                         "Failed to upload DPM Max Levels!",
1492                         return -1);
1493         return 0;
1494 }
1495
1496 #if 0
1497 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
1498                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
1499 {
1500         struct phm_ppt_v2_information *table_info =
1501                         (struct phm_ppt_v2_information *)(hwmgr->pptable);
1502
1503         if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
1504                 table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
1505                 table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
1506                 *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
1507                 *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
1508                 *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
1509         }
1510
1511         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
1512                 *sclk_mask = 0;
1513         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
1514                 *mclk_mask = 0;
1515         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
1516                 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
1517                 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
1518                 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
1519         }
1520         return 0;
1521 }
1522 #endif
1523
1524 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
1525 {
1526         switch (mode) {
1527         case AMD_FAN_CTRL_NONE:
1528                 break;
1529         case AMD_FAN_CTRL_MANUAL:
1530                 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1531                         vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
1532                 break;
1533         case AMD_FAN_CTRL_AUTO:
1534                 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1535                         vega12_fan_ctrl_start_smc_fan_control(hwmgr);
1536                 break;
1537         default:
1538                 break;
1539         }
1540 }
1541
1542 static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1543                                 enum amd_dpm_forced_level level)
1544 {
1545         int ret = 0;
1546 #if 0
1547         uint32_t sclk_mask = 0;
1548         uint32_t mclk_mask = 0;
1549         uint32_t soc_mask = 0;
1550 #endif
1551
1552         switch (level) {
1553         case AMD_DPM_FORCED_LEVEL_HIGH:
1554                 ret = vega12_force_dpm_highest(hwmgr);
1555                 break;
1556         case AMD_DPM_FORCED_LEVEL_LOW:
1557                 ret = vega12_force_dpm_lowest(hwmgr);
1558                 break;
1559         case AMD_DPM_FORCED_LEVEL_AUTO:
1560                 ret = vega12_unforce_dpm_levels(hwmgr);
1561                 break;
1562         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1563         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1564         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1565         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1566 #if 0
1567                 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
1568                 if (ret)
1569                         return ret;
1570                 vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
1571                 vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
1572 #endif
1573                 break;
1574         case AMD_DPM_FORCED_LEVEL_MANUAL:
1575         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1576         default:
1577                 break;
1578         }
1579 #if 0
1580         if (!ret) {
1581                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1582                         vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
1583                 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1584                         vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
1585         }
1586 #endif
1587         return ret;
1588 }
1589
1590 static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
1591 {
1592         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1593
1594         if (!data->smu_features[GNLD_FAN_CONTROL].enabled)
1595                 return AMD_FAN_CTRL_MANUAL;
1596         else
1597                 return AMD_FAN_CTRL_AUTO;
1598 }
1599
1600 static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
1601                 struct amd_pp_simple_clock_info *info)
1602 {
1603 #if 0
1604         struct phm_ppt_v2_information *table_info =
1605                         (struct phm_ppt_v2_information *)hwmgr->pptable;
1606         struct phm_clock_and_voltage_limits *max_limits =
1607                         &table_info->max_clock_voltage_on_ac;
1608
1609         info->engine_max_clock = max_limits->sclk;
1610         info->memory_max_clock = max_limits->mclk;
1611 #endif
1612         return 0;
1613 }
1614
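/*
 * Ask the SMU for either end of a clock domain's DPM range via
 * PPSMC_MSG_GetMaxDpmFreq / PPSMC_MSG_GetMinDpmFreq; the clock id is
 * packed into the upper 16 bits of the parameter and the frequency is
 * read back from the SMC argument register.
 */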
1615 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
1616                 uint32_t *clock,
1617                 PPCLK_e clock_select,
1618                 bool max)
1619 {
1620         int result;
1621         *clock = 0;
1622
1623         if (max) {
1624                  PP_ASSERT_WITH_CODE(
1625                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
1626                         "[GetClockRanges] Failed to get max clock from SMC!",
1627                         return -1);
1628                 result = vega12_read_arg_from_smc(hwmgr, clock);
1629         } else {
1630                 PP_ASSERT_WITH_CODE(
1631                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
1632                         "[GetClockRanges] Failed to get min clock from SMC!",
1633                         return -1);
1634                 result = vega12_read_arg_from_smc(hwmgr, clock);
1635         }
1636
1637         return result;
1638 }
1639
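/*
 * Build a pseudo GFXCLK level table: the SMU is asked only for the min
 * and max of the range, and VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS evenly
 * spaced entries are interpolated between them (values scaled by 100).
 */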
1640 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
1641                 struct pp_clock_levels_with_latency *clocks)
1642 {
1643         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1644         int i;
1645         uint32_t min, max, increments;
1646
1647         if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1648                 return -1;
1649
1650         PP_ASSERT_WITH_CODE(
1651                 vega12_get_clock_ranges(hwmgr, &min, PPCLK_GFXCLK, false) == 0,
1652                 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1653                 return -1);
1654         PP_ASSERT_WITH_CODE(
1655                 vega12_get_clock_ranges(hwmgr, &max, PPCLK_GFXCLK, true) == 0,
1656                 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1657                 return -1);
1658
1659         clocks->data[0].clocks_in_khz = min * 100;
1660         increments = (max - min) / (VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS - 1);
1661
1662         for (i = 1; i < (VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS - 1); i++) {
1663                 if ((min + (increments * i)) != 0) {
1664                         clocks->data[i].clocks_in_khz =
1665                                 (min + increments * i) * 100;
1666                         clocks->data[i].latency_in_us = 0;
1667                 }
1668         }
1669         clocks->data[i].clocks_in_khz = max * 100;
1670         clocks->num_levels = i + 1;
1671
1672         return 0;
1673 }
1674
1675 static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
1676                 uint32_t clock)
1677 {
1678         return 25;
1679 }
1680
1681 static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
1682                 struct pp_clock_levels_with_latency *clocks)
1683 {
1684         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1685         uint32_t min, max, increments;
1686         int i;
1687
1688         if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1689                 return -1;
1690
1691         PP_ASSERT_WITH_CODE(
1692                 vega12_get_clock_ranges(hwmgr, &min, PPCLK_UCLK, false) == 0,
1693                 "[GetMclks]: fail to get min PPCLK_UCLK\n",
1694                 return -1);
1695         PP_ASSERT_WITH_CODE(
1696                 vega12_get_clock_ranges(hwmgr, &max, PPCLK_UCLK, true) == 0,
1697                 "[GetMclks]: fail to get max PPCLK_UCLK\n",
1698                 return -1);
1699
1700         clocks->data[0].clocks_in_khz = min * 100;
1701         clocks->data[0].latency_in_us =
1702                 data->mclk_latency_table.entries[0].latency =
1703                 vega12_get_mem_latency(hwmgr, min);
1704
1705         increments = (max - min) / (VG12_PSUEDO_NUM_UCLK_DPM_LEVELS - 1);
1706
1707         for (i = 1; i < (VG12_PSUEDO_NUM_UCLK_DPM_LEVELS - 1); i++) {
1708                 if ((min + (increments * i)) != 0) {
1709                         clocks->data[i].clocks_in_khz =
1710                                 (min + (increments * i)) * 100;
1711                         clocks->data[i].latency_in_us =
1712                                 data->mclk_latency_table.entries[i].latency =
1713                                 vega12_get_mem_latency(hwmgr, min + increments * i);
1714                 }
1715         }
1716
1717         clocks->data[i].clocks_in_khz = max * 100;
1718         clocks->data[i].latency_in_us =
1719                 data->mclk_latency_table.entries[i].latency =
1720                 vega12_get_mem_latency(hwmgr, max);
1721
1722         clocks->num_levels = data->mclk_latency_table.count = i + 1;
1723
1724         return 0;
1725 }
1726
1727 static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
1728                 struct pp_clock_levels_with_latency *clocks)
1729 {
1730         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1731         int i;
1732         uint32_t min, max, increments;
1733
1734         if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
1735                 return -1;
1736
1737         PP_ASSERT_WITH_CODE(
1738                 vega12_get_clock_ranges(hwmgr, &min, PPCLK_DCEFCLK, false) == 0,
1739                 "[GetDcfclocks]: fail to get min PPCLK_DCEFCLK\n",
1740                 return -1);
1741         PP_ASSERT_WITH_CODE(
1742                 vega12_get_clock_ranges(hwmgr, &max, PPCLK_DCEFCLK, true) == 0,
1743                 "[GetDcfclocks]: fail to get max PPCLK_DCEFCLK\n",
1744                 return -1);
1745
1746         clocks->data[0].clocks_in_khz = min * 100;
1747         increments = (max - min) / (VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS - 1);
1748
1749         for (i = 1; i < (VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS - 1); i++) {
1750                 if ((min + (increments * i)) != 0) {
1751                         clocks->data[i].clocks_in_khz =
1752                                 (min + increments * i) * 100;
1753                         clocks->data[i].latency_in_us = 0;
1754                 }
1755         }
1756         clocks->data[i].clocks_in_khz = max * 100;
1757         clocks->num_levels = i + 1;
1758
1759         return 0;
1760 }
1761
1762 static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
1763                 struct pp_clock_levels_with_latency *clocks)
1764 {
1765         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1766         int i;
1767         uint32_t min, max, increments;
1768
1769         if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
1770                 return -1;
1771
1772         PP_ASSERT_WITH_CODE(
1773                 vega12_get_clock_ranges(hwmgr, &min, PPCLK_SOCCLK, false) == 0,
1774                 "[GetSocclks]: fail to get min PPCLK_SOCCLK\n",
1775                 return -1);
1776         PP_ASSERT_WITH_CODE(
1777                 vega12_get_clock_ranges(hwmgr, &max, PPCLK_SOCCLK, true) == 0,
1778                 "[GetSocclks]: fail to get max PPCLK_SOCCLK\n",
1779                 return -1);
1780
1781         clocks->data[0].clocks_in_khz = min * 100;
1782         increments = (max - min) / (VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS - 1);
1783
1784         for (i = 1; i < (VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS - 1); i++) {
1785                 if ((min + (increments * i)) != 0) {
1786                         clocks->data[i].clocks_in_khz =
1787                                 (min + increments * i) * 100;
1788                         clocks->data[i].latency_in_us = 0;
1789                 }
1790         }
1791
1792         clocks->data[i].clocks_in_khz = max * 100;
1793         clocks->num_levels = i + 1;
1794
1795         return 0;
1796
1797 }
1798
1799 static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1800                 enum amd_pp_clock_type type,
1801                 struct pp_clock_levels_with_latency *clocks)
1802 {
1803         int ret;
1804
1805         switch (type) {
1806         case amd_pp_sys_clock:
1807                 ret = vega12_get_sclks(hwmgr, clocks);
1808                 break;
1809         case amd_pp_mem_clock:
1810                 ret = vega12_get_memclocks(hwmgr, clocks);
1811                 break;
1812         case amd_pp_dcef_clock:
1813                 ret = vega12_get_dcefclocks(hwmgr, clocks);
1814                 break;
1815         case amd_pp_soc_clock:
1816                 ret = vega12_get_socclocks(hwmgr, clocks);
1817                 break;
1818         default:
1819                 return -EINVAL;
1820         }
1821
1822         return ret;
1823 }
1824
1825 static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
1826                 enum amd_pp_clock_type type,
1827                 struct pp_clock_levels_with_voltage *clocks)
1828 {
1829         clocks->num_levels = 0;
1830
1831         return 0;
1832 }
1833
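/*
 * Copy the DMIF and MCIF watermark ranges supplied by the display stack
 * into the SMU Watermarks_t table (clock bounds scaled down from kHz) and
 * mark the table dirty so it is re-uploaded on the next display
 * configuration change.
 */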
1834 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
1835                 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
1836 {
1837         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1838         Watermarks_t *table = &(data->smc_state_table.water_marks_table);
1839         int result = 0;
1840         uint32_t i;
1841
1842         if (!data->registry_data.disable_water_mark &&
1843                         data->smu_features[GNLD_DPM_DCEFCLK].supported &&
1844                         data->smu_features[GNLD_DPM_SOCCLK].supported) {
1845                 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
1846                         table->WatermarkRow[WM_DCEFCLK][i].MinClock =
1847                                 cpu_to_le16((uint16_t)
1848                                 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
1849                                 100);
1850                         table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
1851                                 cpu_to_le16((uint16_t)
1852                                 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
1853                                 100);
1854                         table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
1855                                 cpu_to_le16((uint16_t)
1856                                 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
1857                                 100);
1858                         table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
1859                                 cpu_to_le16((uint16_t)
1860                                 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
1861                                 100);
1862                         table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
1863                                         wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
1864                 }
1865
1866                 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
1867                         table->WatermarkRow[WM_SOCCLK][i].MinClock =
1868                                 cpu_to_le16((uint16_t)
1869                                 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
1870                                 100);
1871                         table->WatermarkRow[WM_SOCCLK][i].MaxClock =
1872                                 cpu_to_le16((uint16_t)
1873                                 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
1874                                 100);
1875                         table->WatermarkRow[WM_SOCCLK][i].MinUclk =
1876                                 cpu_to_le16((uint16_t)
1877                                 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
1878                                 100);
1879                         table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
1880                                 cpu_to_le16((uint16_t)
1881                                 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
1882                                 100);
1883                         table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
1884                                         wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
1885                 }
1886                 data->water_marks_bitmap |= WaterMarksExist;
1887                 data->water_marks_bitmap &= ~WaterMarksLoaded;
1888         }
1889
1890         return result;
1891 }
1892
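/*
 * Restrict GFXCLK or UCLK to the DPM levels selected in @mask: the lowest
 * set bit becomes the boot (minimum) level and the highest set bit the
 * maximum level, e.g. a mask of 0x6 limits the domain to levels 1..2.
 * Rejected while the forced DPM level is auto, low or high.
 */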
1893 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
1894                 enum pp_clock_type type, uint32_t mask)
1895 {
1896         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1897
1898         if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
1899                                 AMD_DPM_FORCED_LEVEL_LOW |
1900                                 AMD_DPM_FORCED_LEVEL_HIGH))
1901                 return -EINVAL;
1902
1903         switch (type) {
1904         case PP_SCLK:
1905                 data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
1906                 data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
1907
1908                 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1909                         "Failed to upload boot level to lowest!",
1910                         return -EINVAL);
1911
1912                 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1913                         "Failed to upload dpm max level to highest!",
1914                         return -EINVAL);
1915                 break;
1916
1917         case PP_MCLK:
1918                 data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
1919                 data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
1920
1921                 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1922                         "Failed to upload boot level to lowest!",
1923                         return -EINVAL);
1924
1925                 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1926                         "Failed to upload dpm max level to highest!",
1927                         return -EINVAL);
1928
1929                 break;
1930
1931         case PP_PCIE:
1932                 break;
1933
1934         default:
1935                 break;
1936         }
1937
1938         return 0;
1939 }
1940
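/*
 * Print one "<index>: <freq>Mhz" line per pseudo DPM level of the
 * requested clock type, marking the entry that matches the frequency
 * currently reported by the SMU with an asterisk.
 */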
1941 static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
1942                 enum pp_clock_type type, char *buf)
1943 {
1944         int i, size = 0;
             uint32_t now;
1945         struct pp_clock_levels_with_latency clocks;
1946
1947         switch (type) {
1948         case PP_SCLK:
1949                 PP_ASSERT_WITH_CODE(
1950                                 vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
1951                                 "Attempt to get current gfx clk Failed!",
1952                                 return -1);
1953
1954                 PP_ASSERT_WITH_CODE(
1955                                 vega12_get_sclks(hwmgr, &clocks) == 0,
1956                                 "Attempt to get gfx clk levels Failed!",
1957                                 return -1);
1958                 for (i = 0; i < clocks.num_levels; i++)
1959                         size += sprintf(buf + size, "%d: %uMhz %s\n",
1960                                 i, clocks.data[i].clocks_in_khz / 100,
1961                                 (clocks.data[i].clocks_in_khz == now) ? "*" : "");
1962                 break;
1963
1964         case PP_MCLK:
1965                 PP_ASSERT_WITH_CODE(
1966                                 vega12_get_current_mclk_freq(hwmgr, &now) == 0,
1967                                 "Attempt to get current mclk freq Failed!",
1968                                 return -1);
1969
1970                 PP_ASSERT_WITH_CODE(
1971                                 vega12_get_memclocks(hwmgr, &clocks) == 0,
1972                                 "Attempt to get memory clk levels Failed!",
1973                                 return -1);
1974                 for (i = 0; i < clocks.num_levels; i++)
1975                         size += sprintf(buf + size, "%d: %uMhz %s\n",
1976                                 i, clocks.data[i].clocks_in_khz / 100,
1977                                 (clocks.data[i].clocks_in_khz == now) ? "*" : "");
1978                 break;
1979
1980         case PP_PCIE:
1981                 break;
1982
1983         default:
1984                 break;
1985         }
1986         return size;
1987 }
1988
1989 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
1990 {
1991         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1992         int result = 0;
1993         uint32_t num_turned_on_displays = 1;
1994         Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
1995         struct cgs_display_info info = {0};
1996
1997         if ((data->water_marks_bitmap & WaterMarksExist) &&
1998                         !(data->water_marks_bitmap & WaterMarksLoaded)) {
1999                 result = vega12_copy_table_to_smc(hwmgr,
2000                         (uint8_t *)wm_table, TABLE_WATERMARKS);
2001                 PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL);
2002                 data->water_marks_bitmap |= WaterMarksLoaded;
2003         }
2004
2005         if ((data->water_marks_bitmap & WaterMarksExist) &&
2006                 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2007                 data->smu_features[GNLD_DPM_SOCCLK].supported) {
2008                 cgs_get_active_displays_info(hwmgr->device, &info);
2009                 num_turned_on_displays = info.display_count;
2010                 smum_send_msg_to_smc_with_parameter(hwmgr,
2011                         PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
2012         }
2013
2014         return result;
2015 }
2016
2017 int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2018 {
2019         struct vega12_hwmgr *data =
2020                         (struct vega12_hwmgr *)(hwmgr->backend);
2021
2022         if (data->smu_features[GNLD_DPM_UVD].supported) {
2023                 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
2024                                 enable,
2025                                 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
2026                                 "Attempt to Enable/Disable DPM UVD Failed!",
2027                                 return -1);
2028                 data->smu_features[GNLD_DPM_UVD].enabled = enable;
2029         }
2030
2031         return 0;
2032 }
2033
2034 static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2035 {
2036         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2037
2038         data->vce_power_gated = bgate;
2039         vega12_enable_disable_vce_dpm(hwmgr, !bgate);
2040 }
2041
2042 static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2043 {
2044         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2045
2046         data->uvd_power_gated = bgate;
2047         vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
2048 }
2049
2050 static inline bool vega12_are_power_levels_equal(
2051                                 const struct vega12_performance_level *pl1,
2052                                 const struct vega12_performance_level *pl2)
2053 {
2054         return ((pl1->soc_clock == pl2->soc_clock) &&
2055                         (pl1->gfx_clock == pl2->gfx_clock) &&
2056                         (pl1->mem_clock == pl2->mem_clock));
2057 }
2058
2059 static int vega12_check_states_equal(struct pp_hwmgr *hwmgr,
2060                                 const struct pp_hw_power_state *pstate1,
2061                         const struct pp_hw_power_state *pstate2, bool *equal)
2062 {
2063         const struct vega12_power_state *psa;
2064         const struct vega12_power_state *psb;
2065         int i;
2066
2067         if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
2068                 return -EINVAL;
2069
2070         psa = cast_const_phw_vega12_power_state(pstate1);
2071         psb = cast_const_phw_vega12_power_state(pstate2);
2072         /* If the two states don't even have the same number of performance levels they cannot be the same state. */
2073         if (psa->performance_level_count != psb->performance_level_count) {
2074                 *equal = false;
2075                 return 0;
2076         }
2077
2078         for (i = 0; i < psa->performance_level_count; i++) {
2079                 if (!vega12_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
2080                         /* If we have found even one performance level pair that is different the states are different. */
2081                         *equal = false;
2082                         return 0;
2083                 }
2084         }
2085
2086         /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
2087         *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
2088         *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
2089         *equal &= (psa->sclk_threshold == psb->sclk_threshold);
2090
2091         return 0;
2092 }
2093
2094 static bool
2095 vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2096 {
2097         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2098         bool is_update_required = false;
2099         struct cgs_display_info info = {0, 0, NULL};
2100
2101         cgs_get_active_displays_info(hwmgr->device, &info);
2102
2103         if (data->display_timing.num_existing_displays != info.display_count)
2104                 is_update_required = true;
2105
2106         if (data->registry_data.gfx_clk_deep_sleep_support) {
2107                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
2108                         is_update_required = true;
2109         }
2110
2111         return is_update_required;
2112 }
2113
2114 static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2115 {
2116         int tmp_result, result = 0;
2117
2118         tmp_result = vega12_disable_all_smu_features(hwmgr);
2119         PP_ASSERT_WITH_CODE((tmp_result == 0),
2120                         "Failed to disable all smu features!", result = tmp_result);
2121
2122         return result;
2123 }
2124
2125 static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
2126 {
2127         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2128         int result;
2129
2130         result = vega12_disable_dpm_tasks(hwmgr);
2131         PP_ASSERT_WITH_CODE((0 == result),
2132                         "[disable_dpm_tasks] Failed to disable DPM!",
2133                         );
2134         data->water_marks_bitmap &= ~(WaterMarksLoaded);
2135
2136         return result;
2137 }
2138
2139 #if 0
2140 static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
2141                 uint32_t *sclk_idx, uint32_t *mclk_idx,
2142                 uint32_t min_sclk, uint32_t min_mclk)
2143 {
2144         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2145         struct vega12_dpm_table *dpm_table = &(data->dpm_table);
2146         uint32_t i;
2147
2148         for (i = 0; i < dpm_table->gfx_table.count; i++) {
2149                 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
2150                         dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
2151                         *sclk_idx = i;
2152                         break;
2153                 }
2154         }
2155
2156         for (i = 0; i < dpm_table->mem_table.count; i++) {
2157                 if (dpm_table->mem_table.dpm_levels[i].enabled &&
2158                         dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
2159                         *mclk_idx = i;
2160                         break;
2161                 }
2162         }
2163 }
2164 #endif
2165
2166 #if 0
2167 static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
2168                 struct amd_pp_profile *request)
2169 {
2170         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2171         uint32_t sclk_idx = ~0, mclk_idx = ~0;
2172
2173         if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
2174                 return -EINVAL;
2175
2176         vega12_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
2177                         request->min_sclk, request->min_mclk);
2178
2179         if (sclk_idx != ~0) {
2180                 if (!data->registry_data.sclk_dpm_key_disabled)
2181                         PP_ASSERT_WITH_CODE(
2182                                         !smum_send_msg_to_smc_with_parameter(
2183                                         hwmgr,
2184                                         PPSMC_MSG_SetSoftMinGfxclkByIndex,
2185                                         sclk_idx),
2186                                         "Failed to set soft min sclk index!",
2187                                         return -EINVAL);
2188         }
2189
2190         if (mclk_idx != ~0) {
2191                 if (!data->registry_data.mclk_dpm_key_disabled)
2192                         PP_ASSERT_WITH_CODE(
2193                                         !smum_send_msg_to_smc_with_parameter(
2194                                         hwmgr,
2195                                         PPSMC_MSG_SetSoftMinUclkByIndex,
2196                                         mclk_idx),
2197                                         "Failed to set soft min mclk index!",
2198                                         return -EINVAL);
2199         }
2200
2201         return 0;
2202 }
2203
2204 static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
2205 {
2206         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2207         struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
2208         struct vega12_single_dpm_table *golden_sclk_table =
2209                         &(data->golden_dpm_table.gfx_table);
2210         int value;
2211
2212         value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
2213                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
2214                         100 /
2215                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
2216
2217         return value;
2218 }
2219
2220 static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
2221 {
2222         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2223         struct vega12_single_dpm_table *golden_sclk_table =
2224                         &(data->golden_dpm_table.gfx_table);
2225         struct pp_power_state *ps;
2226         struct vega12_power_state *vega12_ps;
2227
2228         ps = hwmgr->request_ps;
2229
2230         if (ps == NULL)
2231                 return -EINVAL;
2232
2233         vega12_ps = cast_phw_vega12_power_state(&ps->hardware);
2234
2235         vega12_ps->performance_levels[vega12_ps->performance_level_count - 1].gfx_clock =
2236                 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value / 100 +
2237                 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
2238
2239         if (vega12_ps->performance_levels[vega12_ps->performance_level_count - 1].gfx_clock >
2240                         hwmgr->platform_descriptor.overdriveLimit.engineClock)
2241                 vega12_ps->performance_levels[vega12_ps->performance_level_count - 1].gfx_clock =
2242                         hwmgr->platform_descriptor.overdriveLimit.engineClock;
2243
2244         return 0;
2245 }
2246
2247 static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
2248 {
2249         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2250         struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
2251         struct vega12_single_dpm_table *golden_mclk_table =
2252                         &(data->golden_dpm_table.mem_table);
2253         int value;
2254
2255         value = (mclk_table->dpm_levels
2256                         [mclk_table->count - 1].value -
2257                         golden_mclk_table->dpm_levels
2258                         [golden_mclk_table->count - 1].value) *
2259                         100 /
2260                         golden_mclk_table->dpm_levels
2261                         [golden_mclk_table->count - 1].value;
2262
2263         return value;
2264 }
2265
2266 static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
2267 {
2268         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2269         struct vega12_single_dpm_table *golden_mclk_table =
2270                         &(data->golden_dpm_table.mem_table);
2271         struct pp_power_state  *ps;
2272         struct vega12_power_state  *vega12_ps;
2273
2274         ps = hwmgr->request_ps;
2275
2276         if (ps == NULL)
2277                 return -EINVAL;
2278
2279         vega12_ps = cast_phw_vega12_power_state(&ps->hardware);
2280
2281         vega12_ps->performance_levels
2282         [vega12_ps->performance_level_count - 1].mem_clock =
2283                         golden_mclk_table->dpm_levels
2284                         [golden_mclk_table->count - 1].value *
2285                         value / 100 +
2286                         golden_mclk_table->dpm_levels
2287                         [golden_mclk_table->count - 1].value;
2288
2289         if (vega12_ps->performance_levels
2290                         [vega12_ps->performance_level_count - 1].mem_clock >
2291                         hwmgr->platform_descriptor.overdriveLimit.memoryClock)
2292                 vega12_ps->performance_levels
2293                 [vega12_ps->performance_level_count - 1].mem_clock =
2294                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
2295
2296         return 0;
2297 }
2298 #endif
2299
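/*
 * Hand the SMU the location of the driver-allocated CAC/DRAM-log buffer:
 * the system virtual address, the MC (GPU) address and the buffer size
 * are each delivered in a separate message.
 */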
2300 static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
2301                                         uint32_t virtual_addr_low,
2302                                         uint32_t virtual_addr_hi,
2303                                         uint32_t mc_addr_low,
2304                                         uint32_t mc_addr_hi,
2305                                         uint32_t size)
2306 {
2307         smum_send_msg_to_smc_with_parameter(hwmgr,
2308                                         PPSMC_MSG_SetSystemVirtualDramAddrHigh,
2309                                         virtual_addr_hi);
2310         smum_send_msg_to_smc_with_parameter(hwmgr,
2311                                         PPSMC_MSG_SetSystemVirtualDramAddrLow,
2312                                         virtual_addr_low);
2313         smum_send_msg_to_smc_with_parameter(hwmgr,
2314                                         PPSMC_MSG_DramLogSetDramAddrHigh,
2315                                         mc_addr_hi);
2316
2317         smum_send_msg_to_smc_with_parameter(hwmgr,
2318                                         PPSMC_MSG_DramLogSetDramAddrLow,
2319                                         mc_addr_low);
2320
2321         smum_send_msg_to_smc_with_parameter(hwmgr,
2322                                         PPSMC_MSG_DramLogSetDramSize,
2323                                         size);
2324         return 0;
2325 }
2326
2327 static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2328                 struct PP_TemperatureRange *thermal_data)
2329 {
2330         struct phm_ppt_v3_information *pptable_information =
2331                 (struct phm_ppt_v3_information *)hwmgr->pptable;
2332
2333         memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2334
2335         thermal_data->max = pptable_information->us_software_shutdown_temp *
2336                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2337
2338         return 0;
2339 }
2340
2341 static int vega12_is_hardware_ctf_enabled(struct pp_hwmgr *hwmgr)
2342 {
2343         uint32_t reg;
2344
2345         reg = soc15_get_register_offset(THM_HWID, 0,
2346                         mmTHM_TCON_THERM_TRIP_BASE_IDX,
2347                         mmTHM_TCON_THERM_TRIP);
2348
2349         return (((cgs_read_register(hwmgr->device, reg) &
2350                 THM_TCON_THERM_TRIP__THERM_TP_EN_MASK) >>
2351                 THM_TCON_THERM_TRIP__THERM_TP_EN__SHIFT) == 1);
2352 }
2353
2354 static int vega12_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
2355                 const void *info)
2356 {
2357         struct cgs_irq_src_funcs *irq_src =
2358                         (struct cgs_irq_src_funcs *)info;
2359
2360         if (hwmgr->thermal_controller.ucType ==
2361                         ATOM_VEGA12_PP_THERMALCONTROLLER_VEGA12) {
2362                 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
2363                                 0xf, /* AMDGPU_IH_CLIENTID_THM */
2364                                 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
2365                                 "Failed to register high thermal interrupt!",
2366                                 return -EINVAL);
2367                 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
2368                                 0xf, /* AMDGPU_IH_CLIENTID_THM */
2369                                 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
2370                                 "Failed to register low thermal interrupt!",
2371                                 return -EINVAL);
2372         }
2373
2374         if (vega12_is_hardware_ctf_enabled(hwmgr))
2375                 /* Register CTF(GPIO_19) interrupt */
2376                 PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
2377                                 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
2378                                 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
2379                                 "Failed to register CTF thermal interrupt!",
2380                                 return -EINVAL);
2381
2382         return 0;
2383 }
2384
2385 static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
2386         .backend_init = vega12_hwmgr_backend_init,
2387         .backend_fini = vega12_hwmgr_backend_fini,
2388         .asic_setup = vega12_setup_asic_task,
2389         .dynamic_state_management_enable = vega12_enable_dpm_tasks,
2390         .dynamic_state_management_disable = vega12_disable_dpm_tasks,
2391         .get_num_of_pp_table_entries =
2392                         vega12_get_number_of_pp_table_entries,
2393         .get_power_state_size = vega12_get_power_state_size,
2394         .patch_boot_state = vega12_patch_boot_state,
2395         .apply_state_adjust_rules = vega12_apply_state_adjust_rules,
2396         .power_state_set = vega12_set_power_state_tasks,
2397         .get_sclk = vega12_dpm_get_sclk,
2398         .get_mclk = vega12_dpm_get_mclk,
2399         .notify_smc_display_config_after_ps_adjustment =
2400                         vega12_notify_smc_display_config_after_ps_adjustment,
2401         .force_dpm_level = vega12_dpm_force_dpm_level,
2402         .stop_thermal_controller = vega12_thermal_stop_thermal_controller,
2403         .get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
2404         .reset_fan_speed_to_default =
2405                         vega12_fan_ctrl_reset_fan_speed_to_default,
2406         .get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
2407         .set_fan_control_mode = vega12_set_fan_control_mode,
2408         .get_fan_control_mode = vega12_get_fan_control_mode,
2409         .read_sensor = vega12_read_sensor,
2410         .get_dal_power_level = vega12_get_dal_power_level,
2411         .get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
2412         .get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
2413         .set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
2414         .display_clock_voltage_request = vega12_display_clock_voltage_request,
2415         .force_clock_level = vega12_force_clock_level,
2416         .print_clock_levels = vega12_print_clock_levels,
2417         .display_config_changed = vega12_display_configuration_changed_task,
2418         .powergate_uvd = vega12_power_gate_uvd,
2419         .powergate_vce = vega12_power_gate_vce,
2420         .check_states_equal = vega12_check_states_equal,
2421         .check_smc_update_required_for_display_configuration =
2422                         vega12_check_smc_update_required_for_display_configuration,
2423         .power_off_asic = vega12_power_off_asic,
2424         .disable_smc_firmware_ctf = vega12_thermal_disable_alert,
2425 #if 0
2426         .set_power_profile_state = vega12_set_power_profile_state,
2427         .get_sclk_od = vega12_get_sclk_od,
2428         .set_sclk_od = vega12_set_sclk_od,
2429         .get_mclk_od = vega12_get_mclk_od,
2430         .set_mclk_od = vega12_set_mclk_od,
2431 #endif
2432         .notify_cac_buffer_info = vega12_notify_cac_buffer_info,
2433         .get_thermal_temperature_range = vega12_get_thermal_temperature_range,
2434         .register_internal_thermal_interrupt = vega12_register_thermal_interrupt,
2435         .start_thermal_controller = vega12_start_thermal_controller,
2436 };
2437
2438 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
2439 {
2440         hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
2441         hwmgr->pptable_func = &vega12_pptable_funcs;
2442
2443         return 0;
2444 }