drm/amd/powerplay: apply clocks adjust rules on power state change
[platform/kernel/linux-rpi.git] / drivers / gpu / drm / amd / powerplay / hwmgr / vega12_hwmgr.c
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28
29 #include "hwmgr.h"
30 #include "amd_powerplay.h"
31 #include "vega12_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega12_inc.h"
37 #include "pppcielanes.h"
38 #include "vega12_hwmgr.h"
39 #include "vega12_processpptables.h"
40 #include "vega12_pptable.h"
41 #include "vega12_thermal.h"
42 #include "vega12_ppsmc.h"
43 #include "pp_debug.h"
44 #include "amd_pcie_helpers.h"
45 #include "ppinterrupt.h"
46 #include "pp_overdriver.h"
47 #include "pp_thermal.h"
48
49
50 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
51                 enum pp_clock_type type, uint32_t mask);
52 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
53                 uint32_t *clock,
54                 PPCLK_e clock_select,
55                 bool max);
56
57 static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
58 {
59         struct vega12_hwmgr *data =
60                         (struct vega12_hwmgr *)(hwmgr->backend);
61
62         data->gfxclk_average_alpha = PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT;
63         data->socclk_average_alpha = PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT;
64         data->uclk_average_alpha = PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT;
65         data->gfx_activity_average_alpha = PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT;
66         data->lowest_uclk_reserved_for_ulv = PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT;
67
68         data->display_voltage_mode = PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT;
69         data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
70         data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
71         data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
72         data->disp_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
73         data->disp_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
74         data->disp_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
75         data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
76         data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
77         data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
78         data->phy_clk_quad_eqn_a = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
79         data->phy_clk_quad_eqn_b = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
80         data->phy_clk_quad_eqn_c = PPREGKEY_VEGA12QUADRATICEQUATION_DFLT;
81
82         data->registry_data.disallowed_features = 0x0;
83         data->registry_data.od_state_in_dc_support = 0;
84         data->registry_data.skip_baco_hardware = 0;
85
86         data->registry_data.log_avfs_param = 0;
87         data->registry_data.sclk_throttle_low_notification = 1;
88         data->registry_data.force_dpm_high = 0;
89         data->registry_data.stable_pstate_sclk_dpm_percentage = 75;
90
91         data->registry_data.didt_support = 0;
92         if (data->registry_data.didt_support) {
93                 data->registry_data.didt_mode = 6;
94                 data->registry_data.sq_ramping_support = 1;
95                 data->registry_data.db_ramping_support = 0;
96                 data->registry_data.td_ramping_support = 0;
97                 data->registry_data.tcp_ramping_support = 0;
98                 data->registry_data.dbr_ramping_support = 0;
99                 data->registry_data.edc_didt_support = 1;
100                 data->registry_data.gc_didt_support = 0;
101                 data->registry_data.psm_didt_support = 0;
102         }
103
104         data->registry_data.pcie_lane_override = 0xff;
105         data->registry_data.pcie_speed_override = 0xff;
106         data->registry_data.pcie_clock_override = 0xffffffff;
107         data->registry_data.regulator_hot_gpio_support = 1;
108         data->registry_data.ac_dc_switch_gpio_support = 0;
109         data->registry_data.quick_transition_support = 0;
110         data->registry_data.zrpm_start_temp = 0xffff;
111         data->registry_data.zrpm_stop_temp = 0xffff;
112         data->registry_data.odn_feature_enable = 1;
113         data->registry_data.disable_water_mark = 0;
114         data->registry_data.disable_pp_tuning = 0;
115         data->registry_data.disable_xlpp_tuning = 0;
116         data->registry_data.disable_workload_policy = 0;
117         data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
118         data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
119         data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
120         data->registry_data.force_workload_policy_mask = 0;
121         data->registry_data.disable_3d_fs_detection = 0;
122         data->registry_data.fps_support = 1;
123         data->registry_data.disable_auto_wattman = 1;
124         data->registry_data.auto_wattman_debug = 0;
125         data->registry_data.auto_wattman_sample_period = 100;
126         data->registry_data.auto_wattman_threshold = 50;
127 }
128
/*
 * Translate the registry defaults and amdgpu power-gating flags into the
 * PHM platform-capability bitmap that the rest of powerplay consults.
 * Always returns 0.
 */
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	/* No VDDCI regulator control available on this board */
	if (data->vddci_control == VEGA12_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	/* UVD/VCE power gating mirrors the amdgpu pg_flags */
	if (adev->pg_flags & AMD_PG_SUPPORT_UVD) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDDynamicPowerGating);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	/* ODN (new overdrive) vs legacy OD6 overdrive, AC mode */
	if (data->registry_data.odn_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODNinACSupport);
	else {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6inACSupport);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD6PlusinACSupport);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	/* Same ODN/OD6 split for DC (battery) mode, when enabled */
	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.odn_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODNinDCSupport);
		else {
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6inDCSupport);
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD6PlusinDCSupport);
		}
	}

	/* Fuzzy fan control requires thermal support and a valid TMax */
	if (data->registry_data.thermal_support
			&& data->registry_data.fuzzy_fan_control_support
			&& hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	/* ... then re-enable exactly the DiDt sub-features requested */
	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	/* quick-transition overrides (unsets) the GPIO-based AC/DC scheme */
	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	/* non-default ULV reservation: cap set only for the value 1 */
	if (data->lowest_uclk_reserved_for_ulv != PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}
286
287 static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
288 {
289         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
290         int i;
291
292         data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
293                         FEATURE_DPM_PREFETCHER_BIT;
294         data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
295                         FEATURE_DPM_GFXCLK_BIT;
296         data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
297                         FEATURE_DPM_UCLK_BIT;
298         data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
299                         FEATURE_DPM_SOCCLK_BIT;
300         data->smu_features[GNLD_DPM_UVD].smu_feature_id =
301                         FEATURE_DPM_UVD_BIT;
302         data->smu_features[GNLD_DPM_VCE].smu_feature_id =
303                         FEATURE_DPM_VCE_BIT;
304         data->smu_features[GNLD_ULV].smu_feature_id =
305                         FEATURE_ULV_BIT;
306         data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
307                         FEATURE_DPM_MP0CLK_BIT;
308         data->smu_features[GNLD_DPM_LINK].smu_feature_id =
309                         FEATURE_DPM_LINK_BIT;
310         data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
311                         FEATURE_DPM_DCEFCLK_BIT;
312         data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
313                         FEATURE_DS_GFXCLK_BIT;
314         data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
315                         FEATURE_DS_SOCCLK_BIT;
316         data->smu_features[GNLD_DS_LCLK].smu_feature_id =
317                         FEATURE_DS_LCLK_BIT;
318         data->smu_features[GNLD_PPT].smu_feature_id =
319                         FEATURE_PPT_BIT;
320         data->smu_features[GNLD_TDC].smu_feature_id =
321                         FEATURE_TDC_BIT;
322         data->smu_features[GNLD_THERMAL].smu_feature_id =
323                         FEATURE_THERMAL_BIT;
324         data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
325                         FEATURE_GFX_PER_CU_CG_BIT;
326         data->smu_features[GNLD_RM].smu_feature_id =
327                         FEATURE_RM_BIT;
328         data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
329                         FEATURE_DS_DCEFCLK_BIT;
330         data->smu_features[GNLD_ACDC].smu_feature_id =
331                         FEATURE_ACDC_BIT;
332         data->smu_features[GNLD_VR0HOT].smu_feature_id =
333                         FEATURE_VR0HOT_BIT;
334         data->smu_features[GNLD_VR1HOT].smu_feature_id =
335                         FEATURE_VR1HOT_BIT;
336         data->smu_features[GNLD_FW_CTF].smu_feature_id =
337                         FEATURE_FW_CTF_BIT;
338         data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
339                         FEATURE_LED_DISPLAY_BIT;
340         data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
341                         FEATURE_FAN_CONTROL_BIT;
342         data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
343         data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
344         data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
345         data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
346
347         for (i = 0; i < GNLD_FEATURES_MAX; i++) {
348                 data->smu_features[i].smu_feature_bitmap =
349                         (uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
350                 data->smu_features[i].allowed =
351                         ((data->registry_data.disallowed_features >> i) & 1) ?
352                         false : true;
353         }
354 }
355
/*
 * Stub: vega12 currently derives no extra private data from the pptable.
 * Kept so the init path matches the other vega hwmgrs; always returns 0.
 */
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}
360
/*
 * Tear down the vega12 backend: free the private data allocated by
 * vega12_hwmgr_backend_init() and clear the pointer so a later fini or
 * use cannot double-free / use-after-free.  Always returns 0.
 */
static int vega12_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
368
369 static int vega12_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
370 {
371         int result = 0;
372         struct vega12_hwmgr *data;
373         struct amdgpu_device *adev = hwmgr->adev;
374
375         data = kzalloc(sizeof(struct vega12_hwmgr), GFP_KERNEL);
376         if (data == NULL)
377                 return -ENOMEM;
378
379         hwmgr->backend = data;
380
381         vega12_set_default_registry_data(hwmgr);
382
383         data->disable_dpm_mask = 0xff;
384         data->workload_mask = 0xff;
385
386         /* need to set voltage control types before EVV patching */
387         data->vddc_control = VEGA12_VOLTAGE_CONTROL_NONE;
388         data->mvdd_control = VEGA12_VOLTAGE_CONTROL_NONE;
389         data->vddci_control = VEGA12_VOLTAGE_CONTROL_NONE;
390
391         data->water_marks_bitmap = 0;
392         data->avfs_exist = false;
393
394         vega12_set_features_platform_caps(hwmgr);
395
396         vega12_init_dpm_defaults(hwmgr);
397
398         /* Parse pptable data read from VBIOS */
399         vega12_set_private_data_based_on_pptable(hwmgr);
400
401         data->is_tlu_enabled = false;
402
403         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
404                         VEGA12_MAX_HARDWARE_POWERLEVELS;
405         hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
406         hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
407
408         hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
409         /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
410         hwmgr->platform_descriptor.clockStep.engineClock = 500;
411         hwmgr->platform_descriptor.clockStep.memoryClock = 500;
412
413         data->total_active_cus = adev->gfx.cu_info.number;
414         /* Setup default Overdrive Fan control settings */
415         data->odn_fan_table.target_fan_speed =
416                         hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
417         data->odn_fan_table.target_temperature =
418                         hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
419         data->odn_fan_table.min_performance_clock =
420                         hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
421         data->odn_fan_table.min_fan_limit =
422                         hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
423                         hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
424
425         return result;
426 }
427
/*
 * Reset the low-SCLK interrupt threshold to 0 (disabled) during ASIC
 * setup.  Always returns 0.
 */
static int vega12_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
437
/*
 * ASIC setup task: currently only resets the SCLK interrupt threshold.
 * Returns 0 on success, -EINVAL if the threshold init fails.
 */
static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
			return -EINVAL);

	return 0;
}
446
/*
 * @fn vega12_init_dpm_state
 * @brief Function to initialize the Soft Min/Max and Hard Min/Max limits
 *        of a DPM table to their widest range: minimums to 0x0 and
 *        maximums to 0xffff (i.e. no clamping applied yet).
 *
 * @param    dpm_state - the address of the DPM Table to initialize.
 * @return   None.
 */
static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = 0xffff;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = 0xffff;
}
461
462 static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
463                 PPCLK_e clk_id, uint32_t *num_of_levels)
464 {
465         int ret = 0;
466
467         ret = smum_send_msg_to_smc_with_parameter(hwmgr,
468                         PPSMC_MSG_GetDpmFreqByIndex,
469                         (clk_id << 16 | 0xFF));
470         PP_ASSERT_WITH_CODE(!ret,
471                         "[GetNumOfDpmLevel] failed to get dpm levels!",
472                         return ret);
473
474         vega12_read_arg_from_smc(hwmgr, num_of_levels);
475         PP_ASSERT_WITH_CODE(*num_of_levels > 0,
476                         "[GetNumOfDpmLevel] number of clk levels is invalid!",
477                         return -EINVAL);
478
479         return ret;
480 }
481
482 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
483                 PPCLK_e clkID, uint32_t index, uint32_t *clock)
484 {
485         int result;
486
487         /*
488          *SMU expects the Clock ID to be in the top 16 bits.
489          *Lower 16 bits specify the level
490          */
491         PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
492                 PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
493                 "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
494                 return -EINVAL);
495
496         result = vega12_read_arg_from_smc(hwmgr, clock);
497
498         PP_ASSERT_WITH_CODE(*clock != 0,
499                 "[GetDPMFrequencyByIndex] Failed to get dpm frequency by index.!",
500                 return -EINVAL);
501
502         return result;
503 }
504
505 static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
506                 struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
507 {
508         int ret = 0;
509         uint32_t i, num_of_levels, clk;
510
511         ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
512         PP_ASSERT_WITH_CODE(!ret,
513                         "[SetupSingleDpmTable] failed to get clk levels!",
514                         return ret);
515
516         dpm_table->count = num_of_levels;
517
518         for (i = 0; i < num_of_levels; i++) {
519                 ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
520                 PP_ASSERT_WITH_CODE(!ret,
521                         "[SetupSingleDpmTable] failed to get clk of specific level!",
522                         return ret);
523                 dpm_table->dpm_levels[i].value = clk;
524                 dpm_table->dpm_levels[i].enabled = true;
525         }
526
527         return ret;
528 }
529
530 /*
531  * This function is to initialize all DPM state tables
532  * for SMU based on the dependency table.
533  * Dynamic state patching function will then trim these
534  * state tables to the allowed range based
535  * on the power policy or external client requests,
536  * such as UVD request, etc.
537  */
538 static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
539 {
540
541         struct vega12_hwmgr *data =
542                         (struct vega12_hwmgr *)(hwmgr->backend);
543         struct vega12_single_dpm_table *dpm_table;
544         int ret = 0;
545
546         memset(&data->dpm_table, 0, sizeof(data->dpm_table));
547
548         /* socclk */
549         dpm_table = &(data->dpm_table.soc_table);
550         if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
551                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
552                 PP_ASSERT_WITH_CODE(!ret,
553                                 "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
554                                 return ret);
555         } else {
556                 dpm_table->count = 1;
557                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
558         }
559         vega12_init_dpm_state(&(dpm_table->dpm_state));
560
561         /* gfxclk */
562         dpm_table = &(data->dpm_table.gfx_table);
563         if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
564                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
565                 PP_ASSERT_WITH_CODE(!ret,
566                                 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
567                                 return ret);
568         } else {
569                 dpm_table->count = 1;
570                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
571         }
572         vega12_init_dpm_state(&(dpm_table->dpm_state));
573
574         /* memclk */
575         dpm_table = &(data->dpm_table.mem_table);
576         if (data->smu_features[GNLD_DPM_UCLK].enabled) {
577                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
578                 PP_ASSERT_WITH_CODE(!ret,
579                                 "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
580                                 return ret);
581         } else {
582                 dpm_table->count = 1;
583                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
584         }
585         vega12_init_dpm_state(&(dpm_table->dpm_state));
586
587         /* eclk */
588         dpm_table = &(data->dpm_table.eclk_table);
589         if (data->smu_features[GNLD_DPM_VCE].enabled) {
590                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
591                 PP_ASSERT_WITH_CODE(!ret,
592                                 "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
593                                 return ret);
594         } else {
595                 dpm_table->count = 1;
596                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
597         }
598         vega12_init_dpm_state(&(dpm_table->dpm_state));
599
600         /* vclk */
601         dpm_table = &(data->dpm_table.vclk_table);
602         if (data->smu_features[GNLD_DPM_UVD].enabled) {
603                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
604                 PP_ASSERT_WITH_CODE(!ret,
605                                 "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
606                                 return ret);
607         } else {
608                 dpm_table->count = 1;
609                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
610         }
611         vega12_init_dpm_state(&(dpm_table->dpm_state));
612
613         /* dclk */
614         dpm_table = &(data->dpm_table.dclk_table);
615         if (data->smu_features[GNLD_DPM_UVD].enabled) {
616                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
617                 PP_ASSERT_WITH_CODE(!ret,
618                                 "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
619                                 return ret);
620         } else {
621                 dpm_table->count = 1;
622                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
623         }
624         vega12_init_dpm_state(&(dpm_table->dpm_state));
625
626         /* dcefclk */
627         dpm_table = &(data->dpm_table.dcef_table);
628         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
629                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
630                 PP_ASSERT_WITH_CODE(!ret,
631                                 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
632                                 return ret);
633         } else {
634                 dpm_table->count = 1;
635                 dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
636         }
637         vega12_init_dpm_state(&(dpm_table->dpm_state));
638
639         /* pixclk */
640         dpm_table = &(data->dpm_table.pixel_table);
641         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
642                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
643                 PP_ASSERT_WITH_CODE(!ret,
644                                 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
645                                 return ret);
646         } else
647                 dpm_table->count = 0;
648         vega12_init_dpm_state(&(dpm_table->dpm_state));
649
650         /* dispclk */
651         dpm_table = &(data->dpm_table.display_table);
652         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
653                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
654                 PP_ASSERT_WITH_CODE(!ret,
655                                 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
656                                 return ret);
657         } else
658                 dpm_table->count = 0;
659         vega12_init_dpm_state(&(dpm_table->dpm_state));
660
661         /* phyclk */
662         dpm_table = &(data->dpm_table.phy_table);
663         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
664                 ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
665                 PP_ASSERT_WITH_CODE(!ret,
666                                 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
667                                 return ret);
668         } else
669                 dpm_table->count = 0;
670         vega12_init_dpm_state(&(dpm_table->dpm_state));
671
672         /* save a copy of the default DPM table */
673         memcpy(&(data->golden_dpm_table), &(data->dpm_table),
674                         sizeof(struct vega12_dpm_table));
675
676         return 0;
677 }
678
#if 0
/* Compiled out: power-profile saving is not wired up for Vega12.
 * NOTE(review): dead code retained for reference only — candidate for
 * removal; confirm no planned users before deleting.
 */
static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	uint32_t min_level;

	hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
	hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;

	/* Optimize compute power profile: Use only highest
	 * 2 power levels (if more than 2 are available)
	 */
	if (dpm_table->count > 2)
		min_level = dpm_table->count - 2;
	else if (dpm_table->count == 2)
		min_level = 1;
	else
		min_level = 0;

	hwmgr->default_compute_power_profile.min_sclk =
			dpm_table->dpm_levels[min_level].value;

	hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
	hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;

	return 0;
}
#endif
708
/**
* Initializes the SMC table and uploads it
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   0 on success; an error code if uploading the PPTable fails.
*/
static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	/* Cache the VBIOS boot-up voltages and clocks (used elsewhere as
	 * single-level fallback DPM tables when a DPM feature is disabled)
	 * and program the minimum deep-sleep DCEF clock from the boot value,
	 * divided by 100 to match the units the SMC message expects.
	 */
	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	if (!result) {
		data->vbios_boot_state.vddc     = boot_up_values.usVddc;
		data->vbios_boot_state.vddci    = boot_up_values.usVddci;
		data->vbios_boot_state.mvddc    = boot_up_values.usMvddc;
		data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
		data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
		data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
		data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
		data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
		data->vbios_boot_state.eclock = boot_up_values.ulEClk;
		data->vbios_boot_state.dclock = boot_up_values.ulDClk;
		data->vbios_boot_state.vclock = boot_up_values.ulVClk;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
	}

	/* Copy the PPTable parsed from the VBIOS and upload it to the SMC */
	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = vega12_copy_table_to_smc(hwmgr,
			(uint8_t *)pp_table, TABLE_PPTABLE);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	return 0;
}
753
754 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
755 {
756         struct vega12_hwmgr *data =
757                         (struct vega12_hwmgr *)(hwmgr->backend);
758         int i;
759         uint32_t allowed_features_low = 0, allowed_features_high = 0;
760
761         for (i = 0; i < GNLD_FEATURES_MAX; i++)
762                 if (data->smu_features[i].allowed)
763                         data->smu_features[i].smu_feature_id > 31 ?
764                                 (allowed_features_high |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT) & 0xFFFFFFFF)) :
765                                 (allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
766
767         PP_ASSERT_WITH_CODE(
768                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
769                 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
770                 return -1);
771
772         PP_ASSERT_WITH_CODE(
773                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
774                 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
775                 return -1);
776
777         return 0;
778 }
779
780 static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
781 {
782         struct vega12_hwmgr *data =
783                         (struct vega12_hwmgr *)(hwmgr->backend);
784
785         data->uvd_power_gated = true;
786         data->vce_power_gated = true;
787
788         if (data->smu_features[GNLD_DPM_UVD].enabled)
789                 data->uvd_power_gated = false;
790
791         if (data->smu_features[GNLD_DPM_VCE].enabled)
792                 data->vce_power_gated = false;
793 }
794
/*
 * Ask the SMC to enable every feature in the allowed mask, then read back
 * which features actually came up and mirror that into the local feature
 * table. A feature that was allowed but did not enable only triggers a
 * diagnostic, not a failure.
 */
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;

	PP_ASSERT_WITH_CODE(
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
		"[EnableAllSMUFeatures] Failed to enable all smu features!",
		return -1);

	/* Resync cached enabled/supported flags with the SMC's report */
	if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
		for (i = 0; i < GNLD_FEATURES_MAX; i++) {
			enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
			data->smu_features[i].enabled = enabled;
			data->smu_features[i].supported = enabled;
			PP_ASSERT(
				!data->smu_features[i].allowed || enabled,
				"[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
		}
	}

	/* Refresh UVD/VCE powergate state now that DPM features are known */
	vega12_init_powergate_state(hwmgr);

	return 0;
}
823
824 static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
825 {
826         struct vega12_hwmgr *data =
827                         (struct vega12_hwmgr *)(hwmgr->backend);
828         uint64_t features_enabled;
829         int i;
830         bool enabled;
831
832         PP_ASSERT_WITH_CODE(
833                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
834                 "[DisableAllSMUFeatures] Failed to disable all smu features!",
835                 return -1);
836
837         if (vega12_get_enabled_smc_features(hwmgr, &features_enabled) == 0) {
838                 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
839                         enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
840                         data->smu_features[i].enabled = enabled;
841                         data->smu_features[i].supported = enabled;
842                 }
843         }
844
845         return 0;
846 }
847
/* OverDrive-N default programming: intentionally a no-op on Vega12 for
 * now; kept as a stable hook for vega12_enable_dpm_tasks().
 */
static int vega12_odn_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	return 0;
}
853
/*
 * Forward an OverDrive power-target percentage adjustment to the SMC.
 * The caller passes a signed adjustment cast to uint32_t (see
 * vega12_power_control_set_level()).
 */
static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
		uint32_t adjust_percent)
{
	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
860
861 static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
862 {
863         int adjust_percent, result = 0;
864
865         if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
866                 adjust_percent =
867                                 hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
868                                 hwmgr->platform_descriptor.TDPAdjustment :
869                                 (-1 * hwmgr->platform_descriptor.TDPAdjustment);
870                 result = vega12_set_overdrive_target_percentage(hwmgr,
871                                 (uint32_t)adjust_percent);
872         }
873         return result;
874 }
875
876 static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
877                 PPCLK_e clkid, struct vega12_clock_range *clock)
878 {
879         /* AC Max */
880         PP_ASSERT_WITH_CODE(
881                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
882                 "[GetClockRanges] Failed to get max ac clock from SMC!",
883                 return -EINVAL);
884         vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
885
886         /* AC Min */
887         PP_ASSERT_WITH_CODE(
888                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
889                 "[GetClockRanges] Failed to get min ac clock from SMC!",
890                 return -EINVAL);
891         vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
892
893         /* DC Max */
894         PP_ASSERT_WITH_CODE(
895                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
896                 "[GetClockRanges] Failed to get max dc clock from SMC!",
897                 return -EINVAL);
898         vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
899
900         return 0;
901 }
902
903 static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
904 {
905         struct vega12_hwmgr *data =
906                         (struct vega12_hwmgr *)(hwmgr->backend);
907         uint32_t i;
908
909         for (i = 0; i < PPCLK_COUNT; i++)
910                 PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
911                                         i, &(data->clk_range[i])),
912                                 "Failed to get clk range from SMC!",
913                                 return -EINVAL);
914
915         return 0;
916 }
917
918 static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
919 {
920         int tmp_result, result = 0;
921
922         smum_send_msg_to_smc_with_parameter(hwmgr,
923                         PPSMC_MSG_NumOfDisplays, 0);
924
925         result = vega12_set_allowed_featuresmask(hwmgr);
926         PP_ASSERT_WITH_CODE(result == 0,
927                         "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
928                         return result);
929
930         tmp_result = vega12_init_smc_table(hwmgr);
931         PP_ASSERT_WITH_CODE(!tmp_result,
932                         "Failed to initialize SMC table!",
933                         result = tmp_result);
934
935         result = vega12_enable_all_smu_features(hwmgr);
936         PP_ASSERT_WITH_CODE(!result,
937                         "Failed to enable all smu features!",
938                         return result);
939
940         tmp_result = vega12_power_control_set_level(hwmgr);
941         PP_ASSERT_WITH_CODE(!tmp_result,
942                         "Failed to power control set level!",
943                         result = tmp_result);
944
945         result = vega12_get_all_clock_ranges(hwmgr);
946         PP_ASSERT_WITH_CODE(!result,
947                         "Failed to get all clock ranges!",
948                         return result);
949
950         result = vega12_odn_initialize_default_settings(hwmgr);
951         PP_ASSERT_WITH_CODE(!result,
952                         "Failed to power control set level!",
953                         return result);
954
955         result = vega12_setup_default_dpm_tables(hwmgr);
956         PP_ASSERT_WITH_CODE(!result,
957                         "Failed to setup default DPM tables!",
958                         return result);
959         return result;
960 }
961
/* Boot power state needs no Vega12-specific patching; no-op hook. */
static int vega12_patch_boot_state(struct pp_hwmgr *hwmgr,
	     struct pp_hw_power_state *hw_ps)
{
	return 0;
}
967
968 static uint32_t vega12_find_lowest_dpm_level(
969                 struct vega12_single_dpm_table *table)
970 {
971         uint32_t i;
972
973         for (i = 0; i < table->count; i++) {
974                 if (table->dpm_levels[i].enabled)
975                         break;
976         }
977
978         if (i >= table->count) {
979                 i = 0;
980                 table->dpm_levels[i].enabled = true;
981         }
982
983         return i;
984 }
985
986 static uint32_t vega12_find_highest_dpm_level(
987                 struct vega12_single_dpm_table *table)
988 {
989         int32_t i = 0;
990         PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
991                         "[FindHighestDPMLevel] DPM Table has too many entries!",
992                         return MAX_REGULAR_DPM_NUMBER - 1);
993
994         for (i = table->count - 1; i >= 0; i--) {
995                 if (table->dpm_levels[i].enabled)
996                         break;
997         }
998
999         if (i < 0) {
1000                 i = 0;
1001                 table->dpm_levels[i].enabled = true;
1002         }
1003
1004         return (uint32_t)i;
1005 }
1006
/*
 * Push the cached minimum clock levels from each dpm_state down to the
 * SMC for every clock domain whose DPM feature is enabled. Each message
 * encodes the clock id in the upper 16 bits and the frequency in the
 * lower 16 bits. Returns the first SMC error, or 0 on success.
 */
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = hwmgr->backend;
	uint32_t min_freq;
	int ret = 0;

	/* gfxclk: soft minimum only */
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min gfxclk !",
					return ret);
	}

	/* memclk: both soft and hard minimums */
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min memclk !",
					return ret);

		min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetHardMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set hard min memclk !",
					return ret);
	}

	/* UVD drives both vclk and dclk */
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_VCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min vclk!",
					return ret);

		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_DCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min dclk!",
					return ret);
	}

	/* VCE drives eclk */
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_ECLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min eclk!",
					return ret);
	}

	/* socclk: soft minimum only */
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min socclk!",
					return ret);
	}

	return ret;

}
1079
/*
 * Push the cached soft-maximum clock levels from each dpm_state down to
 * the SMC for every clock domain whose DPM feature is enabled. Message
 * encoding matches vega12_upload_dpm_min_level(): clock id in the upper
 * 16 bits, frequency in the lower 16 bits. Returns the first SMC error,
 * or 0 on success.
 */
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = hwmgr->backend;
	uint32_t max_freq;
	int ret = 0;

	/* gfxclk */
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max gfxclk!",
					return ret);
	}

	/* memclk */
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_UCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max memclk!",
					return ret);
	}

	/* UVD drives both vclk and dclk */
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_VCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max vclk!",
					return ret);

		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_DCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max dclk!",
					return ret);
	}

	/* VCE drives eclk */
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_ECLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max eclk!",
					return ret);
	}

	/* socclk */
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;

		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max socclk!",
					return ret);
	}

	return ret;
}
1145
1146 int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
1147 {
1148         struct vega12_hwmgr *data =
1149                         (struct vega12_hwmgr *)(hwmgr->backend);
1150
1151         if (data->smu_features[GNLD_DPM_VCE].supported) {
1152                 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
1153                                 enable,
1154                                 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
1155                                 "Attempt to Enable/Disable DPM VCE Failed!",
1156                                 return -1);
1157                 data->smu_features[GNLD_DPM_VCE].enabled = enable;
1158         }
1159
1160         return 0;
1161 }
1162
1163 static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1164 {
1165         struct vega12_hwmgr *data =
1166                         (struct vega12_hwmgr *)(hwmgr->backend);
1167         uint32_t gfx_clk;
1168
1169         if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1170                 return -1;
1171
1172         if (low)
1173                 PP_ASSERT_WITH_CODE(
1174                         vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false) == 0,
1175                         "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1176                         return -1);
1177         else
1178                 PP_ASSERT_WITH_CODE(
1179                         vega12_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true) == 0,
1180                         "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1181                         return -1);
1182
1183         return (gfx_clk * 100);
1184 }
1185
1186 static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1187 {
1188         struct vega12_hwmgr *data =
1189                         (struct vega12_hwmgr *)(hwmgr->backend);
1190         uint32_t mem_clk;
1191
1192         if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1193                 return -1;
1194
1195         if (low)
1196                 PP_ASSERT_WITH_CODE(
1197                         vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false) == 0,
1198                         "[GetMclks]: fail to get min PPCLK_UCLK\n",
1199                         return -1);
1200         else
1201                 PP_ASSERT_WITH_CODE(
1202                         vega12_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true) == 0,
1203                         "[GetMclks]: fail to get max PPCLK_UCLK\n",
1204                         return -1);
1205
1206         return (mem_clk * 100);
1207 }
1208
1209 static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query)
1210 {
1211 #if 0
1212         uint32_t value;
1213
1214         PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
1215                         PPSMC_MSG_GetCurrPkgPwr),
1216                         "Failed to get current package power!",
1217                         return -EINVAL);
1218
1219         vega12_read_arg_from_smc(hwmgr, &value);
1220         /* power value is an integer */
1221         *query = value << 8;
1222 #endif
1223         return 0;
1224 }
1225
1226 static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
1227 {
1228         uint32_t gfx_clk = 0;
1229
1230         *gfx_freq = 0;
1231
1232         PP_ASSERT_WITH_CODE(
1233                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
1234                         "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1235                         return -1);
1236         PP_ASSERT_WITH_CODE(
1237                         vega12_read_arg_from_smc(hwmgr, &gfx_clk) == 0,
1238                         "[GetCurrentGfxClkFreq] Attempt to read arg from SMC Failed",
1239                         return -1);
1240
1241         *gfx_freq = gfx_clk * 100;
1242
1243         return 0;
1244 }
1245
1246 static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
1247 {
1248         uint32_t mem_clk = 0;
1249
1250         *mclk_freq = 0;
1251
1252         PP_ASSERT_WITH_CODE(
1253                         smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
1254                         "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1255                         return -1);
1256         PP_ASSERT_WITH_CODE(
1257                         vega12_read_arg_from_smc(hwmgr, &mem_clk) == 0,
1258                         "[GetCurrentMClkFreq] Attempt to read arg from SMC Failed",
1259                         return -1);
1260
1261         *mclk_freq = mem_clk * 100;
1262
1263         return 0;
1264 }
1265
/*
 * Report GPU busy percentage. The real SMC query is compiled out, so a
 * fixed 50% placeholder is returned for now.
 */
static int vega12_get_current_activity_percent(
		struct pp_hwmgr *hwmgr,
		uint32_t *activity_percent)
{
	int ret = 0;
	/* placeholder value used while the SMC query below is disabled */
	uint32_t current_activity = 50;

#if 0
	ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
	if (!ret) {
		ret = vega12_read_arg_from_smc(hwmgr, &current_activity);
		if (!ret) {
			if (current_activity > 100) {
				PP_ASSERT(false,
					"[GetCurrentActivityPercent] Activity Percentage Exceeds 100!");
				current_activity = 100;
			}
		} else
			PP_ASSERT(false,
				"[GetCurrentActivityPercent] Attempt To Read Average Graphics Activity from SMU Failed!");
	} else
		PP_ASSERT(false,
			"[GetCurrentActivityPercent] Attempt To Send Get Average Graphics Activity to SMU Failed!");
#endif
	*activity_percent = current_activity;

	return ret;
}
1294
1295 static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1296                               void *value, int *size)
1297 {
1298         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1299         int ret = 0;
1300
1301         switch (idx) {
1302         case AMDGPU_PP_SENSOR_GFX_SCLK:
1303                 ret = vega12_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
1304                 if (!ret)
1305                         *size = 4;
1306                 break;
1307         case AMDGPU_PP_SENSOR_GFX_MCLK:
1308                 ret = vega12_get_current_mclk_freq(hwmgr, (uint32_t *)value);
1309                 if (!ret)
1310                         *size = 4;
1311                 break;
1312         case AMDGPU_PP_SENSOR_GPU_LOAD:
1313                 ret = vega12_get_current_activity_percent(hwmgr, (uint32_t *)value);
1314                 if (!ret)
1315                         *size = 4;
1316                 break;
1317         case AMDGPU_PP_SENSOR_GPU_TEMP:
1318                 *((uint32_t *)value) = vega12_thermal_get_temperature(hwmgr);
1319                 *size = 4;
1320                 break;
1321         case AMDGPU_PP_SENSOR_UVD_POWER:
1322                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
1323                 *size = 4;
1324                 break;
1325         case AMDGPU_PP_SENSOR_VCE_POWER:
1326                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
1327                 *size = 4;
1328                 break;
1329         case AMDGPU_PP_SENSOR_GPU_POWER:
1330                 ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value);
1331
1332                 break;
1333         default:
1334                 ret = -EINVAL;
1335                 break;
1336         }
1337         return ret;
1338 }
1339
1340 static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
1341                 bool has_disp)
1342 {
1343         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1344
1345         if (data->smu_features[GNLD_DPM_UCLK].enabled)
1346                 return smum_send_msg_to_smc_with_parameter(hwmgr,
1347                         PPSMC_MSG_SetUclkFastSwitch,
1348                         has_disp ? 0 : 1);
1349
1350         return 0;
1351 }
1352
1353 int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
1354                 struct pp_display_clock_request *clock_req)
1355 {
1356         int result = 0;
1357         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1358         enum amd_pp_clock_type clk_type = clock_req->clock_type;
1359         uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
1360         PPCLK_e clk_select = 0;
1361         uint32_t clk_request = 0;
1362
1363         if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
1364                 switch (clk_type) {
1365                 case amd_pp_dcef_clock:
1366                         clk_freq = clock_req->clock_freq_in_khz / 100;
1367                         clk_select = PPCLK_DCEFCLK;
1368                         break;
1369                 case amd_pp_disp_clock:
1370                         clk_select = PPCLK_DISPCLK;
1371                         break;
1372                 case amd_pp_pixel_clock:
1373                         clk_select = PPCLK_PIXCLK;
1374                         break;
1375                 case amd_pp_phy_clock:
1376                         clk_select = PPCLK_PHYCLK;
1377                         break;
1378                 default:
1379                         pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
1380                         result = -1;
1381                         break;
1382                 }
1383
1384                 if (!result) {
1385                         clk_request = (clk_select << 16) | clk_freq;
1386                         result = smum_send_msg_to_smc_with_parameter(hwmgr,
1387                                         PPSMC_MSG_SetHardMinByFreq,
1388                                         clk_request);
1389                 }
1390         }
1391
1392         return result;
1393 }
1394
/*
 * Push the current display configuration down to the SMU after a power
 * state adjustment: update the display-change notification and program
 * DAL's minimum DCEFCLK (plus its deep-sleep floor when supported).
 * Returns 0 on success, -1 if the deep-sleep divider message fails.
 */
static int vega12_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data =
			(struct vega12_hwmgr *)(hwmgr->backend);
	struct PP_Clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;

	/* Multiple unsynchronized displays are reported as "no display" so
	 * the UCLK fast-switch policy is set accordingly (see
	 * vega12_notify_smc_display_change).
	 */
	if ((hwmgr->display_config->num_display > 1) &&
		!hwmgr->display_config->multi_monitor_in_sync)
		vega12_notify_smc_display_change(hwmgr, false);
	else
		vega12_notify_smc_display_change(hwmgr, true);

	/* memoryClock is gathered here but not consumed in this function. */
	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
		/* Request DAL's minimum DCEFCLK as a hard floor ... */
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = min_clocks.dcefClock;
		if (!vega12_display_clock_voltage_request(hwmgr, &clock_req)) {
			/* ... and, on success, the deep-sleep DCEFCLK floor too. */
			if (data->smu_features[GNLD_DS_DCEFCLK].supported)
				PP_ASSERT_WITH_CODE(
					!smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR /100),
					"Attempt to set divider for DCEFCLK Failed!",
					return -1);
		} else {
			/* Best effort: log the failure but do not propagate it. */
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	return 0;
}
1431
1432 static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
1433 {
1434         struct vega12_hwmgr *data =
1435                         (struct vega12_hwmgr *)(hwmgr->backend);
1436
1437         uint32_t soft_level;
1438
1439         soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
1440
1441         data->dpm_table.gfx_table.dpm_state.soft_min_level =
1442                 data->dpm_table.gfx_table.dpm_state.soft_max_level =
1443                 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1444
1445         soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
1446
1447         data->dpm_table.mem_table.dpm_state.soft_min_level =
1448                 data->dpm_table.mem_table.dpm_state.soft_max_level =
1449                 data->dpm_table.mem_table.dpm_levels[soft_level].value;
1450
1451         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1452                         "Failed to upload boot level to highest!",
1453                         return -1);
1454
1455         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1456                         "Failed to upload dpm max level to highest!",
1457                         return -1);
1458
1459         return 0;
1460 }
1461
1462 static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1463 {
1464         struct vega12_hwmgr *data =
1465                         (struct vega12_hwmgr *)(hwmgr->backend);
1466         uint32_t soft_level;
1467
1468         soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
1469
1470         data->dpm_table.gfx_table.dpm_state.soft_min_level =
1471                 data->dpm_table.gfx_table.dpm_state.soft_max_level =
1472                 data->dpm_table.gfx_table.dpm_levels[soft_level].value;
1473
1474         soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
1475
1476         data->dpm_table.mem_table.dpm_state.soft_min_level =
1477                 data->dpm_table.mem_table.dpm_state.soft_max_level =
1478                 data->dpm_table.mem_table.dpm_levels[soft_level].value;
1479
1480         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1481                         "Failed to upload boot level to highest!",
1482                         return -1);
1483
1484         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1485                         "Failed to upload dpm max level to highest!",
1486                         return -1);
1487
1488         return 0;
1489
1490 }
1491
1492 static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1493 {
1494         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
1495                         "Failed to upload DPM Bootup Levels!",
1496                         return -1);
1497
1498         PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
1499                         "Failed to upload DPM Max Levels!",
1500                         return -1);
1501
1502         return 0;
1503 }
1504
1505 static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
1506                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
1507 {
1508         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1509         struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
1510         struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
1511         struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
1512
1513         *sclk_mask = 0;
1514         *mclk_mask = 0;
1515         *soc_mask  = 0;
1516
1517         if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
1518             mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
1519             soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
1520                 *sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
1521                 *mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
1522                 *soc_mask  = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
1523         }
1524
1525         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
1526                 *sclk_mask = 0;
1527         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
1528                 *mclk_mask = 0;
1529         } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
1530                 *sclk_mask = gfx_dpm_table->count - 1;
1531                 *mclk_mask = mem_dpm_table->count - 1;
1532                 *soc_mask  = soc_dpm_table->count - 1;
1533         }
1534
1535         return 0;
1536 }
1537
1538 static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
1539 {
1540         switch (mode) {
1541         case AMD_FAN_CTRL_NONE:
1542                 break;
1543         case AMD_FAN_CTRL_MANUAL:
1544                 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1545                         vega12_fan_ctrl_stop_smc_fan_control(hwmgr);
1546                 break;
1547         case AMD_FAN_CTRL_AUTO:
1548                 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
1549                         vega12_fan_ctrl_start_smc_fan_control(hwmgr);
1550                 break;
1551         default:
1552                 break;
1553         }
1554 }
1555
1556 static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1557                                 enum amd_dpm_forced_level level)
1558 {
1559         int ret = 0;
1560         uint32_t sclk_mask = 0;
1561         uint32_t mclk_mask = 0;
1562         uint32_t soc_mask = 0;
1563
1564         switch (level) {
1565         case AMD_DPM_FORCED_LEVEL_HIGH:
1566                 ret = vega12_force_dpm_highest(hwmgr);
1567                 break;
1568         case AMD_DPM_FORCED_LEVEL_LOW:
1569                 ret = vega12_force_dpm_lowest(hwmgr);
1570                 break;
1571         case AMD_DPM_FORCED_LEVEL_AUTO:
1572                 ret = vega12_unforce_dpm_levels(hwmgr);
1573                 break;
1574         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1575         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1576         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1577         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1578                 ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
1579                 if (ret)
1580                         return ret;
1581                 vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
1582                 vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
1583                 break;
1584         case AMD_DPM_FORCED_LEVEL_MANUAL:
1585         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1586         default:
1587                 break;
1588         }
1589
1590         return ret;
1591 }
1592
1593 static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr *hwmgr)
1594 {
1595         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1596
1597         if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
1598                 return AMD_FAN_CTRL_MANUAL;
1599         else
1600                 return AMD_FAN_CTRL_AUTO;
1601 }
1602
/*
 * Report max engine/memory clocks to DAL.  The lookup body is compiled
 * out, so this currently leaves *info untouched and always returns 0.
 * NOTE(review): presumably disabled because the phm_ppt_v2 table layout
 * used below does not match this ASIC's pptable — confirm before
 * re-enabling.
 */
static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
#if 0
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;
#endif
	return 0;
}
1617
1618 static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
1619                 uint32_t *clock,
1620                 PPCLK_e clock_select,
1621                 bool max)
1622 {
1623         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1624
1625         if (max)
1626                 *clock = data->clk_range[clock_select].ACMax;
1627         else
1628                 *clock = data->clk_range[clock_select].ACMin;
1629
1630         return 0;
1631 }
1632
1633 static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
1634                 struct pp_clock_levels_with_latency *clocks)
1635 {
1636         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1637         uint32_t ucount;
1638         int i;
1639         struct vega12_single_dpm_table *dpm_table;
1640
1641         if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
1642                 return -1;
1643
1644         dpm_table = &(data->dpm_table.gfx_table);
1645         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1646                 MAX_NUM_CLOCKS : dpm_table->count;
1647
1648         for (i = 0; i < ucount; i++) {
1649                 clocks->data[i].clocks_in_khz =
1650                         dpm_table->dpm_levels[i].value * 100;
1651
1652                 clocks->data[i].latency_in_us = 0;
1653         }
1654
1655         clocks->num_levels = ucount;
1656
1657         return 0;
1658 }
1659
/*
 * Memory-switch latency for a UCLK level.  A flat value is reported for
 * every frequency; callers store it in latency_in_us fields, so the unit
 * is microseconds.  The clock argument is currently unused.
 */
static uint32_t vega12_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	return 25;
}
1665
1666 static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
1667                 struct pp_clock_levels_with_latency *clocks)
1668 {
1669         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1670         uint32_t ucount;
1671         int i;
1672         struct vega12_single_dpm_table *dpm_table;
1673         if (!data->smu_features[GNLD_DPM_UCLK].enabled)
1674                 return -1;
1675
1676         dpm_table = &(data->dpm_table.mem_table);
1677         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1678                 MAX_NUM_CLOCKS : dpm_table->count;
1679
1680         for (i = 0; i < ucount; i++) {
1681                 clocks->data[i].clocks_in_khz =
1682                         data->mclk_latency_table.entries[i].frequency =
1683                         dpm_table->dpm_levels[i].value * 100;
1684
1685                 clocks->data[i].latency_in_us =
1686                         data->mclk_latency_table.entries[i].latency =
1687                         vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
1688         }
1689
1690         clocks->num_levels = data->mclk_latency_table.count = ucount;
1691
1692         return 0;
1693 }
1694
1695 static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
1696                 struct pp_clock_levels_with_latency *clocks)
1697 {
1698         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1699         uint32_t ucount;
1700         int i;
1701         struct vega12_single_dpm_table *dpm_table;
1702
1703         if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
1704                 return -1;
1705
1706
1707         dpm_table = &(data->dpm_table.dcef_table);
1708         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1709                 MAX_NUM_CLOCKS : dpm_table->count;
1710
1711         for (i = 0; i < ucount; i++) {
1712                 clocks->data[i].clocks_in_khz =
1713                         dpm_table->dpm_levels[i].value * 100;
1714
1715                 clocks->data[i].latency_in_us = 0;
1716         }
1717
1718         clocks->num_levels = ucount;
1719
1720         return 0;
1721 }
1722
1723 static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
1724                 struct pp_clock_levels_with_latency *clocks)
1725 {
1726         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
1727         uint32_t ucount;
1728         int i;
1729         struct vega12_single_dpm_table *dpm_table;
1730
1731         if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
1732                 return -1;
1733
1734
1735         dpm_table = &(data->dpm_table.soc_table);
1736         ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
1737                 MAX_NUM_CLOCKS : dpm_table->count;
1738
1739         for (i = 0; i < ucount; i++) {
1740                 clocks->data[i].clocks_in_khz =
1741                         dpm_table->dpm_levels[i].value * 100;
1742
1743                 clocks->data[i].latency_in_us = 0;
1744         }
1745
1746         clocks->num_levels = ucount;
1747
1748         return 0;
1749
1750 }
1751
1752 static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
1753                 enum amd_pp_clock_type type,
1754                 struct pp_clock_levels_with_latency *clocks)
1755 {
1756         int ret;
1757
1758         switch (type) {
1759         case amd_pp_sys_clock:
1760                 ret = vega12_get_sclks(hwmgr, clocks);
1761                 break;
1762         case amd_pp_mem_clock:
1763                 ret = vega12_get_memclocks(hwmgr, clocks);
1764                 break;
1765         case amd_pp_dcef_clock:
1766                 ret = vega12_get_dcefclocks(hwmgr, clocks);
1767                 break;
1768         case amd_pp_soc_clock:
1769                 ret = vega12_get_socclocks(hwmgr, clocks);
1770                 break;
1771         default:
1772                 return -EINVAL;
1773         }
1774
1775         return ret;
1776 }
1777
/*
 * Clock-levels-with-voltage query: not implemented for this ASIC.
 * Reports an empty (but valid) level list for every clock type.
 */
static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	clocks->num_levels = 0;

	return 0;
}
1786
/*
 * Stage the display watermark table for the SMU.  Translates DAL's DMIF
 * and MCIF clock-range sets into the Watermarks_t rows, dividing each
 * kHz value by 100 to match the SMU's expected units.  Only updates the
 * cached table and dirty bits; the actual upload happens later (the
 * WaterMarksLoaded bit is cleared so a subsequent display-config task
 * re-uploads it).  Always returns 0.
 */
static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
	int result = 0;
	uint32_t i;

	/* Watermarks are only meaningful when both clock domains they index
	 * (DCEFCLK and SOCCLK) have DPM support and the registry has not
	 * disabled them.
	 */
	if (!data->registry_data.disable_water_mark &&
			data->smu_features[GNLD_DPM_DCEFCLK].supported &&
			data->smu_features[GNLD_DPM_SOCCLK].supported) {
		/* DMIF sets fill the WM_DCEFCLK rows. */
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
			table->WatermarkRow[WM_DCEFCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
		}

		/* MCIF sets fill the WM_SOCCLK rows. */
		for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MinUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
				cpu_to_le16((uint16_t)
				(wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
				100);
			table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
					wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
		}
		/* Mark the cached table dirty so it gets (re)loaded to SMU. */
		data->water_marks_bitmap |= WaterMarksExist;
		data->water_marks_bitmap &= ~WaterMarksLoaded;
	}

	return result;
}
1845
/*
 * Force a clock domain to the DPM levels selected by @mask: the lowest
 * set bit becomes the soft minimum level index and the highest set bit
 * the soft maximum, then both limits are uploaded to the SMU.  Only
 * PP_SCLK and PP_MCLK are acted upon; PP_PCIE and unknown types are
 * silently ignored.  Returns 0 on success or the upload error.
 */
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	uint32_t soft_min_level, soft_max_level;
	int ret = 0;

	switch (type) {
	case PP_SCLK:
		/* ffs/fls are 1-based, hence the -1; an empty mask pins
		 * both limits to level 0.
		 */
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		data->dpm_table.gfx_table.dpm_state.soft_min_level =
			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
		data->dpm_table.gfx_table.dpm_state.soft_max_level =
			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;

		ret = vega12_upload_dpm_min_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega12_upload_dpm_max_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);
		break;

	case PP_MCLK:
		/* Same mask-to-level mapping, applied to the UCLK table. */
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		data->dpm_table.mem_table.dpm_state.soft_min_level =
			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
		data->dpm_table.mem_table.dpm_state.soft_max_level =
			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;

		ret = vega12_upload_dpm_min_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega12_upload_dpm_max_level(hwmgr);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);

		break;

	case PP_PCIE:
		break;

	default:
		break;
	}

	return 0;
}
1904
/*
 * Format the DPM level list of the requested clock domain into @buf,
 * one "index: freqMhz" line per level, marking the level that matches
 * the current frequency with '*'.  Returns the number of characters
 * written (0 for unsupported types), or -1 on query failure.
 */
static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	int i, now, size = 0;
	struct pp_clock_levels_with_latency clocks;

	switch (type) {
	case PP_SCLK:
		PP_ASSERT_WITH_CODE(
				vega12_get_current_gfx_clk_freq(hwmgr, &now) == 0,
				"Attempt to get current gfx clk Failed!",
				return -1);

		PP_ASSERT_WITH_CODE(
				vega12_get_sclks(hwmgr, &clocks) == 0,
				"Attempt to get gfx clk levels Failed!",
				return -1);
		/* NOTE(review): clocks_in_khz is compared against 'now'
		 * unscaled but printed with /100 as MHz — confirm both
		 * values use the same unit for the '*' marker to work.
		 */
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 100,
				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
		break;

	case PP_MCLK:
		PP_ASSERT_WITH_CODE(
				vega12_get_current_mclk_freq(hwmgr, &now) == 0,
				"Attempt to get current mclk freq Failed!",
				return -1);

		PP_ASSERT_WITH_CODE(
				vega12_get_memclocks(hwmgr, &clocks) == 0,
				"Attempt to get memory clk levels Failed!",
				return -1);
		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 100,
				(clocks.data[i].clocks_in_khz == now) ? "*" : "");
		break;

	case PP_PCIE:
		break;

	default:
		break;
	}
	return size;
}
1952
/*
 * Recompute the soft/hard min/max window of every DPM table for the
 * current display configuration and forced-level mode.  Each domain is
 * first reset to its full range, then narrowed by the UMD pstate rules,
 * and UCLK additionally honours DAL's hard minimum and the
 * mclk-switching constraints.  Only updates the cached dpm_state; the
 * upload to SMU happens elsewhere.  Always returns 0.
 */
static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *dpm_table;
	bool vblank_too_short = false;
	bool disable_mclk_switching;
	uint32_t i, latency;

	/* Mclk switching is unsafe with multiple unsynchronized displays.
	 * NOTE(review): vblank_too_short is hard-coded false — no vblank
	 * check is implemented yet; confirm whether one is intended.
	 */
	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
				  !hwmgr->display_config->multi_monitor_in_sync) ||
				  vblank_too_short;
	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	/* Reset to the full table range ... */
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	/* ... then pin the soft window per the UMD pstate / profile mode. */
	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* honour DAL's UCLK Hardmin */
	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;

	/* Hardmin is dependent on displayconfig */
	if (disable_mclk_switching) {
		/* Start from the top level, then pick the lowest level whose
		 * switch latency is tolerable and still satisfies DAL's
		 * minimum.  mclk_latency_table is filled by
		 * vega12_get_memclocks.
		 */
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
			if (data->mclk_latency_table.entries[i].latency <= latency) {
				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
					break;
				}
			}
		}
	}

	if (hwmgr->display_config->nb_pstate_switch_disable)
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	return 0;
}
2112
2113 static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
2114 {
2115         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2116         int result = 0;
2117         Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
2118
2119         if ((data->water_marks_bitmap & WaterMarksExist) &&
2120                         !(data->water_marks_bitmap & WaterMarksLoaded)) {
2121                 result = vega12_copy_table_to_smc(hwmgr,
2122                         (uint8_t *)wm_table, TABLE_WATERMARKS);
2123                 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
2124                 data->water_marks_bitmap |= WaterMarksLoaded;
2125         }
2126
2127         if ((data->water_marks_bitmap & WaterMarksExist) &&
2128                 data->smu_features[GNLD_DPM_DCEFCLK].supported &&
2129                 data->smu_features[GNLD_DPM_SOCCLK].supported)
2130                 smum_send_msg_to_smc_with_parameter(hwmgr,
2131                         PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
2132
2133         return result;
2134 }
2135
2136 int vega12_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
2137 {
2138         struct vega12_hwmgr *data =
2139                         (struct vega12_hwmgr *)(hwmgr->backend);
2140
2141         if (data->smu_features[GNLD_DPM_UVD].supported) {
2142                 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr,
2143                                 enable,
2144                                 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
2145                                 "Attempt to Enable/Disable DPM UVD Failed!",
2146                                 return -1);
2147                 data->smu_features[GNLD_DPM_UVD].enabled = enable;
2148         }
2149
2150         return 0;
2151 }
2152
2153 static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
2154 {
2155         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2156
2157         if (data->vce_power_gated == bgate)
2158                 return;
2159
2160         data->vce_power_gated = bgate;
2161         vega12_enable_disable_vce_dpm(hwmgr, !bgate);
2162 }
2163
2164 static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
2165 {
2166         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2167
2168         if (data->uvd_power_gated == bgate)
2169                 return;
2170
2171         data->uvd_power_gated = bgate;
2172         vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
2173 }
2174
2175 static bool
2176 vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
2177 {
2178         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2179         bool is_update_required = false;
2180
2181         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
2182                 is_update_required = true;
2183
2184         if (data->registry_data.gfx_clk_deep_sleep_support) {
2185                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr)
2186                         is_update_required = true;
2187         }
2188
2189         return is_update_required;
2190 }
2191
/* Tear down dynamic power management by disabling all SMU features. */
static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result = vega12_disable_all_smu_features(hwmgr);

	/* Report the failure but keep the 0-on-success contract. */
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable all smu features!", result = tmp_result);

	return result;
}
2202
2203 static int vega12_power_off_asic(struct pp_hwmgr *hwmgr)
2204 {
2205         struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
2206         int result;
2207
2208         result = vega12_disable_dpm_tasks(hwmgr);
2209         PP_ASSERT_WITH_CODE((0 == result),
2210                         "[disable_dpm_tasks] Failed to disable DPM!",
2211                         );
2212         data->water_marks_bitmap &= ~(WaterMarksLoaded);
2213
2214         return result;
2215 }
2216
#if 0
/* Find the lowest enabled gfx/mem DPM level indices whose clock values
 * satisfy the given minimums. Currently compiled out.
 * NOTE(review): *sclk_idx / *mclk_idx are left untouched when no level
 * qualifies — callers would need to pre-initialize them.
 */
static void vega12_find_min_clock_index(struct pp_hwmgr *hwmgr,
		uint32_t *sclk_idx, uint32_t *mclk_idx,
		uint32_t min_sclk, uint32_t min_mclk)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_dpm_table *dpm_table = &(data->dpm_table);
	uint32_t i;

	/* First enabled gfx level at or above min_sclk. */
	for (i = 0; i < dpm_table->gfx_table.count; i++) {
		if (dpm_table->gfx_table.dpm_levels[i].enabled &&
			dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
			*sclk_idx = i;
			break;
		}
	}

	/* First enabled mem level at or above min_mclk. */
	for (i = 0; i < dpm_table->mem_table.count; i++) {
		if (dpm_table->mem_table.dpm_levels[i].enabled &&
			dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
			*mclk_idx = i;
			break;
		}
	}
}
#endif
2243
#if 0
/* Stub: power-profile state changes are not implemented for Vega12. */
static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request)
{
	return 0;
}

/* Report the gfx clock overdrive as a percentage of the golden
 * (default) top sclk level. Currently compiled out.
 * NOTE(review): divides by the golden table's top-level value — would
 * need a non-empty golden table with a nonzero top clock; verify before
 * re-enabling.
 */
static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
	struct vega12_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

/* Stub: setting sclk overdrive is not implemented for Vega12. */
static int vega12_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	return 0;
}

/* Report the memory clock overdrive as a percentage of the golden
 * (default) top mclk level. Currently compiled out; same divide-by-zero
 * caveat as vega12_get_sclk_od above.
 */
static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
{
	struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
	struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
	struct vega12_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value;

	value = (mclk_table->dpm_levels
			[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	return value;
}

/* Stub: setting mclk overdrive is not implemented for Vega12. */
static int vega12_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
{
	return 0;
}
#endif
2296
2297 static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
2298                                         uint32_t virtual_addr_low,
2299                                         uint32_t virtual_addr_hi,
2300                                         uint32_t mc_addr_low,
2301                                         uint32_t mc_addr_hi,
2302                                         uint32_t size)
2303 {
2304         smum_send_msg_to_smc_with_parameter(hwmgr,
2305                                         PPSMC_MSG_SetSystemVirtualDramAddrHigh,
2306                                         virtual_addr_hi);
2307         smum_send_msg_to_smc_with_parameter(hwmgr,
2308                                         PPSMC_MSG_SetSystemVirtualDramAddrLow,
2309                                         virtual_addr_low);
2310         smum_send_msg_to_smc_with_parameter(hwmgr,
2311                                         PPSMC_MSG_DramLogSetDramAddrHigh,
2312                                         mc_addr_hi);
2313
2314         smum_send_msg_to_smc_with_parameter(hwmgr,
2315                                         PPSMC_MSG_DramLogSetDramAddrLow,
2316                                         mc_addr_low);
2317
2318         smum_send_msg_to_smc_with_parameter(hwmgr,
2319                                         PPSMC_MSG_DramLogSetDramSize,
2320                                         size);
2321         return 0;
2322 }
2323
2324 static int vega12_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
2325                 struct PP_TemperatureRange *thermal_data)
2326 {
2327         struct phm_ppt_v3_information *pptable_information =
2328                 (struct phm_ppt_v3_information *)hwmgr->pptable;
2329
2330         memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
2331
2332         thermal_data->max = pptable_information->us_software_shutdown_temp *
2333                 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
2334
2335         return 0;
2336 }
2337
/* hwmgr callback table wiring the generic powerplay framework to the
 * Vega12-specific implementations in this file.
 */
static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
	/* lifecycle / setup */
	.backend_init = vega12_hwmgr_backend_init,
	.backend_fini = vega12_hwmgr_backend_fini,
	.asic_setup = vega12_setup_asic_task,
	.dynamic_state_management_enable = vega12_enable_dpm_tasks,
	.dynamic_state_management_disable = vega12_disable_dpm_tasks,
	.patch_boot_state = vega12_patch_boot_state,
	/* clock queries and DPM level control */
	.get_sclk = vega12_dpm_get_sclk,
	.get_mclk = vega12_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega12_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega12_dpm_force_dpm_level,
	/* thermal / fan control */
	.stop_thermal_controller = vega12_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega12_fan_ctrl_get_fan_speed_info,
	.reset_fan_speed_to_default =
			vega12_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega12_fan_ctrl_get_fan_speed_rpm,
	.set_fan_control_mode = vega12_set_fan_control_mode,
	.get_fan_control_mode = vega12_get_fan_control_mode,
	.read_sensor = vega12_read_sensor,
	/* display clock/voltage interaction */
	.get_dal_power_level = vega12_get_dal_power_level,
	.get_clock_by_type_with_latency = vega12_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega12_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega12_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega12_display_clock_voltage_request,
	.force_clock_level = vega12_force_clock_level,
	.print_clock_levels = vega12_print_clock_levels,
	.apply_clocks_adjust_rules =
		vega12_apply_clocks_adjust_rules,
	.display_config_changed = vega12_display_configuration_changed_task,
	/* multimedia power gating */
	.powergate_uvd = vega12_power_gate_uvd,
	.powergate_vce = vega12_power_gate_vce,
	.check_smc_update_required_for_display_configuration =
			vega12_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega12_power_off_asic,
	.disable_smc_firmware_ctf = vega12_thermal_disable_alert,
#if 0
	/* overdrive / profile hooks: implementations above are compiled out */
	.set_power_profile_state = vega12_set_power_profile_state,
	.get_sclk_od = vega12_get_sclk_od,
	.set_sclk_od = vega12_set_sclk_od,
	.get_mclk_od = vega12_get_mclk_od,
	.set_mclk_od = vega12_set_mclk_od,
#endif
	.notify_cac_buffer_info = vega12_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega12_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega12_start_thermal_controller,
};
2386
2387 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
2388 {
2389         hwmgr->hwmgr_func = &vega12_hwmgr_funcs;
2390         hwmgr->pptable_func = &vega12_pptable_funcs;
2391
2392         return 0;
2393 }