1 /*
2  * Copyright 2021 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #define SWSMU_CODE_LAYER_L2
25
26 #include <linux/firmware.h>
27 #include <linux/pci.h>
28 #include <linux/i2c.h>
29 #include "amdgpu.h"
30 #include "amdgpu_smu.h"
31 #include "atomfirmware.h"
32 #include "amdgpu_atomfirmware.h"
33 #include "amdgpu_atombios.h"
34 #include "smu_v13_0.h"
35 #include "smu13_driver_if_v13_0_0.h"
36 #include "soc15_common.h"
37 #include "atom.h"
38 #include "smu_v13_0_0_ppt.h"
39 #include "smu_v13_0_0_pptable.h"
40 #include "smu_v13_0_0_ppsmc.h"
41 #include "nbio/nbio_4_3_0_offset.h"
42 #include "nbio/nbio_4_3_0_sh_mask.h"
43 #include "mp/mp_13_0_0_offset.h"
44 #include "mp/mp_13_0_0_sh_mask.h"
45
46 #include "asic_reg/mp/mp_13_0_0_sh_mask.h"
47 #include "smu_cmn.h"
48 #include "amdgpu_ras.h"
49
50 /*
51  * DO NOT use these for err/warn/info/debug messages.
52  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
53  * They are more MGPU friendly.
54  */
55 #undef pr_err
56 #undef pr_warn
57 #undef pr_info
58 #undef pr_debug
59
60 #define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
61
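/*
 * SMC_DPM_FEATURE collects the core DPM feature bits; smu_v13_0_0_is_dpm_running()
 * reports DPM as active when any of these features is enabled.
 */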
62 #define FEATURE_MASK(feature) (1ULL << feature)
63 #define SMC_DPM_FEATURE ( \
64         FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)     | \
65         FEATURE_MASK(FEATURE_DPM_UCLK_BIT)       | \
66         FEATURE_MASK(FEATURE_DPM_LINK_BIT)       | \
67         FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)     | \
68         FEATURE_MASK(FEATURE_DPM_FCLK_BIT)       | \
69         FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
70
71 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE   0x4000
72
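/*
 * MP1 SMN C2PMSG mailbox registers used for driver <-> PMFW communication.
 * C2PMSG_66/82/90 are the usual message/argument/response registers; the
 * remaining ones back auxiliary paths such as the DebugSMC interface used
 * for DEBUGSMC_MSG_Mode1Reset below.
 */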
73 #define mmMP1_SMN_C2PMSG_66                                                                            0x0282
74 #define mmMP1_SMN_C2PMSG_66_BASE_IDX                                                                   0
75
76 #define mmMP1_SMN_C2PMSG_82                                                                            0x0292
77 #define mmMP1_SMN_C2PMSG_82_BASE_IDX                                                                   0
78
79 #define mmMP1_SMN_C2PMSG_90                                                                            0x029a
80 #define mmMP1_SMN_C2PMSG_90_BASE_IDX                                                                   0
81
82 #define mmMP1_SMN_C2PMSG_75                                                                            0x028b
83 #define mmMP1_SMN_C2PMSG_75_BASE_IDX                                                                   0
84
85 #define mmMP1_SMN_C2PMSG_53                                                                            0x0275
86 #define mmMP1_SMN_C2PMSG_53_BASE_IDX                                                                   0
87
88 #define mmMP1_SMN_C2PMSG_54                                                                            0x0276
89 #define mmMP1_SMN_C2PMSG_54_BASE_IDX                                                                   0
90
91 #define DEBUGSMC_MSG_Mode1Reset 2
92
93 /*
94  * SMU_v13_0_10 supports ECCTABLE since version 80.34.0.
95  * Use this to check whether the ECCTABLE feature is supported.
96  */
97 #define SUPPORT_ECCTABLE_SMU_13_0_10_VERSION 0x00502200
98
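/*
 * Driver-internal indices for the overdrive settings whose min/max limits
 * are looked up in smu_v13_0_0_get_od_setting_limits().
 */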
99 #define PP_OD_FEATURE_GFXCLK_FMIN                       0
100 #define PP_OD_FEATURE_GFXCLK_FMAX                       1
101 #define PP_OD_FEATURE_UCLK_FMIN                         2
102 #define PP_OD_FEATURE_UCLK_FMAX                         3
103 #define PP_OD_FEATURE_GFX_VF_CURVE                      4
104
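/*
 * Message map: generic SMU message -> ASIC-specific PPSMC index. The trailing
 * flag marks messages that remain valid when running as an SRIOV VF.
 */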
105 static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = {
106         MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                 1),
107         MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,               1),
108         MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,          1),
109         MSG_MAP(SetAllowedFeaturesMaskLow,      PPSMC_MSG_SetAllowedFeaturesMaskLow,   0),
110         MSG_MAP(SetAllowedFeaturesMaskHigh,     PPSMC_MSG_SetAllowedFeaturesMaskHigh,  0),
111         MSG_MAP(EnableAllSmuFeatures,           PPSMC_MSG_EnableAllSmuFeatures,        0),
112         MSG_MAP(DisableAllSmuFeatures,          PPSMC_MSG_DisableAllSmuFeatures,       0),
113         MSG_MAP(EnableSmuFeaturesLow,           PPSMC_MSG_EnableSmuFeaturesLow,        1),
114         MSG_MAP(EnableSmuFeaturesHigh,          PPSMC_MSG_EnableSmuFeaturesHigh,       1),
115         MSG_MAP(DisableSmuFeaturesLow,          PPSMC_MSG_DisableSmuFeaturesLow,       1),
116         MSG_MAP(DisableSmuFeaturesHigh,         PPSMC_MSG_DisableSmuFeaturesHigh,      1),
117         MSG_MAP(GetEnabledSmuFeaturesLow,       PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
118         MSG_MAP(GetEnabledSmuFeaturesHigh,      PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
119         MSG_MAP(SetWorkloadMask,                PPSMC_MSG_SetWorkloadMask,             1),
120         MSG_MAP(SetPptLimit,                    PPSMC_MSG_SetPptLimit,                 0),
121         MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,       1),
122         MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,        1),
123         MSG_MAP(SetToolsDramAddrHigh,           PPSMC_MSG_SetToolsDramAddrHigh,        0),
124         MSG_MAP(SetToolsDramAddrLow,            PPSMC_MSG_SetToolsDramAddrLow,         0),
125         MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,       1),
126         MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,       0),
127         MSG_MAP(UseDefaultPPTable,              PPSMC_MSG_UseDefaultPPTable,           0),
128         MSG_MAP(RunDcBtc,                       PPSMC_MSG_RunDcBtc,                    0),
129         MSG_MAP(EnterBaco,                      PPSMC_MSG_EnterBaco,                   0),
130         MSG_MAP(ExitBaco,                       PPSMC_MSG_ExitBaco,                    0),
131         MSG_MAP(SetSoftMinByFreq,               PPSMC_MSG_SetSoftMinByFreq,            1),
132         MSG_MAP(SetSoftMaxByFreq,               PPSMC_MSG_SetSoftMaxByFreq,            1),
133         MSG_MAP(SetHardMinByFreq,               PPSMC_MSG_SetHardMinByFreq,            1),
134         MSG_MAP(SetHardMaxByFreq,               PPSMC_MSG_SetHardMaxByFreq,            0),
135         MSG_MAP(GetMinDpmFreq,                  PPSMC_MSG_GetMinDpmFreq,               1),
136         MSG_MAP(GetMaxDpmFreq,                  PPSMC_MSG_GetMaxDpmFreq,               1),
137         MSG_MAP(GetDpmFreqByIndex,              PPSMC_MSG_GetDpmFreqByIndex,           1),
138         MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,                  0),
139         MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,                0),
140         MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,                 0),
141         MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,               0),
142         MSG_MAP(GetDcModeMaxDpmFreq,            PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
143         MSG_MAP(OverridePcieParameters,         PPSMC_MSG_OverridePcieParameters,      0),
144         MSG_MAP(DramLogSetDramAddrHigh,         PPSMC_MSG_DramLogSetDramAddrHigh,      0),
145         MSG_MAP(DramLogSetDramAddrLow,          PPSMC_MSG_DramLogSetDramAddrLow,       0),
146         MSG_MAP(DramLogSetDramSize,             PPSMC_MSG_DramLogSetDramSize,          0),
147         MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,                 0),
148         MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,              0),
149         MSG_MAP(SetMGpuFanBoostLimitRpm,        PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
150         MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 0),
151         MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource,           0),
152         MSG_MAP(Mode1Reset,                     PPSMC_MSG_Mode1Reset,                  0),
153         MSG_MAP(Mode2Reset,                     PPSMC_MSG_Mode2Reset,                  0),
154         MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,         0),
155         MSG_MAP(DFCstateControl,                PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
156         MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
157         MSG_MAP(SetNumBadMemoryPagesRetired,    PPSMC_MSG_SetNumBadMemoryPagesRetired,   0),
158         MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
159                             PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,   0),
160         MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,                 0),
161         MSG_MAP(AllowIHHostInterrupt,           PPSMC_MSG_AllowIHHostInterrupt,        0),
162         MSG_MAP(ReenableAcDcInterrupt,          PPSMC_MSG_ReenableAcDcInterrupt,       0),
163 };
164
165 static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
166         CLK_MAP(GFXCLK,         PPCLK_GFXCLK),
167         CLK_MAP(SCLK,           PPCLK_GFXCLK),
168         CLK_MAP(SOCCLK,         PPCLK_SOCCLK),
169         CLK_MAP(FCLK,           PPCLK_FCLK),
170         CLK_MAP(UCLK,           PPCLK_UCLK),
171         CLK_MAP(MCLK,           PPCLK_UCLK),
172         CLK_MAP(VCLK,           PPCLK_VCLK_0),
173         CLK_MAP(VCLK1,          PPCLK_VCLK_1),
174         CLK_MAP(DCLK,           PPCLK_DCLK_0),
175         CLK_MAP(DCLK1,          PPCLK_DCLK_1),
176 };
177
178 static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] = {
179         FEA_MAP(FW_DATA_READ),
180         FEA_MAP(DPM_GFXCLK),
181         FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
182         FEA_MAP(DPM_UCLK),
183         FEA_MAP(DPM_FCLK),
184         FEA_MAP(DPM_SOCCLK),
185         FEA_MAP(DPM_MP0CLK),
186         FEA_MAP(DPM_LINK),
187         FEA_MAP(DPM_DCN),
188         FEA_MAP(VMEMP_SCALING),
189         FEA_MAP(VDDIO_MEM_SCALING),
190         FEA_MAP(DS_GFXCLK),
191         FEA_MAP(DS_SOCCLK),
192         FEA_MAP(DS_FCLK),
193         FEA_MAP(DS_LCLK),
194         FEA_MAP(DS_DCFCLK),
195         FEA_MAP(DS_UCLK),
196         FEA_MAP(GFX_ULV),
197         FEA_MAP(FW_DSTATE),
198         FEA_MAP(GFXOFF),
199         FEA_MAP(BACO),
200         FEA_MAP(MM_DPM),
201         FEA_MAP(SOC_MPCLK_DS),
202         FEA_MAP(BACO_MPCLK_DS),
203         FEA_MAP(THROTTLERS),
204         FEA_MAP(SMARTSHIFT),
205         FEA_MAP(GTHR),
206         FEA_MAP(ACDC),
207         FEA_MAP(VR0HOT),
208         FEA_MAP(FW_CTF),
209         FEA_MAP(FAN_CONTROL),
210         FEA_MAP(GFX_DCS),
211         FEA_MAP(GFX_READ_MARGIN),
212         FEA_MAP(LED_DISPLAY),
213         FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
214         FEA_MAP(OUT_OF_BAND_MONITOR),
215         FEA_MAP(OPTIMIZED_VMIN),
216         FEA_MAP(GFX_IMU),
217         FEA_MAP(BOOT_TIME_CAL),
218         FEA_MAP(GFX_PCC_DFLL),
219         FEA_MAP(SOC_CG),
220         FEA_MAP(DF_CSTATE),
221         FEA_MAP(GFX_EDC),
222         FEA_MAP(BOOT_POWER_OPT),
223         FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
224         FEA_MAP(DS_VCN),
225         FEA_MAP(BACO_CG),
226         FEA_MAP(MEM_TEMP_READ),
227         FEA_MAP(ATHUB_MMHUB_PG),
228         FEA_MAP(SOC_PCC),
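        /*
         * These generic features have no dedicated firmware bit; alias them
         * onto the shared MM_DPM and THROTTLERS bits.
         */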
229         [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
230         [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
231         [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
232 };
233
234 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
235         TAB_MAP(PPTABLE),
236         TAB_MAP(WATERMARKS),
237         TAB_MAP(AVFS_PSM_DEBUG),
238         TAB_MAP(PMSTATUSLOG),
239         TAB_MAP(SMU_METRICS),
240         TAB_MAP(DRIVER_SMU_CONFIG),
241         TAB_MAP(ACTIVITY_MONITOR_COEFF),
242         [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
243         TAB_MAP(I2C_COMMANDS),
244         TAB_MAP(ECCINFO),
245         TAB_MAP(OVERDRIVE),
246 };
247
248 static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
249         PWR_MAP(AC),
250         PWR_MAP(DC),
251 };
252
253 static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
254         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,       WORKLOAD_PPLIB_DEFAULT_BIT),
255         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
256         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
257         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,                WORKLOAD_PPLIB_VIDEO_BIT),
258         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,                   WORKLOAD_PPLIB_VR_BIT),
259         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,              WORKLOAD_PPLIB_COMPUTE_BIT),
260         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
261         WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,             WORKLOAD_PPLIB_WINDOW_3D_BIT),
262 };
263
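/*
 * Translate ASIC throttler status bits (derived from the SmuMetrics
 * ThrottlingPercentage[] readings) into the driver-generic SMU_THROTTLER_*
 * bit positions reported to userspace.
 */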
264 static const uint8_t smu_v13_0_0_throttler_map[] = {
265         [THROTTLER_PPT0_BIT]            = (SMU_THROTTLER_PPT0_BIT),
266         [THROTTLER_PPT1_BIT]            = (SMU_THROTTLER_PPT1_BIT),
267         [THROTTLER_PPT2_BIT]            = (SMU_THROTTLER_PPT2_BIT),
268         [THROTTLER_PPT3_BIT]            = (SMU_THROTTLER_PPT3_BIT),
269         [THROTTLER_TDC_GFX_BIT]         = (SMU_THROTTLER_TDC_GFX_BIT),
270         [THROTTLER_TDC_SOC_BIT]         = (SMU_THROTTLER_TDC_SOC_BIT),
271         [THROTTLER_TEMP_EDGE_BIT]       = (SMU_THROTTLER_TEMP_EDGE_BIT),
272         [THROTTLER_TEMP_HOTSPOT_BIT]    = (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
273         [THROTTLER_TEMP_MEM_BIT]        = (SMU_THROTTLER_TEMP_MEM_BIT),
274         [THROTTLER_TEMP_VR_GFX_BIT]     = (SMU_THROTTLER_TEMP_VR_GFX_BIT),
275         [THROTTLER_TEMP_VR_SOC_BIT]     = (SMU_THROTTLER_TEMP_VR_SOC_BIT),
276         [THROTTLER_TEMP_VR_MEM0_BIT]    = (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
277         [THROTTLER_TEMP_VR_MEM1_BIT]    = (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
278         [THROTTLER_TEMP_LIQUID0_BIT]    = (SMU_THROTTLER_TEMP_LIQUID0_BIT),
279         [THROTTLER_TEMP_LIQUID1_BIT]    = (SMU_THROTTLER_TEMP_LIQUID1_BIT),
280         [THROTTLER_GFX_APCC_PLUS_BIT]   = (SMU_THROTTLER_APCC_BIT),
281         [THROTTLER_FIT_BIT]             = (SMU_THROTTLER_FIT_BIT),
282 };
283
284 static int
285 smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
286                                   uint32_t *feature_mask, uint32_t num)
287 {
288         struct amdgpu_device *adev = smu->adev;
289         u32 smu_version;
290
291         if (num > 2)
292                 return -EINVAL;
293
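        /*
         * Start from an all-features-allowed mask; the two u32 words are then
         * treated as one 64-bit mask and individual bits are cleared for
         * features disabled via driver parameters.
         */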
294         memset(feature_mask, 0xff, sizeof(uint32_t) * num);
295
296         if (!(adev->pm.pp_feature & PP_SCLK_DPM_MASK)) {
297                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
298                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_IMU_BIT);
299         }
300
301         if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
302             !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
303                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);
304
305         if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
306                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
307
308         /* PMFW 78.58 contains a critical fix for the gfxoff feature */
309         smu_cmn_get_smc_version(smu, NULL, &smu_version);
310         if ((smu_version < 0x004e3a00) ||
311              !(adev->pm.pp_feature & PP_GFXOFF_MASK))
312                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);
313
314         if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
315                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
316                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
317                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
318         }
319
320         if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
321                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
322
323         if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
324                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
325                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
326         }
327
328         if (!(adev->pm.pp_feature & PP_ULV_MASK))
329                 *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
330
331         return 0;
332 }
333
334 static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
335 {
336         struct smu_table_context *table_context = &smu->smu_table;
337         struct smu_13_0_0_powerplay_table *powerplay_table =
338                 table_context->power_play_table;
339         struct smu_baco_context *smu_baco = &smu->smu_baco;
340         PPTable_t *pptable = smu->smu_table.driver_pptable;
341 #if 0
342         PPTable_t *pptable = smu->smu_table.driver_pptable;
343         const OverDriveLimits_t * const overdrive_upperlimits =
344                                 &pptable->SkuTable.OverDriveLimitsBasicMax;
345         const OverDriveLimits_t * const overdrive_lowerlimits =
346                                 &pptable->SkuTable.OverDriveLimitsMin;
347 #endif
348
349         if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
350                 smu->dc_controlled_by_gpio = true;
351
352         if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
353             powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
354                 smu_baco->platform_support = true;
355
356         if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
357                 smu_baco->maco_support = true;
358
359         /*
360          * We are in the transition to a new OD mechanism.
361          * Disable the OD feature support for SMU13 temporarily.
362          * TODO: revert this once the new OD mechanism is online
363          */
364 #if 0
365         if (!overdrive_lowerlimits->FeatureCtrlMask ||
366             !overdrive_upperlimits->FeatureCtrlMask)
367                 smu->od_enabled = false;
368
369         /*
370          * Instead of having its own buffer space and get overdrive_table copied,
371          * smu->od_settings just points to the actual overdrive_table
372          */
373         smu->od_settings = &powerplay_table->overdrive_table;
374 #else
375         smu->od_enabled = false;
376 #endif
377
378         table_context->thermal_controller_type =
379                 powerplay_table->thermal_controller_type;
380
381         smu->adev->pm.no_fan =
382                 !(pptable->SkuTable.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));
383
384         return 0;
385 }
386
387 static int smu_v13_0_0_store_powerplay_table(struct smu_context *smu)
388 {
389         struct smu_table_context *table_context = &smu->smu_table;
390         struct smu_13_0_0_powerplay_table *powerplay_table =
391                 table_context->power_play_table;
392
393         memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
394                sizeof(PPTable_t));
395
396         return 0;
397 }
398
399 #ifndef atom_smc_dpm_info_table_13_0_0
400 struct atom_smc_dpm_info_table_13_0_0 {
401         struct atom_common_table_header table_header;
402         BoardTable_t BoardTable;
403 };
404 #endif
405
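/*
 * Pull the board-specific parameters from the VBIOS smc_dpm_info data table
 * and overwrite the BoardTable section of the driver PPTable with them.
 */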
406 static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
407 {
408         struct smu_table_context *table_context = &smu->smu_table;
409         PPTable_t *smc_pptable = table_context->driver_pptable;
410         struct atom_smc_dpm_info_table_13_0_0 *smc_dpm_table;
411         BoardTable_t *BoardTable = &smc_pptable->BoardTable;
412         int index, ret;
413
414         index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
415                                             smc_dpm_info);
416
417         ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
418                                              (uint8_t **)&smc_dpm_table);
419         if (ret)
420                 return ret;
421
422         memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));
423
424         return 0;
425 }
426
427 static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
428                                              void **table,
429                                              uint32_t *size)
430 {
431         struct smu_table_context *smu_table = &smu->smu_table;
432         void *combo_pptable = smu_table->combo_pptable;
433         int ret = 0;
434
435         ret = smu_cmn_get_combo_pptable(smu);
436         if (ret)
437                 return ret;
438
439         *table = combo_pptable;
440         *size = sizeof(struct smu_13_0_0_powerplay_table);
441
442         return 0;
443 }
444
445 static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
446 {
447         struct smu_table_context *smu_table = &smu->smu_table;
448         struct amdgpu_device *adev = smu->adev;
449         int ret = 0;
450
451         if (amdgpu_sriov_vf(smu->adev))
452                 return 0;
453
454         ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
455                                                 &smu_table->power_play_table,
456                                                 &smu_table->power_play_table_size);
457         if (ret)
458                 return ret;
459
460         ret = smu_v13_0_0_store_powerplay_table(smu);
461         if (ret)
462                 return ret;
463
464         /*
465          * With SCPM enabled, the operation below will be handled
466          * by PSP. Driver involvement is unnecessary.
467          */
468         if (!adev->scpm_enabled) {
469                 ret = smu_v13_0_0_append_powerplay_table(smu);
470                 if (ret)
471                         return ret;
472         }
473
474         ret = smu_v13_0_0_check_powerplay_table(smu);
475         if (ret)
476                 return ret;
477
478         return ret;
479 }
480
481 static int smu_v13_0_0_tables_init(struct smu_context *smu)
482 {
483         struct smu_table_context *smu_table = &smu->smu_table;
484         struct smu_table *tables = smu_table->tables;
485
486         SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
487                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
488         SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
489                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
490         SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
491                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
492         SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
493                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
494         SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t),
495                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
496         SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
497                        PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
498         SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
499                        sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
500                        AMDGPU_GEM_DOMAIN_VRAM);
501         SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
502                         PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
503         SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
504                         PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
505
506         smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
507         if (!smu_table->metrics_table)
508                 goto err0_out;
509         smu_table->metrics_time = 0;
510
511         smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
512         smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
513         if (!smu_table->gpu_metrics_table)
514                 goto err1_out;
515
516         smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
517         if (!smu_table->watermarks_table)
518                 goto err2_out;
519
520         smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
521         if (!smu_table->ecc_table)
522                 goto err3_out;
523
524         return 0;
525
526 err3_out:
527         kfree(smu_table->watermarks_table);
528 err2_out:
529         kfree(smu_table->gpu_metrics_table);
530 err1_out:
531         kfree(smu_table->metrics_table);
532 err0_out:
533         return -ENOMEM;
534 }
535
536 static int smu_v13_0_0_allocate_dpm_context(struct smu_context *smu)
537 {
538         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
539
540         smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
541                                        GFP_KERNEL);
542         if (!smu_dpm->dpm_context)
543                 return -ENOMEM;
544
545         smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
546
547         return 0;
548 }
549
550 static int smu_v13_0_0_init_smc_tables(struct smu_context *smu)
551 {
552         int ret = 0;
553
554         ret = smu_v13_0_0_tables_init(smu);
555         if (ret)
556                 return ret;
557
558         ret = smu_v13_0_0_allocate_dpm_context(smu);
559         if (ret)
560                 return ret;
561
562         return smu_v13_0_init_smc_tables(smu);
563 }
564
565 static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu)
566 {
567         struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
568         struct smu_table_context *table_context = &smu->smu_table;
569         PPTable_t *pptable = table_context->driver_pptable;
570         SkuTable_t *skutable = &pptable->SkuTable;
571         struct smu_13_0_dpm_table *dpm_table;
572         struct smu_13_0_pcie_table *pcie_table;
573         uint32_t link_level;
574         int ret = 0;
575
576         /* socclk dpm table setup */
577         dpm_table = &dpm_context->dpm_tables.soc_table;
578         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
579                 ret = smu_v13_0_set_single_dpm_table(smu,
580                                                      SMU_SOCCLK,
581                                                      dpm_table);
582                 if (ret)
583                         return ret;
584         } else {
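                /*
                 * DPM for this clock is disabled: expose a single level at the
                 * VBIOS bootup frequency (boot_values are in 10 kHz, hence the
                 * /100 conversion to MHz). The same fallback is used for the
                 * other clock domains below.
                 */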
585                 dpm_table->count = 1;
586                 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
587                 dpm_table->dpm_levels[0].enabled = true;
588                 dpm_table->min = dpm_table->dpm_levels[0].value;
589                 dpm_table->max = dpm_table->dpm_levels[0].value;
590         }
591
592         /* gfxclk dpm table setup */
593         dpm_table = &dpm_context->dpm_tables.gfx_table;
594         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
595                 ret = smu_v13_0_set_single_dpm_table(smu,
596                                                      SMU_GFXCLK,
597                                                      dpm_table);
598                 if (ret)
599                         return ret;
600
601                 /*
602                  * Update the reported maximum shader clock to the value
603                  * which can be guaranteed to be achieved on all cards. This
604                  * is aligned with the Windows setting. Since that value might
605                  * not be the peak frequency the card can achieve, it is normal
606                  * for some real-time clock frequencies to exceed this labelled
607                  * maximum clock frequency (for example in the pp_dpm_sclk
608                  * sysfs output).
609                  */
610                 if (skutable->DriverReportedClocks.GameClockAc &&
611                     (dpm_table->dpm_levels[dpm_table->count - 1].value >
612                     skutable->DriverReportedClocks.GameClockAc)) {
613                         dpm_table->dpm_levels[dpm_table->count - 1].value =
614                                 skutable->DriverReportedClocks.GameClockAc;
615                         dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
616                 }
617         } else {
618                 dpm_table->count = 1;
619                 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
620                 dpm_table->dpm_levels[0].enabled = true;
621                 dpm_table->min = dpm_table->dpm_levels[0].value;
622                 dpm_table->max = dpm_table->dpm_levels[0].value;
623         }
624
625         /* uclk dpm table setup */
626         dpm_table = &dpm_context->dpm_tables.uclk_table;
627         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
628                 ret = smu_v13_0_set_single_dpm_table(smu,
629                                                      SMU_UCLK,
630                                                      dpm_table);
631                 if (ret)
632                         return ret;
633         } else {
634                 dpm_table->count = 1;
635                 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
636                 dpm_table->dpm_levels[0].enabled = true;
637                 dpm_table->min = dpm_table->dpm_levels[0].value;
638                 dpm_table->max = dpm_table->dpm_levels[0].value;
639         }
640
641         /* fclk dpm table setup */
642         dpm_table = &dpm_context->dpm_tables.fclk_table;
643         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
644                 ret = smu_v13_0_set_single_dpm_table(smu,
645                                                      SMU_FCLK,
646                                                      dpm_table);
647                 if (ret)
648                         return ret;
649         } else {
650                 dpm_table->count = 1;
651                 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
652                 dpm_table->dpm_levels[0].enabled = true;
653                 dpm_table->min = dpm_table->dpm_levels[0].value;
654                 dpm_table->max = dpm_table->dpm_levels[0].value;
655         }
656
657         /* vclk dpm table setup */
658         dpm_table = &dpm_context->dpm_tables.vclk_table;
659         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
660                 ret = smu_v13_0_set_single_dpm_table(smu,
661                                                      SMU_VCLK,
662                                                      dpm_table);
663                 if (ret)
664                         return ret;
665         } else {
666                 dpm_table->count = 1;
667                 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
668                 dpm_table->dpm_levels[0].enabled = true;
669                 dpm_table->min = dpm_table->dpm_levels[0].value;
670                 dpm_table->max = dpm_table->dpm_levels[0].value;
671         }
672
673         /* dclk dpm table setup */
674         dpm_table = &dpm_context->dpm_tables.dclk_table;
675         if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
676                 ret = smu_v13_0_set_single_dpm_table(smu,
677                                                      SMU_DCLK,
678                                                      dpm_table);
679                 if (ret)
680                         return ret;
681         } else {
682                 dpm_table->count = 1;
683                 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
684                 dpm_table->dpm_levels[0].enabled = true;
685                 dpm_table->min = dpm_table->dpm_levels[0].value;
686                 dpm_table->max = dpm_table->dpm_levels[0].value;
687         }
688
689         /* lclk dpm table setup */
690         pcie_table = &dpm_context->dpm_tables.pcie_table;
691         pcie_table->num_of_link_levels = 0;
692         for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
693                 if (!skutable->PcieGenSpeed[link_level] &&
694                     !skutable->PcieLaneCount[link_level] &&
695                     !skutable->LclkFreq[link_level])
696                         continue;
697
698                 pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
699                                         skutable->PcieGenSpeed[link_level];
700                 pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
701                                         skutable->PcieLaneCount[link_level];
702                 pcie_table->clk_freq[pcie_table->num_of_link_levels] =
703                                         skutable->LclkFreq[link_level];
704                 pcie_table->num_of_link_levels++;
705         }
706
707         return 0;
708 }
709
710 static bool smu_v13_0_0_is_dpm_running(struct smu_context *smu)
711 {
712         int ret = 0;
713         uint64_t feature_enabled;
714
715         ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
716         if (ret)
717                 return false;
718
719         return !!(feature_enabled & SMC_DPM_FEATURE);
720 }
721
722 static void smu_v13_0_0_dump_pptable(struct smu_context *smu)
723 {
724         struct smu_table_context *table_context = &smu->smu_table;
725         PPTable_t *pptable = table_context->driver_pptable;
726         SkuTable_t *skutable = &pptable->SkuTable;
727
728         dev_info(smu->adev->dev, "Dumped PPTable:\n");
729
730         dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version);
731         dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]);
732         dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]);
733 }
734
735 static int smu_v13_0_0_system_features_control(struct smu_context *smu,
736                                                   bool en)
737 {
738         return smu_v13_0_system_features_control(smu, en);
739 }
740
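/*
 * Collapse the per-throttler ThrottlingPercentage[] samples into a status
 * bitmask: any non-zero percentage marks that throttler as active.
 */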
741 static uint32_t smu_v13_0_get_throttler_status(SmuMetrics_t *metrics)
742 {
743         uint32_t throttler_status = 0;
744         int i;
745
746         for (i = 0; i < THROTTLER_COUNT; i++)
747                 throttler_status |=
748                         (metrics->ThrottlingPercentage[i] ? 1U << i : 0);
749
750         return throttler_status;
751 }
752
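/*
 * Activity percentage at or below which the deep-sleep adjusted (PostDs)
 * average frequencies are reported instead of the PreDs values.
 */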
753 #define SMU_13_0_0_BUSY_THRESHOLD       15
754 static int smu_v13_0_0_get_smu_metrics_data(struct smu_context *smu,
755                                             MetricsMember_t member,
756                                             uint32_t *value)
757 {
758         struct smu_table_context *smu_table = &smu->smu_table;
759         SmuMetrics_t *metrics =
760                 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
761         int ret = 0;
762
763         ret = smu_cmn_get_metrics_table(smu,
764                                         NULL,
765                                         false);
766         if (ret)
767                 return ret;
768
769         switch (member) {
770         case METRICS_CURR_GFXCLK:
771                 *value = metrics->CurrClock[PPCLK_GFXCLK];
772                 break;
773         case METRICS_CURR_SOCCLK:
774                 *value = metrics->CurrClock[PPCLK_SOCCLK];
775                 break;
776         case METRICS_CURR_UCLK:
777                 *value = metrics->CurrClock[PPCLK_UCLK];
778                 break;
779         case METRICS_CURR_VCLK:
780                 *value = metrics->CurrClock[PPCLK_VCLK_0];
781                 break;
782         case METRICS_CURR_VCLK1:
783                 *value = metrics->CurrClock[PPCLK_VCLK_1];
784                 break;
785         case METRICS_CURR_DCLK:
786                 *value = metrics->CurrClock[PPCLK_DCLK_0];
787                 break;
788         case METRICS_CURR_DCLK1:
789                 *value = metrics->CurrClock[PPCLK_DCLK_1];
790                 break;
791         case METRICS_CURR_FCLK:
792                 *value = metrics->CurrClock[PPCLK_FCLK];
793                 break;
794         case METRICS_AVERAGE_GFXCLK:
795                 if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
796                         *value = metrics->AverageGfxclkFrequencyPostDs;
797                 else
798                         *value = metrics->AverageGfxclkFrequencyPreDs;
799                 break;
800         case METRICS_AVERAGE_FCLK:
801                 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
802                         *value = metrics->AverageFclkFrequencyPostDs;
803                 else
804                         *value = metrics->AverageFclkFrequencyPreDs;
805                 break;
806         case METRICS_AVERAGE_UCLK:
807                 if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
808                         *value = metrics->AverageMemclkFrequencyPostDs;
809                 else
810                         *value = metrics->AverageMemclkFrequencyPreDs;
811                 break;
812         case METRICS_AVERAGE_VCLK:
813                 *value = metrics->AverageVclk0Frequency;
814                 break;
815         case METRICS_AVERAGE_DCLK:
816                 *value = metrics->AverageDclk0Frequency;
817                 break;
818         case METRICS_AVERAGE_VCLK1:
819                 *value = metrics->AverageVclk1Frequency;
820                 break;
821         case METRICS_AVERAGE_DCLK1:
822                 *value = metrics->AverageDclk1Frequency;
823                 break;
824         case METRICS_AVERAGE_GFXACTIVITY:
825                 *value = metrics->AverageGfxActivity;
826                 break;
827         case METRICS_AVERAGE_MEMACTIVITY:
828                 *value = metrics->AverageUclkActivity;
829                 break;
830         case METRICS_AVERAGE_SOCKETPOWER:
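                /*
                 * PMFW reports the average socket power in watts; shift it
                 * into the 8.8 fixed-point format used by the power readout
                 * path.
                 */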
831                 *value = metrics->AverageSocketPower << 8;
832                 break;
833         case METRICS_TEMPERATURE_EDGE:
834                 *value = metrics->AvgTemperature[TEMP_EDGE] *
835                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
836                 break;
837         case METRICS_TEMPERATURE_HOTSPOT:
838                 *value = metrics->AvgTemperature[TEMP_HOTSPOT] *
839                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
840                 break;
841         case METRICS_TEMPERATURE_MEM:
842                 *value = metrics->AvgTemperature[TEMP_MEM] *
843                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
844                 break;
845         case METRICS_TEMPERATURE_VRGFX:
846                 *value = metrics->AvgTemperature[TEMP_VR_GFX] *
847                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
848                 break;
849         case METRICS_TEMPERATURE_VRSOC:
850                 *value = metrics->AvgTemperature[TEMP_VR_SOC] *
851                         SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
852                 break;
853         case METRICS_THROTTLER_STATUS:
854                 *value = smu_v13_0_get_throttler_status(metrics);
855                 break;
856         case METRICS_CURR_FANSPEED:
857                 *value = metrics->AvgFanRpm;
858                 break;
859         case METRICS_CURR_FANPWM:
860                 *value = metrics->AvgFanPwm;
861                 break;
862         case METRICS_VOLTAGE_VDDGFX:
863                 *value = metrics->AvgVoltage[SVI_PLANE_GFX];
864                 break;
865         case METRICS_PCIE_RATE:
866                 *value = metrics->PcieRate;
867                 break;
868         case METRICS_PCIE_WIDTH:
869                 *value = metrics->PcieWidth;
870                 break;
871         default:
872                 *value = UINT_MAX;
873                 break;
874         }
875
876         return ret;
877 }
878
879 static int smu_v13_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
880                                              enum smu_clk_type clk_type,
881                                              uint32_t *min,
882                                              uint32_t *max)
883 {
884         struct smu_13_0_dpm_context *dpm_context =
885                 smu->smu_dpm.dpm_context;
886         struct smu_13_0_dpm_table *dpm_table;
887
888         switch (clk_type) {
889         case SMU_MCLK:
890         case SMU_UCLK:
891                 /* uclk dpm table */
892                 dpm_table = &dpm_context->dpm_tables.uclk_table;
893                 break;
894         case SMU_GFXCLK:
895         case SMU_SCLK:
896                 /* gfxclk dpm table */
897                 dpm_table = &dpm_context->dpm_tables.gfx_table;
898                 break;
899         case SMU_SOCCLK:
900                 /* socclk dpm table */
901                 dpm_table = &dpm_context->dpm_tables.soc_table;
902                 break;
903         case SMU_FCLK:
904                 /* fclk dpm table */
905                 dpm_table = &dpm_context->dpm_tables.fclk_table;
906                 break;
907         case SMU_VCLK:
908         case SMU_VCLK1:
909                 /* vclk dpm table */
910                 dpm_table = &dpm_context->dpm_tables.vclk_table;
911                 break;
912         case SMU_DCLK:
913         case SMU_DCLK1:
914                 /* dclk dpm table */
915                 dpm_table = &dpm_context->dpm_tables.dclk_table;
916                 break;
917         default:
918                 dev_err(smu->adev->dev, "Unsupported clock type!\n");
919                 return -EINVAL;
920         }
921
922         if (min)
923                 *min = dpm_table->min;
924         if (max)
925                 *max = dpm_table->max;
926
927         return 0;
928 }
929
930 static int smu_v13_0_0_read_sensor(struct smu_context *smu,
931                                    enum amd_pp_sensors sensor,
932                                    void *data,
933                                    uint32_t *size)
934 {
935         struct smu_table_context *table_context = &smu->smu_table;
936         PPTable_t *smc_pptable = table_context->driver_pptable;
937         int ret = 0;
938
939         switch (sensor) {
940         case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
941                 *(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
942                 *size = 4;
943                 break;
944         case AMDGPU_PP_SENSOR_MEM_LOAD:
945                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
946                                                        METRICS_AVERAGE_MEMACTIVITY,
947                                                        (uint32_t *)data);
948                 *size = 4;
949                 break;
950         case AMDGPU_PP_SENSOR_GPU_LOAD:
951                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
952                                                        METRICS_AVERAGE_GFXACTIVITY,
953                                                        (uint32_t *)data);
954                 *size = 4;
955                 break;
956         case AMDGPU_PP_SENSOR_GPU_POWER:
957                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
958                                                        METRICS_AVERAGE_SOCKETPOWER,
959                                                        (uint32_t *)data);
960                 *size = 4;
961                 break;
962         case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
963                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
964                                                        METRICS_TEMPERATURE_HOTSPOT,
965                                                        (uint32_t *)data);
966                 *size = 4;
967                 break;
968         case AMDGPU_PP_SENSOR_EDGE_TEMP:
969                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
970                                                        METRICS_TEMPERATURE_EDGE,
971                                                        (uint32_t *)data);
972                 *size = 4;
973                 break;
974         case AMDGPU_PP_SENSOR_MEM_TEMP:
975                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
976                                                        METRICS_TEMPERATURE_MEM,
977                                                        (uint32_t *)data);
978                 *size = 4;
979                 break;
980         case AMDGPU_PP_SENSOR_GFX_MCLK:
981                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
982                                                        METRICS_CURR_UCLK,
983                                                        (uint32_t *)data);
984                 *(uint32_t *)data *= 100;
985                 *size = 4;
986                 break;
987         case AMDGPU_PP_SENSOR_GFX_SCLK:
988                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
989                                                        METRICS_AVERAGE_GFXCLK,
990                                                        (uint32_t *)data);
991                 *(uint32_t *)data *= 100;
992                 *size = 4;
993                 break;
994         case AMDGPU_PP_SENSOR_VDDGFX:
995                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
996                                                        METRICS_VOLTAGE_VDDGFX,
997                                                        (uint32_t *)data);
998                 *size = 4;
999                 break;
1000         case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
1001         default:
1002                 ret = -EOPNOTSUPP;
1003                 break;
1004         }
1005
1006         return ret;
1007 }
1008
1009 static int smu_v13_0_0_get_current_clk_freq_by_table(struct smu_context *smu,
1010                                                      enum smu_clk_type clk_type,
1011                                                      uint32_t *value)
1012 {
1013         MetricsMember_t member_type;
1014         int clk_id = 0;
1015
1016         clk_id = smu_cmn_to_asic_specific_index(smu,
1017                                                 CMN2ASIC_MAPPING_CLK,
1018                                                 clk_type);
1019         if (clk_id < 0)
1020                 return -EINVAL;
1021
1022         switch (clk_id) {
1023         case PPCLK_GFXCLK:
1024                 member_type = METRICS_AVERAGE_GFXCLK;
1025                 break;
1026         case PPCLK_UCLK:
1027                 member_type = METRICS_CURR_UCLK;
1028                 break;
1029         case PPCLK_FCLK:
1030                 member_type = METRICS_CURR_FCLK;
1031                 break;
1032         case PPCLK_SOCCLK:
1033                 member_type = METRICS_CURR_SOCCLK;
1034                 break;
1035         case PPCLK_VCLK_0:
1036                 member_type = METRICS_AVERAGE_VCLK;
1037                 break;
1038         case PPCLK_DCLK_0:
1039                 member_type = METRICS_AVERAGE_DCLK;
1040                 break;
1041         case PPCLK_VCLK_1:
1042                 member_type = METRICS_AVERAGE_VCLK1;
1043                 break;
1044         case PPCLK_DCLK_1:
1045                 member_type = METRICS_AVERAGE_DCLK1;
1046                 break;
1047         default:
1048                 return -EINVAL;
1049         }
1050
1051         return smu_v13_0_0_get_smu_metrics_data(smu,
1052                                                 member_type,
1053                                                 value);
1054 }
1055
1056 static bool smu_v13_0_0_is_od_feature_supported(struct smu_context *smu,
1057                                                 int od_feature_bit)
1058 {
1059         PPTable_t *pptable = smu->smu_table.driver_pptable;
1060         const OverDriveLimits_t * const overdrive_upperlimits =
1061                                 &pptable->SkuTable.OverDriveLimitsBasicMax;
1062
1063         return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
1064 }
1065
1066 static void smu_v13_0_0_get_od_setting_limits(struct smu_context *smu,
1067                                               int od_feature_bit,
1068                                               int32_t *min,
1069                                               int32_t *max)
1070 {
1071         PPTable_t *pptable = smu->smu_table.driver_pptable;
1072         const OverDriveLimits_t * const overdrive_upperlimits =
1073                                 &pptable->SkuTable.OverDriveLimitsBasicMax;
1074         const OverDriveLimits_t * const overdrive_lowerlimits =
1075                                 &pptable->SkuTable.OverDriveLimitsMin;
1076         int32_t od_min_setting, od_max_setting;
1077
1078         switch (od_feature_bit) {
1079         case PP_OD_FEATURE_GFXCLK_FMIN:
1080                 od_min_setting = overdrive_lowerlimits->GfxclkFmin;
1081                 od_max_setting = overdrive_upperlimits->GfxclkFmin;
1082                 break;
1083         case PP_OD_FEATURE_GFXCLK_FMAX:
1084                 od_min_setting = overdrive_lowerlimits->GfxclkFmax;
1085                 od_max_setting = overdrive_upperlimits->GfxclkFmax;
1086                 break;
1087         case PP_OD_FEATURE_UCLK_FMIN:
1088                 od_min_setting = overdrive_lowerlimits->UclkFmin;
1089                 od_max_setting = overdrive_upperlimits->UclkFmin;
1090                 break;
1091         case PP_OD_FEATURE_UCLK_FMAX:
1092                 od_min_setting = overdrive_lowerlimits->UclkFmax;
1093                 od_max_setting = overdrive_upperlimits->UclkFmax;
1094                 break;
1095         case PP_OD_FEATURE_GFX_VF_CURVE:
1096                 od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
1097                 od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
1098                 break;
1099         default:
1100                 od_min_setting = od_max_setting = INT_MAX;
1101                 break;
1102         }
1103
1104         if (min)
1105                 *min = od_min_setting;
1106         if (max)
1107                 *max = od_max_setting;
1108 }
1109
1110 static void smu_v13_0_0_dump_od_table(struct smu_context *smu,
1111                                       OverDriveTableExternal_t *od_table)
1112 {
1113         struct amdgpu_device *adev = smu->adev;
1114
1115         dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin,
1116                                                      od_table->OverDriveTable.GfxclkFmax);
1117         dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin,
1118                                                    od_table->OverDriveTable.UclkFmax);
1119 }
1120
1121 static int smu_v13_0_0_get_overdrive_table(struct smu_context *smu,
1122                                            OverDriveTableExternal_t *od_table)
1123 {
1124         int ret = 0;
1125
1126         ret = smu_cmn_update_table(smu,
1127                                    SMU_TABLE_OVERDRIVE,
1128                                    0,
1129                                    (void *)od_table,
1130                                    false);
1131         if (ret)
1132                 dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
1133
1134         return ret;
1135 }
1136
1137 static int smu_v13_0_0_upload_overdrive_table(struct smu_context *smu,
1138                                               OverDriveTableExternal_t *od_table)
1139 {
1140         int ret = 0;
1141
1142         ret = smu_cmn_update_table(smu,
1143                                    SMU_TABLE_OVERDRIVE,
1144                                    0,
1145                                    (void *)od_table,
1146                                    true);
1147         if (ret)
1148                 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
1149
1150         return ret;
1151 }
1152
1153 static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
1154                                         enum smu_clk_type clk_type,
1155                                         char *buf)
1156 {
1157         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1158         struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1159         OverDriveTableExternal_t *od_table =
1160                 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
1161         struct smu_13_0_dpm_table *single_dpm_table;
1162         struct smu_13_0_pcie_table *pcie_table;
1163         uint32_t gen_speed, lane_width;
1164         int i, curr_freq, size = 0;
1165         int32_t min_value, max_value;
1166         int ret = 0;
1167
1168         smu_cmn_get_sysfs_buf(&buf, &size);
1169
1170         if (amdgpu_ras_intr_triggered()) {
1171                 size += sysfs_emit_at(buf, size, "unavailable\n");
1172                 return size;
1173         }
1174
1175         switch (clk_type) {
1176         case SMU_SCLK:
1177                 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1178                 break;
1179         case SMU_MCLK:
1180                 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
1181                 break;
1182         case SMU_SOCCLK:
1183                 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
1184                 break;
1185         case SMU_FCLK:
1186                 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
1187                 break;
1188         case SMU_VCLK:
1189         case SMU_VCLK1:
1190                 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
1191                 break;
1192         case SMU_DCLK:
1193         case SMU_DCLK1:
1194                 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
1195                 break;
1196         default:
1197                 break;
1198         }
1199
1200         switch (clk_type) {
1201         case SMU_SCLK:
1202         case SMU_MCLK:
1203         case SMU_SOCCLK:
1204         case SMU_FCLK:
1205         case SMU_VCLK:
1206         case SMU_VCLK1:
1207         case SMU_DCLK:
1208         case SMU_DCLK1:
1209                 ret = smu_v13_0_0_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
1210                 if (ret) {
1211                         dev_err(smu->adev->dev, "Failed to get current clock freq!");
1212                         return ret;
1213                 }
1214
1215                 if (single_dpm_table->is_fine_grained) {
1216                         /*
1217                          * For fine grained dpms, there are only two dpm levels:
1218                          *   - level 0 -> min clock freq
1219                          *   - level 1 -> max clock freq
1220                          * And the current clock frequency can be any value between them.
1221                          * So, if the current clock frequency is not at level 0 or level 1,
1222                          * we will fake it as three dpm levels:
1223                          *   - level 0 -> min clock freq
1224                          *   - level 1 -> current actual clock freq
1225                          *   - level 2 -> max clock freq
1226                          */
1227                         if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
1228                              (single_dpm_table->dpm_levels[1].value != curr_freq)) {
1229                                 size += sysfs_emit_at(buf, size, "0: %uMhz\n",
1230                                                 single_dpm_table->dpm_levels[0].value);
1231                                 size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
1232                                                 curr_freq);
1233                                 size += sysfs_emit_at(buf, size, "2: %uMhz\n",
1234                                                 single_dpm_table->dpm_levels[1].value);
1235                         } else {
1236                                 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
1237                                                 single_dpm_table->dpm_levels[0].value,
1238                                                 single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
1239                                 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
1240                                                 single_dpm_table->dpm_levels[1].value,
1241                                                 single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
1242                         }
1243                 } else {
1244                         for (i = 0; i < single_dpm_table->count; i++)
1245                                 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
1246                                                 i, single_dpm_table->dpm_levels[i].value,
1247                                                 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
1248                 }
1249                 break;
1250         case SMU_PCIE:
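                     /*
                      * Print each PCIe DPM link level and mark with '*' the level
                      * matching the current gen speed and lane width reported in
                      * the metrics table.
                      */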
1251                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
1252                                                        METRICS_PCIE_RATE,
1253                                                        &gen_speed);
1254                 if (ret)
1255                         return ret;
1256
1257                 ret = smu_v13_0_0_get_smu_metrics_data(smu,
1258                                                        METRICS_PCIE_WIDTH,
1259                                                        &lane_width);
1260                 if (ret)
1261                         return ret;
1262
1263                 pcie_table = &(dpm_context->dpm_tables.pcie_table);
1264                 for (i = 0; i < pcie_table->num_of_link_levels; i++)
1265                         size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
1266                                         (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
1267                                         (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
1268                                         (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
1269                                         (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "",
1270                                         (pcie_table->pcie_lane[i] == 1) ? "x1" :
1271                                         (pcie_table->pcie_lane[i] == 2) ? "x2" :
1272                                         (pcie_table->pcie_lane[i] == 3) ? "x4" :
1273                                         (pcie_table->pcie_lane[i] == 4) ? "x8" :
1274                                         (pcie_table->pcie_lane[i] == 5) ? "x12" :
1275                                         (pcie_table->pcie_lane[i] == 6) ? "x16" : "",
1276                                         pcie_table->clk_freq[i],
1277                                         (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
1278                                         (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
1279                                         "*" : "");
1280                 break;
1281
1282         case SMU_OD_SCLK:
1283                 if (!smu_v13_0_0_is_od_feature_supported(smu,
1284                                                          PP_OD_FEATURE_GFXCLK_BIT))
1285                         break;
1286
1287                 size += sysfs_emit_at(buf, size, "OD_SCLK:\n");
1288                 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
1289                                         od_table->OverDriveTable.GfxclkFmin,
1290                                         od_table->OverDriveTable.GfxclkFmax);
1291                 break;
1292
1293         case SMU_OD_MCLK:
1294                 if (!smu_v13_0_0_is_od_feature_supported(smu,
1295                                                          PP_OD_FEATURE_UCLK_BIT))
1296                         break;
1297
1298                 size += sysfs_emit_at(buf, size, "OD_MCLK:\n");
1299         size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n",
1300                                         od_table->OverDriveTable.UclkFmin,
1301                                         od_table->OverDriveTable.UclkFmax);
1302                 break;
1303
1304         case SMU_OD_VDDC_CURVE:
1305                 if (!smu_v13_0_0_is_od_feature_supported(smu,
1306                                                          PP_OD_FEATURE_GFX_VF_CURVE_BIT))
1307                         break;
1308
1309                 size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n");
1310                 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
1311                         size += sysfs_emit_at(buf, size, "%d: %dmv\n",
1312                                                 i,
1313                                                 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]);
1314                 break;
1315
1316         case SMU_OD_RANGE:
1317                 if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) &&
1318                     !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) &&
1319                     !smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT))
1320                         break;
1321
1322                 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1323
1324                 if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
1325                         smu_v13_0_0_get_od_setting_limits(smu,
1326                                                           PP_OD_FEATURE_GFXCLK_FMIN,
1327                                                           &min_value,
1328                                                           NULL);
1329                         smu_v13_0_0_get_od_setting_limits(smu,
1330                                                           PP_OD_FEATURE_GFXCLK_FMAX,
1331                                                           NULL,
1332                                                           &max_value);
1333                         size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
1334                                               min_value, max_value);
1335                 }
1336
1337                 if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
1338                         smu_v13_0_0_get_od_setting_limits(smu,
1339                                                           PP_OD_FEATURE_UCLK_FMIN,
1340                                                           &min_value,
1341                                                           NULL);
1342                         smu_v13_0_0_get_od_setting_limits(smu,
1343                                                           PP_OD_FEATURE_UCLK_FMAX,
1344                                                           NULL,
1345                                                           &max_value);
1346                         size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
1347                                               min_value, max_value);
1348                 }
1349
1350                 if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
1351                         smu_v13_0_0_get_od_setting_limits(smu,
1352                                                           PP_OD_FEATURE_GFX_VF_CURVE,
1353                                                           &min_value,
1354                                                           &max_value);
1355                         size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n",
1356                                               min_value, max_value);
1357                 }
1358                 break;
1359
1360         default:
1361                 break;
1362         }
1363
1364         return size;
1365 }
1366
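     /*
      * Handle overdrive edits handed down from the amdgpu_pm layer (typically
      * the pp_od_clk_voltage sysfs file, whose command syntax, e.g. "s 1 <MHz>"
      * for the gfxclk max, is defined outside this file) and validate them
      * against the PMFW-reported limits before they are committed.
      */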
1367 static int smu_v13_0_0_od_edit_dpm_table(struct smu_context *smu,
1368                                          enum PP_OD_DPM_TABLE_COMMAND type,
1369                                          long input[],
1370                                          uint32_t size)
1371 {
1372         struct smu_table_context *table_context = &smu->smu_table;
1373         OverDriveTableExternal_t *od_table =
1374                 (OverDriveTableExternal_t *)table_context->overdrive_table;
1375         struct amdgpu_device *adev = smu->adev;
1376         uint32_t offset_of_voltageoffset;
1377         int32_t minimum, maximum;
1378         uint32_t feature_ctrlmask;
1379         int i, ret = 0;
1380
1381         switch (type) {
1382         case PP_OD_EDIT_SCLK_VDDC_TABLE:
1383                 if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) {
1384                         dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n");
1385                         return -ENOTSUPP;
1386                 }
1387
1388                 for (i = 0; i < size; i += 2) {
1389                         if (i + 2 > size) {
1390                                 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
1391                                 return -EINVAL;
1392                         }
1393
1394                         switch (input[i]) {
1395                         case 0:
1396                                 smu_v13_0_0_get_od_setting_limits(smu,
1397                                                                   PP_OD_FEATURE_GFXCLK_FMIN,
1398                                                                   &minimum,
1399                                                                   &maximum);
1400                                 if (input[i + 1] < minimum ||
1401                                     input[i + 1] > maximum) {
1402                                         dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%d, %d]!\n",
1403                                                 input[i + 1], minimum, maximum);
1404                                         return -EINVAL;
1405                                 }
1406
1407                                 od_table->OverDriveTable.GfxclkFmin = input[i + 1];
1408                                 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
1409                                 break;
1410
1411                         case 1:
1412                                 smu_v13_0_0_get_od_setting_limits(smu,
1413                                                                   PP_OD_FEATURE_GFXCLK_FMAX,
1414                                                                   &minimum,
1415                                                                   &maximum);
1416                                 if (input[i + 1] < minimum ||
1417                                     input[i + 1] > maximum) {
1418                                         dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%d, %d]!\n",
1419                                                 input[i + 1], minimum, maximum);
1420                                         return -EINVAL;
1421                                 }
1422
1423                                 od_table->OverDriveTable.GfxclkFmax = input[i + 1];
1424                                 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT;
1425                                 break;
1426
1427                         default:
1428                                 dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
1429                                 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
1430                                 return -EINVAL;
1431                         }
1432                 }
1433
1434                 if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) {
1435                         dev_err(adev->dev,
1436                                 "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n",
1437                                 (uint32_t)od_table->OverDriveTable.GfxclkFmin,
1438                                 (uint32_t)od_table->OverDriveTable.GfxclkFmax);
1439                         return -EINVAL;
1440                 }
1441                 break;
1442
1443         case PP_OD_EDIT_MCLK_VDDC_TABLE:
1444                 if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) {
1445                         dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n");
1446                         return -ENOTSUPP;
1447                 }
1448
1449                 for (i = 0; i < size; i += 2) {
1450                         if (i + 2 > size) {
1451                                 dev_info(adev->dev, "invalid number of input parameters %d\n", size);
1452                                 return -EINVAL;
1453                         }
1454
1455                         switch (input[i]) {
1456                         case 0:
1457                                 smu_v13_0_0_get_od_setting_limits(smu,
1458                                                                   PP_OD_FEATURE_UCLK_FMIN,
1459                                                                   &minimum,
1460                                                                   &maximum);
1461                                 if (input[i + 1] < minimum ||
1462                                     input[i + 1] > maximum) {
1463                                         dev_info(adev->dev, "UclkFmin (%ld) must be within [%d, %d]!\n",
1464                                                 input[i + 1], minimum, maximum);
1465                                         return -EINVAL;
1466                                 }
1467
1468                                 od_table->OverDriveTable.UclkFmin = input[i + 1];
1469                                 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
1470                                 break;
1471
1472                         case 1:
1473                                 smu_v13_0_0_get_od_setting_limits(smu,
1474                                                                   PP_OD_FEATURE_UCLK_FMAX,
1475                                                                   &minimum,
1476                                                                   &maximum);
1477                                 if (input[i + 1] < minimum ||
1478                                     input[i + 1] > maximum) {
1479                                         dev_info(adev->dev, "UclkFmax (%ld) must be within [%d, %d]!\n",
1480                                                 input[i + 1], minimum, maximum);
1481                                         return -EINVAL;
1482                                 }
1483
1484                                 od_table->OverDriveTable.UclkFmax = input[i + 1];
1485                                 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT;
1486                                 break;
1487
1488                         default:
1489                                 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]);
1490                                 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n");
1491                                 return -EINVAL;
1492                         }
1493                 }
1494
1495                 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) {
1496                         dev_err(adev->dev,
1497                                 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n",
1498                                 (uint32_t)od_table->OverDriveTable.UclkFmin,
1499                                 (uint32_t)od_table->OverDriveTable.UclkFmax);
1500                         return -EINVAL;
1501                 }
1502                 break;
1503
1504         case PP_OD_EDIT_VDDC_CURVE:
1505                 if (!smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) {
1506                         dev_warn(adev->dev, "VF curve setting not supported!\n");
1507                         return -ENOTSUPP;
1508                 }
1509
1510                 if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS ||
1511                     input[0] < 0)
1512                         return -EINVAL;
1513
1514                 smu_v13_0_0_get_od_setting_limits(smu,
1515                                                   PP_OD_FEATURE_GFX_VF_CURVE,
1516                                                   &minimum,
1517                                                   &maximum);
1518                 if (input[1] < minimum ||
1519                     input[1] > maximum) {
1520                         dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n",
1521                                  input[1], minimum, maximum);
1522                         return -EINVAL;
1523                 }
1524
1525                 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1];
1526                 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
1527                 break;
1528
1529         case PP_OD_RESTORE_DEFAULT_TABLE:
1530                 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask;
1531                 memcpy(od_table,
1532                        table_context->boot_overdrive_table,
1533                        sizeof(OverDriveTableExternal_t));
1534                 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask;
1535                 fallthrough;
1536
1537         case PP_OD_COMMIT_DPM_TABLE:
1538                 /*
1539                  * The member below tells the PMFW which settings are targeted
1540                  * by this single operation:
1541                  * `uint32_t FeatureCtrlMask;`
1542                  * It does not carry the user's actual custom settings, so we
1543                  * do not cache it.
1544                  */
1545                 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary);
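                     /*
                      * Compare only the payload starting at VoltageOffsetPerZoneBoundary
                      * so that the uncached FeatureCtrlMask above is skipped; upload and
                      * cache the user table only when something actually changed.
                      */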
1546                 if (memcmp((u8 *)od_table + offset_of_voltageoffset,
1547                            table_context->user_overdrive_table + offset_of_voltageoffset,
1548                            sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) {
1549                         smu_v13_0_0_dump_od_table(smu, od_table);
1550
1551                         ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
1552                         if (ret) {
1553                                 dev_err(adev->dev, "Failed to upload overdrive table!\n");
1554                                 return ret;
1555                         }
1556
1557                         od_table->OverDriveTable.FeatureCtrlMask = 0;
1558                         memcpy(table_context->user_overdrive_table + offset_of_voltageoffset,
1559                                (u8 *)od_table + offset_of_voltageoffset,
1560                                sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset);
1561
1562                         if (!memcmp(table_context->user_overdrive_table,
1563                                     table_context->boot_overdrive_table,
1564                                     sizeof(OverDriveTableExternal_t)))
1565                                 smu->user_dpm_profile.user_od = false;
1566                         else
1567                                 smu->user_dpm_profile.user_od = true;
1568                 }
1569                 break;
1570
1571         default:
1572                 return -ENOSYS;
1573         }
1574
1575         return ret;
1576 }
1577
1578 static int smu_v13_0_0_force_clk_levels(struct smu_context *smu,
1579                                         enum smu_clk_type clk_type,
1580                                         uint32_t mask)
1581 {
1582         struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1583         struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1584         struct smu_13_0_dpm_table *single_dpm_table;
1585         uint32_t soft_min_level, soft_max_level;
1586         uint32_t min_freq, max_freq;
1587         int ret = 0;
1588
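             /* The lowest/highest set bits of the mask select the soft min/max DPM levels. */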
1589         soft_min_level = mask ? (ffs(mask) - 1) : 0;
1590         soft_max_level = mask ? (fls(mask) - 1) : 0;
1591
1592         switch (clk_type) {
1593         case SMU_GFXCLK:
1594         case SMU_SCLK:
1595                 single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1596                 break;
1597         case SMU_MCLK:
1598         case SMU_UCLK:
1599                 single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
1600                 break;
1601         case SMU_SOCCLK:
1602                 single_dpm_table = &(dpm_context->dpm_tables.soc_table);
1603                 break;
1604         case SMU_FCLK:
1605                 single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
1606                 break;
1607         case SMU_VCLK:
1608         case SMU_VCLK1:
1609                 single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
1610                 break;
1611         case SMU_DCLK:
1612         case SMU_DCLK1:
1613                 single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
1614                 break;
1615         default:
1616                 break;
1617         }
1618
1619         switch (clk_type) {
1620         case SMU_GFXCLK:
1621         case SMU_SCLK:
1622         case SMU_MCLK:
1623         case SMU_UCLK:
1624         case SMU_SOCCLK:
1625         case SMU_FCLK:
1626         case SMU_VCLK:
1627         case SMU_VCLK1:
1628         case SMU_DCLK:
1629         case SMU_DCLK1:
1630                 if (single_dpm_table->is_fine_grained) {
1631                         /* There are only two levels for fine-grained DPM */
1632                         soft_max_level = (soft_max_level >= 1 ? 1 : 0);
1633                         soft_min_level = (soft_min_level >= 1 ? 1 : 0);
1634                 } else {
1635                         if ((soft_max_level >= single_dpm_table->count) ||
1636                             (soft_min_level >= single_dpm_table->count))
1637                                 return -EINVAL;
1638                 }
1639
1640                 min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
1641                 max_freq = single_dpm_table->dpm_levels[soft_max_level].value;
1642
1643                 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1644                                                             clk_type,
1645                                                             min_freq,
1646                                                             max_freq);
1647                 break;
1648         case SMU_DCEFCLK:
1649         case SMU_PCIE:
1650         default:
1651                 break;
1652         }
1653
1654         return ret;
1655 }
1656
1657 static const struct smu_temperature_range smu13_thermal_policy[] = {
1658         {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
1659         { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
1660 };
1661
1662 static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
1663                                                      struct smu_temperature_range *range)
1664 {
1665         struct smu_table_context *table_context = &smu->smu_table;
1666         struct smu_13_0_0_powerplay_table *powerplay_table =
1667                 table_context->power_play_table;
1668         PPTable_t *pptable = smu->smu_table.driver_pptable;
1669
1670         if (amdgpu_sriov_vf(smu->adev))
1671                 return 0;
1672
1673         if (!range)
1674                 return -EINVAL;
1675
1676         memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range));
1677
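             /*
              * The critical limits come from the SKU temperature limits; the
              * emergency limits add the per-sensor CTF offsets. Everything is
              * converted to the driver's temperature units (millidegrees C).
              */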
1678         range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] *
1679                 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1680         range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) *
1681                 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1682         range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] *
1683                 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1684         range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) *
1685                 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1686         range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] *
1687                 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1688         range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + CTF_OFFSET_MEM) *
1689                 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
1690         range->software_shutdown_temp = powerplay_table->software_shutdown_temp;
1691         range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset;
1692
1693         return 0;
1694 }
1695
1696 #define MAX(a, b)       ((a) > (b) ? (a) : (b))
1697 static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
1698                                            void **table)
1699 {
1700         struct smu_table_context *smu_table = &smu->smu_table;
1701         struct gpu_metrics_v1_3 *gpu_metrics =
1702                 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
1703         SmuMetricsExternal_t metrics_ext;
1704         SmuMetrics_t *metrics = &metrics_ext.SmuMetrics;
1705         int ret = 0;
1706
1707         ret = smu_cmn_get_metrics_table(smu,
1708                                         &metrics_ext,
1709                                         true);
1710         if (ret)
1711                 return ret;
1712
1713         smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
1714
1715         gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE];
1716         gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT];
1717         gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM];
1718         gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX];
1719         gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC];
1720         gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0],
1721                                              metrics->AvgTemperature[TEMP_VR_MEM1]);
1722
1723         gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
1724         gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
1725         gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage,
1726                                                metrics->Vcn1ActivityPercentage);
1727
1728         gpu_metrics->average_socket_power = metrics->AverageSocketPower;
1729         gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
1730
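             /*
              * Report the post-deep-sleep average frequencies while activity is
              * at or below the busy threshold, the pre-deep-sleep ones otherwise.
              */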
1731         if (metrics->AverageGfxActivity <= SMU_13_0_0_BUSY_THRESHOLD)
1732                 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
1733         else
1734                 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
1735
1736         if (metrics->AverageUclkActivity <= SMU_13_0_0_BUSY_THRESHOLD)
1737                 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs;
1738         else
1739                 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs;
1740
1741         gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
1742         gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
1743         gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
1744         gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
1745
1746         gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
1747         gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
1748         gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
1749         gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
1750         gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
1751         gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
1752         gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
1753
1754         gpu_metrics->throttle_status =
1755                         smu_v13_0_get_throttler_status(metrics);
1756         gpu_metrics->indep_throttle_status =
1757                         smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
1758                                                            smu_v13_0_0_throttler_map);
1759
1760         gpu_metrics->current_fan_speed = metrics->AvgFanRpm;
1761
1762         gpu_metrics->pcie_link_width = metrics->PcieWidth;
1763         gpu_metrics->pcie_link_speed = metrics->PcieRate;
1764
1765         gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
1766
1767         gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX];
1768         gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC];
1769         gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP];
1770
1771         *table = (void *)gpu_metrics;
1772
1773         return sizeof(struct gpu_metrics_v1_3);
1774 }
1775
1776 static int smu_v13_0_0_set_default_od_settings(struct smu_context *smu)
1777 {
1778         OverDriveTableExternal_t *od_table =
1779                 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
1780         OverDriveTableExternal_t *boot_od_table =
1781                 (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table;
1782         OverDriveTableExternal_t *user_od_table =
1783                 (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table;
1784         OverDriveTableExternal_t user_od_table_bak;
1785         int ret = 0;
1786         int i;
1787
1788         ret = smu_v13_0_0_get_overdrive_table(smu, boot_od_table);
1789         if (ret)
1790                 return ret;
1791
1792         smu_v13_0_0_dump_od_table(smu, boot_od_table);
1793
1794         memcpy(od_table,
1795                boot_od_table,
1796                sizeof(OverDriveTableExternal_t));
1797
1798         /*
1799          * For S3/S4/Runpm resume, we need to set up those overdrive tables again,
1800          * but we have to preserve the user-defined values in "user_od_table".
1801          */
1802         if (!smu->adev->in_suspend) {
1803                 memcpy(user_od_table,
1804                        boot_od_table,
1805                        sizeof(OverDriveTableExternal_t));
1806                 smu->user_dpm_profile.user_od = false;
1807         } else if (smu->user_dpm_profile.user_od) {
1808                 memcpy(&user_od_table_bak,
1809                        user_od_table,
1810                        sizeof(OverDriveTableExternal_t));
1811                 memcpy(user_od_table,
1812                        boot_od_table,
1813                        sizeof(OverDriveTableExternal_t));
1814                 user_od_table->OverDriveTable.GfxclkFmin =
1815                                 user_od_table_bak.OverDriveTable.GfxclkFmin;
1816                 user_od_table->OverDriveTable.GfxclkFmax =
1817                                 user_od_table_bak.OverDriveTable.GfxclkFmax;
1818                 user_od_table->OverDriveTable.UclkFmin =
1819                                 user_od_table_bak.OverDriveTable.UclkFmin;
1820                 user_od_table->OverDriveTable.UclkFmax =
1821                                 user_od_table_bak.OverDriveTable.UclkFmax;
1822                 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++)
1823                         user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] =
1824                                 user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i];
1825         }
1826
1827         return 0;
1828 }
1829
1830 static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
1831 {
1832         struct smu_table_context *table_context = &smu->smu_table;
1833         OverDriveTableExternal_t *od_table = table_context->overdrive_table;
1834         OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table;
1835         int res;
1836
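             /*
              * Ask the PMFW to re-apply all three OD features from the cached user
              * table, then clear FeatureCtrlMask again since it is never cached
              * (see smu_v13_0_0_od_edit_dpm_table()).
              */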
1837         user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT |
1838                                                         1U << PP_OD_FEATURE_UCLK_BIT |
1839                                                         1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT;
1840         res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
1841         user_od_table->OverDriveTable.FeatureCtrlMask = 0;
1842         if (res == 0)
1843                 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t));
1844
1845         return res;
1846 }
1847
1848 static int smu_v13_0_0_populate_umd_state_clk(struct smu_context *smu)
1849 {
1850         struct smu_13_0_dpm_context *dpm_context =
1851                                 smu->smu_dpm.dpm_context;
1852         struct smu_13_0_dpm_table *gfx_table =
1853                                 &dpm_context->dpm_tables.gfx_table;
1854         struct smu_13_0_dpm_table *mem_table =
1855                                 &dpm_context->dpm_tables.uclk_table;
1856         struct smu_13_0_dpm_table *soc_table =
1857                                 &dpm_context->dpm_tables.soc_table;
1858         struct smu_13_0_dpm_table *vclk_table =
1859                                 &dpm_context->dpm_tables.vclk_table;
1860         struct smu_13_0_dpm_table *dclk_table =
1861                                 &dpm_context->dpm_tables.dclk_table;
1862         struct smu_13_0_dpm_table *fclk_table =
1863                                 &dpm_context->dpm_tables.fclk_table;
1864         struct smu_umd_pstate_table *pstate_table =
1865                                 &smu->pstate_table;
1866         struct smu_table_context *table_context = &smu->smu_table;
1867         PPTable_t *pptable = table_context->driver_pptable;
1868         DriverReportedClocks_t driver_clocks =
1869                         pptable->SkuTable.DriverReportedClocks;
1870
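             /*
              * Use the PMFW-reported GameClockAc as the gfx peak pstate (and
              * BaseClockAc as the standard pstate further below) when they are
              * valid and below the DPM table maximum; otherwise fall back to the
              * table maximum.
              */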
1871         pstate_table->gfxclk_pstate.min = gfx_table->min;
1872         if (driver_clocks.GameClockAc &&
1873             (driver_clocks.GameClockAc < gfx_table->max))
1874                 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc;
1875         else
1876                 pstate_table->gfxclk_pstate.peak = gfx_table->max;
1877
1878         pstate_table->uclk_pstate.min = mem_table->min;
1879         pstate_table->uclk_pstate.peak = mem_table->max;
1880
1881         pstate_table->socclk_pstate.min = soc_table->min;
1882         pstate_table->socclk_pstate.peak = soc_table->max;
1883
1884         pstate_table->vclk_pstate.min = vclk_table->min;
1885         pstate_table->vclk_pstate.peak = vclk_table->max;
1886
1887         pstate_table->dclk_pstate.min = dclk_table->min;
1888         pstate_table->dclk_pstate.peak = dclk_table->max;
1889
1890         pstate_table->fclk_pstate.min = fclk_table->min;
1891         pstate_table->fclk_pstate.peak = fclk_table->max;
1892
1893         if (driver_clocks.BaseClockAc &&
1894             driver_clocks.BaseClockAc < gfx_table->max)
1895                 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc;
1896         else
1897                 pstate_table->gfxclk_pstate.standard = gfx_table->max;
1898         pstate_table->uclk_pstate.standard = mem_table->max;
1899         pstate_table->socclk_pstate.standard = soc_table->min;
1900         pstate_table->vclk_pstate.standard = vclk_table->min;
1901         pstate_table->dclk_pstate.standard = dclk_table->min;
1902         pstate_table->fclk_pstate.standard = fclk_table->min;
1903
1904         return 0;
1905 }
1906
1907 static void smu_v13_0_0_get_unique_id(struct smu_context *smu)
1908 {
1909         struct smu_table_context *smu_table = &smu->smu_table;
1910         SmuMetrics_t *metrics =
1911                 &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
1912         struct amdgpu_device *adev = smu->adev;
1913         uint32_t upper32 = 0, lower32 = 0;
1914         int ret;
1915
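             /*
              * The unique id is the 64-bit public serial number reported in the
              * metrics table; it falls back to 0 if the table cannot be fetched.
              */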
1916         ret = smu_cmn_get_metrics_table(smu, NULL, false);
1917         if (ret)
1918                 goto out;
1919
1920         upper32 = metrics->PublicSerialNumberUpper;
1921         lower32 = metrics->PublicSerialNumberLower;
1922
1923 out:
1924         adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
1925         if (adev->serial[0] == '\0')
1926                 sprintf(adev->serial, "%016llx", adev->unique_id);
1927 }
1928
1929 static int smu_v13_0_0_get_fan_speed_pwm(struct smu_context *smu,
1930                                          uint32_t *speed)
1931 {
1932         int ret;
1933
1934         if (!speed)
1935                 return -EINVAL;
1936
1937         ret = smu_v13_0_0_get_smu_metrics_data(smu,
1938                                                METRICS_CURR_FANPWM,
1939                                                speed);
1940         if (ret) {
1941                 dev_err(smu->adev->dev, "Failed to get fan speed (PWM)!");
1942                 return ret;
1943         }
1944
1945         /* Convert the PMFW output from a percentage to a 0-255 PWM value */
1946         *speed = MIN(*speed * 255 / 100, 255);
1947
1948         return 0;
1949 }
1950
1951 static int smu_v13_0_0_get_fan_speed_rpm(struct smu_context *smu,
1952                                          uint32_t *speed)
1953 {
1954         if (!speed)
1955                 return -EINVAL;
1956
1957         return smu_v13_0_0_get_smu_metrics_data(smu,
1958                                                 METRICS_CURR_FANSPEED,
1959                                                 speed);
1960 }
1961
1962 static int smu_v13_0_0_enable_mgpu_fan_boost(struct smu_context *smu)
1963 {
1964         struct smu_table_context *table_context = &smu->smu_table;
1965         PPTable_t *pptable = table_context->driver_pptable;
1966         SkuTable_t *skutable = &pptable->SkuTable;
1967
1968         /*
1969          * Skip the MGpuFanBoost setting for those ASICs
1970          * which do not support it
1971          */
1972         if (skutable->MGpuAcousticLimitRpmThreshold == 0)
1973                 return 0;
1974
1975         return smu_cmn_send_smc_msg_with_param(smu,
1976                                                SMU_MSG_SetMGpuFanBoostLimitRpm,
1977                                                0,
1978                                                NULL);
1979 }
1980
1981 static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
1982                                        uint32_t *current_power_limit,
1983                                        uint32_t *default_power_limit,
1984                                        uint32_t *max_power_limit)
1985 {
1986         struct smu_table_context *table_context = &smu->smu_table;
1987         struct smu_13_0_0_powerplay_table *powerplay_table =
1988                 (struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
1989         PPTable_t *pptable = table_context->driver_pptable;
1990         SkuTable_t *skutable = &pptable->SkuTable;
1991         uint32_t power_limit, od_percent;
1992
1993         if (smu_v13_0_get_current_power_limit(smu, &power_limit))
1994                 power_limit = smu->adev->pm.ac_power ?
1995                               skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
1996                               skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];
1997
1998         if (current_power_limit)
1999                 *current_power_limit = power_limit;
2000         if (default_power_limit)
2001                 *default_power_limit = power_limit;
2002
2003         if (max_power_limit) {
2004                 if (smu->od_enabled) {
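                             /*
                              * Overdrive allows the limit to be raised by the power
                              * percentage advertised in the powerplay table.
                              */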
2005                         od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
2006
2007                         dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);
2008
2009                         power_limit *= (100 + od_percent);
2010                         power_limit /= 100;
2011                 }
2012                 *max_power_limit = power_limit;
2013         }
2014
2015         return 0;
2016 }
2017
2018 static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
2019                                               char *buf)
2020 {
2021         DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
2022         DpmActivityMonitorCoeffInt_t *activity_monitor =
2023                 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
2024         static const char *title[] = {
2025                         "PROFILE_INDEX(NAME)",
2026                         "CLOCK_TYPE(NAME)",
2027                         "FPS",
2028                         "MinActiveFreqType",
2029                         "MinActiveFreq",
2030                         "BoosterFreqType",
2031                         "BoosterFreq",
2032                         "PD_Data_limit_c",
2033                         "PD_Data_error_coeff",
2034                         "PD_Data_error_rate_coeff"};
2035         int16_t workload_type = 0;
2036         uint32_t i, size = 0;
2037         int result = 0;
2038
2039         if (!buf)
2040                 return -EINVAL;
2041
2042         size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
2043                         title[0], title[1], title[2], title[3], title[4], title[5],
2044                         title[6], title[7], title[8], title[9]);
2045
2046         for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2047                 /* Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
2048                 workload_type = smu_cmn_to_asic_specific_index(smu,
2049                                                                CMN2ASIC_MAPPING_WORKLOAD,
2050                                                                i);
2051                 if (workload_type == -ENOTSUPP)
2052                         continue;
2053                 else if (workload_type < 0)
2054                         return -EINVAL;
2055
2056                 result = smu_cmn_update_table(smu,
2057                                               SMU_TABLE_ACTIVITY_MONITOR_COEFF,
2058                                               workload_type,
2059                                               (void *)(&activity_monitor_external),
2060                                               false);
2061                 if (result) {
2062                         dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2063                         return result;
2064                 }
2065
2066                 size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
2067                         i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
2068
2069                 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
2070                         " ",
2071                         0,
2072                         "GFXCLK",
2073                         activity_monitor->Gfx_FPS,
2074                         activity_monitor->Gfx_MinActiveFreqType,
2075                         activity_monitor->Gfx_MinActiveFreq,
2076                         activity_monitor->Gfx_BoosterFreqType,
2077                         activity_monitor->Gfx_BoosterFreq,
2078                         activity_monitor->Gfx_PD_Data_limit_c,
2079                         activity_monitor->Gfx_PD_Data_error_coeff,
2080                         activity_monitor->Gfx_PD_Data_error_rate_coeff);
2081
2082                 size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
2083                         " ",
2084                         1,
2085                         "FCLK",
2086                         activity_monitor->Fclk_FPS,
2087                         activity_monitor->Fclk_MinActiveFreqType,
2088                         activity_monitor->Fclk_MinActiveFreq,
2089                         activity_monitor->Fclk_BoosterFreqType,
2090                         activity_monitor->Fclk_BoosterFreq,
2091                         activity_monitor->Fclk_PD_Data_limit_c,
2092                         activity_monitor->Fclk_PD_Data_error_coeff,
2093                         activity_monitor->Fclk_PD_Data_error_rate_coeff);
2094         }
2095
2096         return size;
2097 }
2098
2099 static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
2100                                               long *input,
2101                                               uint32_t size)
2102 {
2103         DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
2104         DpmActivityMonitorCoeffInt_t *activity_monitor =
2105                 &(activity_monitor_external.DpmActivityMonitorCoeffInt);
2106         int workload_type, ret = 0;
2107
2108         smu->power_profile_mode = input[size];
2109
2110         if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
2111                 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
2112                 return -EINVAL;
2113         }
2114
2115         if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
2116                 ret = smu_cmn_update_table(smu,
2117                                            SMU_TABLE_ACTIVITY_MONITOR_COEFF,
2118                                            WORKLOAD_PPLIB_CUSTOM_BIT,
2119                                            (void *)(&activity_monitor_external),
2120                                            false);
2121                 if (ret) {
2122                         dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2123                         return ret;
2124                 }
2125
2126                 switch (input[0]) {
2127                 case 0: /* Gfxclk */
2128                         activity_monitor->Gfx_FPS = input[1];
2129                         activity_monitor->Gfx_MinActiveFreqType = input[2];
2130                         activity_monitor->Gfx_MinActiveFreq = input[3];
2131                         activity_monitor->Gfx_BoosterFreqType = input[4];
2132                         activity_monitor->Gfx_BoosterFreq = input[5];
2133                         activity_monitor->Gfx_PD_Data_limit_c = input[6];
2134                         activity_monitor->Gfx_PD_Data_error_coeff = input[7];
2135                         activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
2136                         break;
2137                 case 1: /* Fclk */
2138                         activity_monitor->Fclk_FPS = input[1];
2139                         activity_monitor->Fclk_MinActiveFreqType = input[2];
2140                         activity_monitor->Fclk_MinActiveFreq = input[3];
2141                         activity_monitor->Fclk_BoosterFreqType = input[4];
2142                         activity_monitor->Fclk_BoosterFreq = input[5];
2143                         activity_monitor->Fclk_PD_Data_limit_c = input[6];
2144                         activity_monitor->Fclk_PD_Data_error_coeff = input[7];
2145                         activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
2146                         break;
2147                 }
2148
2149                 ret = smu_cmn_update_table(smu,
2150                                            SMU_TABLE_ACTIVITY_MONITOR_COEFF,
2151                                            WORKLOAD_PPLIB_CUSTOM_BIT,
2152                                            (void *)(&activity_monitor_external),
2153                                            true);
2154                 if (ret) {
2155                         dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
2156                         return ret;
2157                 }
2158         }
2159
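             /*
              * On device 0x744C revisions 0xC8/0xCC, the COMPUTE profile is
              * applied by copying its activity monitor coefficients into the
              * CUSTOM slot and then selecting the CUSTOM workload instead.
              */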
2160         if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
2161                 (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
2162                 ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
2163                 ret = smu_cmn_update_table(smu,
2164                                            SMU_TABLE_ACTIVITY_MONITOR_COEFF,
2165                                            WORKLOAD_PPLIB_COMPUTE_BIT,
2166                                            (void *)(&activity_monitor_external),
2167                                            false);
2168                 if (ret) {
2169                         dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
2170                         return ret;
2171                 }
2172
2173                 ret = smu_cmn_update_table(smu,
2174                                            SMU_TABLE_ACTIVITY_MONITOR_COEFF,
2175                                            WORKLOAD_PPLIB_CUSTOM_BIT,
2176                                            (void *)(&activity_monitor_external),
2177                                            true);
2178                 if (ret) {
2179                         dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
2180                         return ret;
2181                 }
2182
2183                 workload_type = smu_cmn_to_asic_specific_index(smu,
2184                                                        CMN2ASIC_MAPPING_WORKLOAD,
2185                                                        PP_SMC_POWER_PROFILE_CUSTOM);
2186         } else {
2187                 /* Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
2188                 workload_type = smu_cmn_to_asic_specific_index(smu,
2189                                                        CMN2ASIC_MAPPING_WORKLOAD,
2190                                                        smu->power_profile_mode);
2191         }
2192
2193         if (workload_type < 0)
2194                 return -EINVAL;
2195
2196         return smu_cmn_send_smc_msg_with_param(smu,
2197                                                SMU_MSG_SetWorkloadMask,
2198                                                1 << workload_type,
2199                                                NULL);
2200 }
2201
2202 static int smu_v13_0_0_baco_enter(struct smu_context *smu)
2203 {
2204         struct smu_baco_context *smu_baco = &smu->smu_baco;
2205         struct amdgpu_device *adev = smu->adev;
2206
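             /*
              * When runtime PM keeps the audio function enabled, arm the D3
              * (BACO or BAMACO, depending on MACO support) sequence instead of
              * entering BACO directly.
              */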
2207         if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
2208                 return smu_v13_0_baco_set_armd3_sequence(smu,
2209                                 smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
2210         else
2211                 return smu_v13_0_baco_enter(smu);
2212 }
2213
2214 static int smu_v13_0_0_baco_exit(struct smu_context *smu)
2215 {
2216         struct amdgpu_device *adev = smu->adev;
2217
2218         if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
2219                 /* Wait for the PMFW to handle the Dstate change */
2220                 usleep_range(10000, 11000);
2221                 return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
2222         } else {
2223                 return smu_v13_0_baco_exit(smu);
2224         }
2225 }
2226
2227 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
2228 {
2229         struct amdgpu_device *adev = smu->adev;
2230         u32 smu_version;
2231
2232         /* SRIOV does not support SMU mode1 reset */
2233         if (amdgpu_sriov_vf(adev))
2234                 return false;
2235
2236         /* PMFW support is available since 78.41 */
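             /* 0x004e2900 corresponds to PMFW 78.41 (0x4e.0x29) */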
2237         smu_cmn_get_smc_version(smu, NULL, &smu_version);
2238         if (smu_version < 0x004e2900)
2239                 return false;
2240
2241         return true;
2242 }
2243
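     /*
      * Translate a set of Linux i2c_msg transfers into a single SwI2cRequest_t,
      * hand it to the PMFW through the driver table, and copy any read data
      * back into the caller's buffers.
      */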
2244 static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
2245                                    struct i2c_msg *msg, int num_msgs)
2246 {
2247         struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
2248         struct amdgpu_device *adev = smu_i2c->adev;
2249         struct smu_context *smu = adev->powerplay.pp_handle;
2250         struct smu_table_context *smu_table = &smu->smu_table;
2251         struct smu_table *table = &smu_table->driver_table;
2252         SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
2253         int i, j, r, c;
2254         u16 dir;
2255
2256         if (!adev->pm.dpm_enabled)
2257                 return -EBUSY;
2258
2259         req = kzalloc(sizeof(*req), GFP_KERNEL);
2260         if (!req)
2261                 return -ENOMEM;
2262
2263         req->I2CcontrollerPort = smu_i2c->port;
2264         req->I2CSpeed = I2C_SPEED_FAST_400K;
2265         req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
2266         dir = msg[0].flags & I2C_M_RD;
2267
2268         for (c = i = 0; i < num_msgs; i++) {
2269                 for (j = 0; j < msg[i].len; j++, c++) {
2270                         SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
2271
2272                         if (!(msg[i].flags & I2C_M_RD)) {
2273                                 /* write */
2274                                 cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
2275                                 cmd->ReadWriteData = msg[i].buf[j];
2276                         }
2277
2278                         if ((dir ^ msg[i].flags) & I2C_M_RD) {
2279                                 /* The direction changed, so flag this
2280                                  * command for a repeated START. */
2281                                 dir = msg[i].flags & I2C_M_RD;
2282                                 cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
2283                         }
2284
2285                         req->NumCmds++;
2286
2287                         /*
2288                          * Insert a STOP if this is the last byte of either the last
2289                          * message of the transaction or a message for which the
2290                          * client explicitly requested a STOP.
2291                          */
2292                         if ((j == msg[i].len - 1) &&
2293                             ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
2294                                 cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
2295                                 cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
2296                         }
2297                 }
2298         }
2299         mutex_lock(&adev->pm.mutex);
2300         r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
2301         if (r)
2302                 goto fail;
2303
2304         for (c = i = 0; i < num_msgs; i++) {
2305                 if (!(msg[i].flags & I2C_M_RD)) {
2306                         c += msg[i].len;
2307                         continue;
2308                 }
2309                 for (j = 0; j < msg[i].len; j++, c++) {
2310                         SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
2311
2312                         msg[i].buf[j] = cmd->ReadWriteData;
2313                 }
2314         }
2315         r = num_msgs;
2316 fail:
2317         mutex_unlock(&adev->pm.mutex);
2318         kfree(req);
2319         return r;
2320 }
2321
2322 static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap)
2323 {
2324         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
2325 }
2326
2327 static const struct i2c_algorithm smu_v13_0_0_i2c_algo = {
2328         .master_xfer = smu_v13_0_0_i2c_xfer,
2329         .functionality = smu_v13_0_0_i2c_func,
2330 };
2331
2332 static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = {
2333         .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
2334         .max_read_len  = MAX_SW_I2C_COMMANDS,
2335         .max_write_len = MAX_SW_I2C_COMMANDS,
2336         .max_comb_1st_msg_len = 2,
2337         .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
2338 };
2339
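/*
 * Register one i2c adapter per SMU-backed software i2c bus, with the
 * quirks above applied.  Bus 1 is later used for the RAS EEPROM and
 * bus 0 for the FRU EEPROM (see the assignments near the end of the
 * function).
 */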
2340 static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
2341 {
2342         struct amdgpu_device *adev = smu->adev;
2343         int res, i;
2344
2345         for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2346                 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2347                 struct i2c_adapter *control = &smu_i2c->adapter;
2348
2349                 smu_i2c->adev = adev;
2350                 smu_i2c->port = i;
2351                 mutex_init(&smu_i2c->mutex);
2352                 control->owner = THIS_MODULE;
2353                 control->class = I2C_CLASS_SPD;
2354                 control->dev.parent = &adev->pdev->dev;
2355                 control->algo = &smu_v13_0_0_i2c_algo;
2356                 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
2357                 control->quirks = &smu_v13_0_0_i2c_control_quirks;
2358                 i2c_set_adapdata(control, smu_i2c);
2359
2360                 res = i2c_add_adapter(control);
2361                 if (res) {
2362                         DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
2363                         goto Out_err;
2364                 }
2365         }
2366
2367         /* assign the buses used for the FRU EEPROM and RAS EEPROM */
2368         /* XXX ideally this would be something in a vbios data table */
2369         adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
2370         adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
2371
2372         return 0;
2373 Out_err:
2374         for ( ; i >= 0; i--) {
2375                 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2376                 struct i2c_adapter *control = &smu_i2c->adapter;
2377
2378                 i2c_del_adapter(control);
2379         }
2380         return res;
2381 }
2382
2383 static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
2384 {
2385         struct amdgpu_device *adev = smu->adev;
2386         int i;
2387
2388         for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
2389                 struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
2390                 struct i2c_adapter *control = &smu_i2c->adapter;
2391
2392                 i2c_del_adapter(control);
2393         }
2394         adev->pm.ras_eeprom_i2c_bus = NULL;
2395         adev->pm.fru_eeprom_i2c_bus = NULL;
2396 }
2397
2398 static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
2399                                      enum pp_mp1_state mp1_state)
2400 {
2401         int ret;
2402
2403         switch (mp1_state) {
2404         case PP_MP1_STATE_UNLOAD:
2405                 ret = smu_cmn_set_mp1_state(smu, mp1_state);
2406                 break;
2407         default:
2408                 /* Ignore others */
2409                 ret = 0;
2410         }
2411
2412         return ret;
2413 }
2414
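/*
 * Forward the data-fabric C-state policy to the PMFW; the enum
 * pp_df_cstate value is passed through unchanged as the message
 * parameter.  Illustrative call only:
 *
 *	smu_v13_0_0_set_df_cstate(smu, DF_CSTATE_DISALLOW);
 */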
2415 static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
2416                                      enum pp_df_cstate state)
2417 {
2418         return smu_cmn_send_smc_msg_with_param(smu,
2419                                                SMU_MSG_DFCstateControl,
2420                                                state,
2421                                                NULL);
2422 }
2423
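/*
 * Compute the Mode1Reset message parameter.  Assuming the usual smu13
 * PMFW version packing of 0x00MMmmpp (major/minor/patch), e.g.:
 *
 *	major = (smu_version >> 16) & 0xff;	// 0x4e == 78
 *	minor = (smu_version >>  8) & 0xff;	// 0x4d == 77
 *
 * 0x004e4d00 below corresponds to 78.77 and 0x00501c00 to 80.28, in
 * line with the comments at the call sites.  When the firmware is new
 * enough and a RAS recovery is in flight, bit 16 of the parameter
 * requests a RAS fatal error reset; otherwise the parameter stays 0.
 */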
2424 static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
2425                                                 uint32_t supported_version,
2426                                                 uint32_t *param)
2427 {
2428         uint32_t smu_version;
2429         struct amdgpu_device *adev = smu->adev;
2430         struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
2431
2432         smu_cmn_get_smc_version(smu, NULL, &smu_version);
2433
2434         if ((smu_version >= supported_version) &&
2435                         ras && atomic_read(&ras->in_recovery))
2436                 /* Set RAS fatal error reset flag */
2437                 *param = 1 << 16;
2438         else
2439                 *param = 0;
2440 }
2441
2442 static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
2443 {
2444         int ret;
2445         uint32_t param;
2446         struct amdgpu_device *adev = smu->adev;
2447
2448         switch (adev->ip_versions[MP1_HWIP][0]) {
2449         case IP_VERSION(13, 0, 0):
2450                 /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */
2451                 smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);
2452
2453                 ret = smu_cmn_send_smc_msg_with_param(smu,
2454                                                 SMU_MSG_Mode1Reset, param, NULL);
2455                 break;
2456
2457         case IP_VERSION(13, 0, 10):
2458                 /* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */
2459                 smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, &param);
2460
2461                 ret = smu_cmn_send_debug_smc_msg_with_param(smu,
2462                                                 DEBUGSMC_MSG_Mode1Reset, param);
2463                 break;
2464
2465         default:
2466                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
2467                 break;
2468         }
2469
2470         if (!ret)
2471                 msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
2472
2473         return ret;
2474 }
2475
2476 static int smu_v13_0_0_mode2_reset(struct smu_context *smu)
2477 {
2478         int ret;
2479         struct amdgpu_device *adev = smu->adev;
2480
2481         if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
2482                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);
2483         else
2484                 ret = -EOPNOTSUPP;
2485
2486         return ret;
2487 }
2488
2489 static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)
2490 {
2491         struct amdgpu_device *adev = smu->adev;
2492
2493         if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
2494                 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
2495                                                        FEATURE_PWR_GFX, NULL);
2496         else
2497                 return -EOPNOTSUPP;
2498 }
2499
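/*
 * Mailbox register assignment: C2PMSG_66/82/90 carry the regular
 * message ID, parameter and response, while C2PMSG_75/53/54 back the
 * debug message path used e.g. by DEBUGSMC_MSG_Mode1Reset above.
 */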
2500 static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
2501 {
2502         struct amdgpu_device *adev = smu->adev;
2503
2504         smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
2505         smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
2506         smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
2507
2508         smu->debug_param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_53);
2509         smu->debug_msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_75);
2510         smu->debug_resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_54);
2511 }
2512
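/*
 * RAS page retirement support: the two helpers below report the number
 * of retired memory pages and the per-channel bad-page flags to the
 * PMFW.  They are wired up as the send_hbm_bad_pages_num and
 * send_hbm_bad_channel_flag hooks in the pptable_funcs table at the
 * bottom of this file.
 */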
2513 static int smu_v13_0_0_smu_send_bad_mem_page_num(struct smu_context *smu,
2514                 uint32_t size)
2515 {
2516         int ret = 0;
2517
2518         /* message SMU to update the bad page number on SMUBUS */
2519         ret = smu_cmn_send_smc_msg_with_param(smu,
2520                                           SMU_MSG_SetNumBadMemoryPagesRetired,
2521                                           size, NULL);
2522         if (ret)
2523                 dev_err(smu->adev->dev,
2524                           "[%s] failed to message SMU to update bad memory pages number\n",
2525                           __func__);
2526
2527         return ret;
2528 }
2529
2530 static int smu_v13_0_0_send_bad_mem_channel_flag(struct smu_context *smu,
2531                 uint32_t size)
2532 {
2533         int ret = 0;
2534
2535         /* message SMU to update the bad channel info on SMUBUS */
2536         ret = smu_cmn_send_smc_msg_with_param(smu,
2537                                   SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
2538                                   size, NULL);
2539         if (ret)
2540                 dev_err(smu->adev->dev,
2541                           "[%s] failed to message SMU to update bad memory pages channel info\n",
2542                           __func__);
2543
2544         return ret;
2545 }
2546
2547 static int smu_v13_0_0_check_ecc_table_support(struct smu_context *smu)
2548 {
2549         struct amdgpu_device *adev = smu->adev;
2550         uint32_t if_version = 0xff, smu_version = 0xff;
2551         int ret = 0;
2552
2553         ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
2554         if (ret)
2555                 return -EOPNOTSUPP;
2556
2557         if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) &&
2558                 (smu_version >= SUPPORT_ECCTABLE_SMU_13_0_10_VERSION))
2559                 return ret;
2560         else
2561                 return -EOPNOTSUPP;
2562 }
2563
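/*
 * Export ECC info: after checking PMFW support (SMU 13.0.10 with a new
 * enough firmware only), pull SMU_TABLE_ECCINFO into
 * smu_table->ecc_table and copy the per-channel correctable error
 * counters and MCA status/address values into the caller's
 * struct umc_ecc_info.
 */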
2564 static ssize_t smu_v13_0_0_get_ecc_info(struct smu_context *smu,
2565                                         void *table)
2566 {
2567         struct smu_table_context *smu_table = &smu->smu_table;
2568         struct amdgpu_device *adev = smu->adev;
2569         EccInfoTable_t *ecc_table = NULL;
2570         struct ecc_info_per_ch *ecc_info_per_channel = NULL;
2571         int i, ret = 0;
2572         struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
2573
2574         ret = smu_v13_0_0_check_ecc_table_support(smu);
2575         if (ret)
2576                 return ret;
2577
2578         ret = smu_cmn_update_table(smu,
2579                                         SMU_TABLE_ECCINFO,
2580                                         0,
2581                                         smu_table->ecc_table,
2582                                         false);
2583         if (ret) {
2584                 dev_info(adev->dev, "Failed to export SMU ecc table!\n");
2585                 return ret;
2586         }
2587
2588         ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
2589
2590         for (i = 0; i < ARRAY_SIZE(ecc_table->EccInfo); i++) {
2591                 ecc_info_per_channel = &(eccinfo->ecc[i]);
2592                 ecc_info_per_channel->ce_count_lo_chip =
2593                                 ecc_table->EccInfo[i].ce_count_lo_chip;
2594                 ecc_info_per_channel->ce_count_hi_chip =
2595                                 ecc_table->EccInfo[i].ce_count_hi_chip;
2596                 ecc_info_per_channel->mca_umc_status =
2597                                 ecc_table->EccInfo[i].mca_umc_status;
2598                 ecc_info_per_channel->mca_umc_addr =
2599                                 ecc_table->EccInfo[i].mca_umc_addr;
2600         }
2601
2602         return ret;
2603 }
2604
2605 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
2606         .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
2607         .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
2608         .i2c_init = smu_v13_0_0_i2c_control_init,
2609         .i2c_fini = smu_v13_0_0_i2c_control_fini,
2610         .is_dpm_running = smu_v13_0_0_is_dpm_running,
2611         .dump_pptable = smu_v13_0_0_dump_pptable,
2612         .init_microcode = smu_v13_0_init_microcode,
2613         .load_microcode = smu_v13_0_load_microcode,
2614         .fini_microcode = smu_v13_0_fini_microcode,
2615         .init_smc_tables = smu_v13_0_0_init_smc_tables,
2616         .fini_smc_tables = smu_v13_0_fini_smc_tables,
2617         .init_power = smu_v13_0_init_power,
2618         .fini_power = smu_v13_0_fini_power,
2619         .check_fw_status = smu_v13_0_check_fw_status,
2620         .setup_pptable = smu_v13_0_0_setup_pptable,
2621         .check_fw_version = smu_v13_0_check_fw_version,
2622         .write_pptable = smu_cmn_write_pptable,
2623         .set_driver_table_location = smu_v13_0_set_driver_table_location,
2624         .system_features_control = smu_v13_0_0_system_features_control,
2625         .set_allowed_mask = smu_v13_0_set_allowed_mask,
2626         .get_enabled_mask = smu_cmn_get_enabled_mask,
2627         .dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
2628         .dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
2629         .get_dpm_ultimate_freq = smu_v13_0_0_get_dpm_ultimate_freq,
2630         .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
2631         .read_sensor = smu_v13_0_0_read_sensor,
2632         .feature_is_enabled = smu_cmn_feature_is_enabled,
2633         .print_clk_levels = smu_v13_0_0_print_clk_levels,
2634         .force_clk_levels = smu_v13_0_0_force_clk_levels,
2635         .update_pcie_parameters = smu_v13_0_update_pcie_parameters,
2636         .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range,
2637         .register_irq_handler = smu_v13_0_register_irq_handler,
2638         .enable_thermal_alert = smu_v13_0_enable_thermal_alert,
2639         .disable_thermal_alert = smu_v13_0_disable_thermal_alert,
2640         .notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
2641         .get_gpu_metrics = smu_v13_0_0_get_gpu_metrics,
2642         .set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
2643         .set_default_od_settings = smu_v13_0_0_set_default_od_settings,
2644         .restore_user_od_settings = smu_v13_0_0_restore_user_od_settings,
2645         .od_edit_dpm_table = smu_v13_0_0_od_edit_dpm_table,
2646         .init_pptable_microcode = smu_v13_0_init_pptable_microcode,
2647         .populate_umd_state_clk = smu_v13_0_0_populate_umd_state_clk,
2648         .set_performance_level = smu_v13_0_set_performance_level,
2649         .gfx_off_control = smu_v13_0_gfx_off_control,
2650         .get_unique_id = smu_v13_0_0_get_unique_id,
2651         .get_fan_speed_pwm = smu_v13_0_0_get_fan_speed_pwm,
2652         .get_fan_speed_rpm = smu_v13_0_0_get_fan_speed_rpm,
2653         .set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
2654         .set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
2655         .get_fan_control_mode = smu_v13_0_get_fan_control_mode,
2656         .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
2657         .enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
2658         .get_power_limit = smu_v13_0_0_get_power_limit,
2659         .set_power_limit = smu_v13_0_set_power_limit,
2660         .set_power_source = smu_v13_0_set_power_source,
2661         .get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
2662         .set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
2663         .run_btc = smu_v13_0_run_btc,
2664         .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2665         .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
2666         .set_tool_table_location = smu_v13_0_set_tool_table_location,
2667         .deep_sleep_control = smu_v13_0_deep_sleep_control,
2668         .gfx_ulv_control = smu_v13_0_gfx_ulv_control,
2669         .baco_is_support = smu_v13_0_baco_is_support,
2670         .baco_get_state = smu_v13_0_baco_get_state,
2671         .baco_set_state = smu_v13_0_baco_set_state,
2672         .baco_enter = smu_v13_0_0_baco_enter,
2673         .baco_exit = smu_v13_0_0_baco_exit,
2674         .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
2675         .mode1_reset = smu_v13_0_0_mode1_reset,
2676         .mode2_reset = smu_v13_0_0_mode2_reset,
2677         .enable_gfx_features = smu_v13_0_0_enable_gfx_features,
2678         .set_mp1_state = smu_v13_0_0_set_mp1_state,
2679         .set_df_cstate = smu_v13_0_0_set_df_cstate,
2680         .send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
2681         .send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
2682         .gpo_control = smu_v13_0_gpo_control,
2683         .get_ecc_info = smu_v13_0_0_get_ecc_info,
2684 };
2685
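/*
 * Hook up the SMU 13.0.0/13.0.10 specific callbacks and lookup maps for
 * the common SMU code: message, clock, feature, table, power source and
 * workload maps, the driver interface version, and the mailbox
 * registers programmed above.
 */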
2686 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
2687 {
2688         smu->ppt_funcs = &smu_v13_0_0_ppt_funcs;
2689         smu->message_map = smu_v13_0_0_message_map;
2690         smu->clock_map = smu_v13_0_0_clk_map;
2691         smu->feature_map = smu_v13_0_0_feature_mask_map;
2692         smu->table_map = smu_v13_0_0_table_map;
2693         smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
2694         smu->workload_map = smu_v13_0_0_workload_map;
2695         smu->smc_driver_if_version = SMU13_0_0_DRIVER_IF_VERSION;
2696         smu_v13_0_0_set_smu_mailbox_registers(smu);
2697 }