2 * Copyright 2013 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
27 #include "amdgpu_pm.h"
28 #include "amdgpu_ucode.h"
30 #include "amdgpu_dpm.h"
35 #include <linux/seq_file.h>
37 #include "smu/smu_7_0_1_d.h"
38 #include "smu/smu_7_0_1_sh_mask.h"
40 #include "dce/dce_8_0_d.h"
41 #include "dce/dce_8_0_sh_mask.h"
43 #include "bif/bif_4_1_d.h"
44 #include "bif/bif_4_1_sh_mask.h"
46 #include "gca/gfx_7_2_d.h"
47 #include "gca/gfx_7_2_sh_mask.h"
49 #include "gmc/gmc_7_1_d.h"
50 #include "gmc/gmc_7_1_sh_mask.h"
52 MODULE_FIRMWARE("radeon/bonaire_smc.bin");
53 MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
54 MODULE_FIRMWARE("radeon/hawaii_smc.bin");
55 MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
57 #define MC_CG_ARB_FREQ_F0 0x0a
58 #define MC_CG_ARB_FREQ_F1 0x0b
59 #define MC_CG_ARB_FREQ_F2 0x0c
60 #define MC_CG_ARB_FREQ_F3 0x0d
62 #define SMC_RAM_END 0x40000
64 #define VOLTAGE_SCALE 4
65 #define VOLTAGE_VID_OFFSET_SCALE1 625
66 #define VOLTAGE_VID_OFFSET_SCALE2 100
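/* Per-ASIC PowerTune defaults (SVI load line, TDC limit, DTE/BAPM parameters
 * and the BAPMTI_R/BAPMTI_RC tables); consumed by ci_populate_svi_load_line(),
 * ci_populate_tdc_limit(), ci_populate_dw8() and
 * ci_populate_bapm_parameters_in_dpm_table() below.
 */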
68 static const struct ci_pt_defaults defaults_hawaii_xt =
70 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
71 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
72 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
75 static const struct ci_pt_defaults defaults_hawaii_pro =
77 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
78 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
79 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
82 static const struct ci_pt_defaults defaults_bonaire_xt =
84 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
85 { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
86 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
90 static const struct ci_pt_defaults defaults_bonaire_pro =
92 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
93 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
94 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
98 static const struct ci_pt_defaults defaults_saturn_xt =
100 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
101 { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
102 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
106 static const struct ci_pt_defaults defaults_saturn_pro =
108 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
109 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
110 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
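/* DIDT (di/dt throttling) register programming list; each entry appears to be
 * { offset, mask, shift, value, register space } and is applied by
 * ci_program_pt_config_registers() through the DIDT indirect space.
 */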
114 static const struct ci_pt_config_reg didt_config_ci[] =
116 { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
117 { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
118 { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
119 { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
120 { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
121 { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
122 { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
123 { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
124 { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
125 { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
126 { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
127 { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
128 { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
129 { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
130 { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
131 { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
132 { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
133 { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
134 { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
135 { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
136 { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
137 { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
138 { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
139 { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
140 { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
141 { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
142 { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
143 { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
144 { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
145 { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
146 { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
147 { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
148 { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
149 { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
150 { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
151 { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
152 { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
153 { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
154 { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
155 { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
156 { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
157 { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
158 { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
159 { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
160 { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
161 { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
162 { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
163 { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
164 { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
165 { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
166 { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
167 { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
168 { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
169 { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
170 { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
171 { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
172 { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
173 { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
174 { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
175 { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
176 { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
177 { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
178 { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
179 { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
180 { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
181 { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
182 { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
183 { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
184 { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
185 { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
186 { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
187 { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
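/* The memory module index is reported by the VBIOS in bits 23:16 of
 * BIOS_SCRATCH_4.
 */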
191 static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
193 return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
196 #define MC_CG_ARB_FREQ_F0 0x0a
197 #define MC_CG_ARB_FREQ_F1 0x0b
198 #define MC_CG_ARB_FREQ_F2 0x0c
199 #define MC_CG_ARB_FREQ_F3 0x0d
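/* Copy the DRAM timing and burst-time parameters from one MC arbitration
 * register set (F0/F1) to another, then request that the memory controller
 * switch to the destination set via MC_ARB_CG.
 */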
201 static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
202 u32 arb_freq_src, u32 arb_freq_dest)
204 u32 mc_arb_dram_timing;
205 u32 mc_arb_dram_timing2;
209 switch (arb_freq_src) {
210 case MC_CG_ARB_FREQ_F0:
211 mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
212 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
213 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
214 MC_ARB_BURST_TIME__STATE0__SHIFT;
216 case MC_CG_ARB_FREQ_F1:
217 mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
218 mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
219 burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
220 MC_ARB_BURST_TIME__STATE1__SHIFT;
226 switch (arb_freq_dest) {
227 case MC_CG_ARB_FREQ_F0:
228 WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
229 WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
230 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
231 ~MC_ARB_BURST_TIME__STATE0_MASK);
233 case MC_CG_ARB_FREQ_F1:
234 WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
235 WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
236 WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
237 ~MC_ARB_BURST_TIME__STATE1_MASK);
243 mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
244 WREG32(mmMC_CG_CONFIG, mc_cg_config);
245 WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
246 ~MC_ARB_CG__CG_ARB_REQ_MASK);
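/* Map a memory clock to the 4-bit mc_para frequency-ratio index: one encoding
 * for DDR3 and a strobe-mode aware one for other memory types.
 */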
251 static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
255 if (memory_clock < 10000)
257 else if (memory_clock >= 80000)
258 mc_para_index = 0x0f;
260 mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
261 return mc_para_index;
264 static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
269 if (memory_clock < 12500)
270 mc_para_index = 0x00;
271 else if (memory_clock > 47500)
272 mc_para_index = 0x0f;
274 mc_para_index = (u8)((memory_clock - 10000) / 2500);
276 if (memory_clock < 65000)
277 mc_para_index = 0x00;
278 else if (memory_clock > 135000)
279 mc_para_index = 0x0f;
281 mc_para_index = (u8)((memory_clock - 60000) / 5000);
283 return mc_para_index;
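/* If the ATOM voltage table has more entries than the SMC state table can
 * hold, drop the first (count - max_voltage_steps) entries and keep the rest.
 */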
286 static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
287 u32 max_voltage_steps,
288 struct atom_voltage_table *voltage_table)
290 unsigned int i, diff;
292 if (voltage_table->count <= max_voltage_steps)
295 diff = voltage_table->count - max_voltage_steps;
297 for (i = 0; i < max_voltage_steps; i++)
298 voltage_table->entries[i] = voltage_table->entries[i + diff];
300 voltage_table->count = max_voltage_steps;
303 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
304 struct atom_voltage_table_entry *voltage_table,
305 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
306 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
307 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
309 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
310 static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
311 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
313 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
314 PPSMC_Msg msg, u32 parameter);
315 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
316 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
318 static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
320 struct ci_power_info *pi = adev->pm.dpm.priv;
325 static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
327 struct ci_ps *ps = rps->ps_priv;
332 static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
334 struct ci_power_info *pi = ci_get_pi(adev);
336 switch (adev->pdev->device) {
344 pi->powertune_defaults = &defaults_bonaire_xt;
350 pi->powertune_defaults = &defaults_saturn_xt;
354 pi->powertune_defaults = &defaults_hawaii_xt;
358 pi->powertune_defaults = &defaults_hawaii_pro;
368 pi->powertune_defaults = &defaults_bonaire_xt;
372 pi->dte_tj_offset = 0;
374 pi->caps_power_containment = true;
375 pi->caps_cac = false;
376 pi->caps_sq_ramping = false;
377 pi->caps_db_ramping = false;
378 pi->caps_td_ramping = false;
379 pi->caps_tcp_ramping = false;
381 if (pi->caps_power_containment) {
383 if (adev->asic_type == CHIP_HAWAII)
384 pi->enable_bapm_feature = false;
386 pi->enable_bapm_feature = true;
387 pi->enable_tdc_limit_feature = true;
388 pi->enable_pkg_pwr_tracking_feature = true;
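/* Convert a VDDC value in mV to an SVI2 VID code; with VOLTAGE_SCALE = 4 this
 * evaluates to (1550 mV - vddc) / 6.25 mV, i.e. an assumed encoding of
 * VID 0 = 1.55 V with 6.25 mV per step.
 */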
392 static u8 ci_convert_to_vid(u16 vddc)
394 return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
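/* Fill the BAPM VID (hi/lo/hi2) arrays of the SMC PowerTune fuse table from
 * the CAC leakage table; with EVV each entry carries three voltages,
 * otherwise a vddc/leakage pair.
 */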
397 static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
399 struct ci_power_info *pi = ci_get_pi(adev);
400 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
401 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
402 u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
405 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
407 if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
409 if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
410 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
413 for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
414 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
415 lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
416 hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
417 hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
419 lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
420 hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
426 static int ci_populate_vddc_vid(struct amdgpu_device *adev)
428 struct ci_power_info *pi = ci_get_pi(adev);
429 u8 *vid = pi->smc_powertune_table.VddCVid;
432 if (pi->vddc_voltage_table.count > 8)
435 for (i = 0; i < pi->vddc_voltage_table.count; i++)
436 vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
441 static int ci_populate_svi_load_line(struct amdgpu_device *adev)
443 struct ci_power_info *pi = ci_get_pi(adev);
444 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
446 pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
447 pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
448 pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
449 pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
454 static int ci_populate_tdc_limit(struct amdgpu_device *adev)
456 struct ci_power_info *pi = ci_get_pi(adev);
457 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
460 tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
461 pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
462 pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
463 pt_defaults->tdc_vddc_throttle_release_limit_perc;
464 pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
469 static int ci_populate_dw8(struct amdgpu_device *adev)
471 struct ci_power_info *pi = ci_get_pi(adev);
472 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
475 ret = amdgpu_ci_read_smc_sram_dword(adev,
476 SMU7_FIRMWARE_HEADER_LOCATION +
477 offsetof(SMU7_Firmware_Header, PmFuseTable) +
478 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
479 (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
484 pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
489 static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
491 struct ci_power_info *pi = ci_get_pi(adev);
493 if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
494 (adev->pm.dpm.fan.fan_output_sensitivity == 0))
495 adev->pm.dpm.fan.fan_output_sensitivity =
496 adev->pm.dpm.fan.default_fan_output_sensitivity;
498 pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
499 cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
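/* Derive GnbLPMLMaxVid/GnbLPMLMinVid from the non-zero entries of the BAPM
 * hi/lo VID arrays populated above.
 */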
504 static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
506 struct ci_power_info *pi = ci_get_pi(adev);
507 u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
508 u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
511 min = max = hi_vid[0];
512 for (i = 0; i < 8; i++) {
513 if (0 != hi_vid[i]) {
520 if (0 != lo_vid[i]) {
528 if ((min == 0) || (max == 0))
530 pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
531 pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
536 static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
538 struct ci_power_info *pi = ci_get_pi(adev);
539 u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
540 u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
541 struct amdgpu_cac_tdp_table *cac_tdp_table =
542 adev->pm.dpm.dyn_state.cac_tdp_table;
544 hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
545 lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
547 pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
548 pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
553 static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
555 struct ci_power_info *pi = ci_get_pi(adev);
556 const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
557 SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
558 struct amdgpu_cac_tdp_table *cac_tdp_table =
559 adev->pm.dpm.dyn_state.cac_tdp_table;
560 struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
565 dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
566 dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
568 dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
569 dpm_table->GpuTjMax =
570 (u8)(pi->thermal_temp_setting.temperature_high / 1000);
571 dpm_table->GpuTjHyst = 8;
573 dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
576 dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
577 dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
579 dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
580 dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
583 dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
584 def1 = pt_defaults->bapmti_r;
585 def2 = pt_defaults->bapmti_rc;
587 for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
588 for (j = 0; j < SMU7_DTE_SOURCES; j++) {
589 for (k = 0; k < SMU7_DTE_SINKS; k++) {
590 dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
591 dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
601 static int ci_populate_pm_base(struct amdgpu_device *adev)
603 struct ci_power_info *pi = ci_get_pi(adev);
604 u32 pm_fuse_table_offset;
607 if (pi->caps_power_containment) {
608 ret = amdgpu_ci_read_smc_sram_dword(adev,
609 SMU7_FIRMWARE_HEADER_LOCATION +
610 offsetof(SMU7_Firmware_Header, PmFuseTable),
611 &pm_fuse_table_offset, pi->sram_end);
614 ret = ci_populate_bapm_vddc_vid_sidd(adev);
617 ret = ci_populate_vddc_vid(adev);
620 ret = ci_populate_svi_load_line(adev);
623 ret = ci_populate_tdc_limit(adev);
626 ret = ci_populate_dw8(adev);
629 ret = ci_populate_fuzzy_fan(adev);
632 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
635 ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
638 ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
639 (u8 *)&pi->smc_powertune_table,
640 sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
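/* Toggle the DIDT_CTRL_EN bit of each DIDT block (SQ/DB/TD/TCP) whose ramping
 * cap is enabled.
 */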
648 static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
650 struct ci_power_info *pi = ci_get_pi(adev);
653 if (pi->caps_sq_ramping) {
654 data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
656 data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
658 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
659 WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
662 if (pi->caps_db_ramping) {
663 data = RREG32_DIDT(ixDIDT_DB_CTRL0);
665 data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
667 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
668 WREG32_DIDT(ixDIDT_DB_CTRL0, data);
671 if (pi->caps_td_ramping) {
672 data = RREG32_DIDT(ixDIDT_TD_CTRL0);
674 data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
676 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
677 WREG32_DIDT(ixDIDT_TD_CTRL0, data);
680 if (pi->caps_tcp_ramping) {
681 data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
683 data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
685 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
686 WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
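/* Walk a 0xFFFFFFFF-terminated list of ci_pt_config_reg entries and
 * read-modify-write each register through the appropriate space
 * (SMC indirect, DIDT indirect, or plain MMIO).
 */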
690 static int ci_program_pt_config_registers(struct amdgpu_device *adev,
691 const struct ci_pt_config_reg *cac_config_regs)
693 const struct ci_pt_config_reg *config_regs = cac_config_regs;
697 if (config_regs == NULL)
700 while (config_regs->offset != 0xFFFFFFFF) {
701 if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
702 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
704 switch (config_regs->type) {
705 case CISLANDS_CONFIGREG_SMC_IND:
706 data = RREG32_SMC(config_regs->offset);
708 case CISLANDS_CONFIGREG_DIDT_IND:
709 data = RREG32_DIDT(config_regs->offset);
712 data = RREG32(config_regs->offset);
716 data &= ~config_regs->mask;
717 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
720 switch (config_regs->type) {
721 case CISLANDS_CONFIGREG_SMC_IND:
722 WREG32_SMC(config_regs->offset, data);
724 case CISLANDS_CONFIGREG_DIDT_IND:
725 WREG32_DIDT(config_regs->offset, data);
728 WREG32(config_regs->offset, data);
738 static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
740 struct ci_power_info *pi = ci_get_pi(adev);
743 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
744 pi->caps_td_ramping || pi->caps_tcp_ramping) {
745 adev->gfx.rlc.funcs->enter_safe_mode(adev);
748 ret = ci_program_pt_config_registers(adev, didt_config_ci);
750 adev->gfx.rlc.funcs->exit_safe_mode(adev);
755 ci_do_enable_didt(adev, enable);
757 adev->gfx.rlc.funcs->exit_safe_mode(adev);
763 static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
765 struct ci_power_info *pi = ci_get_pi(adev);
766 PPSMC_Result smc_result;
770 pi->power_containment_features = 0;
771 if (pi->caps_power_containment) {
772 if (pi->enable_bapm_feature) {
773 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
774 if (smc_result != PPSMC_Result_OK)
777 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
780 if (pi->enable_tdc_limit_feature) {
781 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
782 if (smc_result != PPSMC_Result_OK)
785 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
788 if (pi->enable_pkg_pwr_tracking_feature) {
789 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
790 if (smc_result != PPSMC_Result_OK) {
793 struct amdgpu_cac_tdp_table *cac_tdp_table =
794 adev->pm.dpm.dyn_state.cac_tdp_table;
795 u32 default_pwr_limit =
796 (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
798 pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
800 ci_set_power_limit(adev, default_pwr_limit);
805 if (pi->caps_power_containment && pi->power_containment_features) {
806 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
807 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
809 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
810 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
812 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
813 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
814 pi->power_containment_features = 0;
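/* Ask the SMC to enable or disable CAC power estimation and track the result
 * in pi->cac_enabled.
 */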
821 static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
823 struct ci_power_info *pi = ci_get_pi(adev);
824 PPSMC_Result smc_result;
829 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
830 if (smc_result != PPSMC_Result_OK) {
832 pi->cac_enabled = false;
834 pi->cac_enabled = true;
836 } else if (pi->cac_enabled) {
837 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
838 pi->cac_enabled = false;
845 static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
848 struct ci_power_info *pi = ci_get_pi(adev);
849 PPSMC_Result smc_result = PPSMC_Result_OK;
851 if (pi->thermal_sclk_dpm_enabled) {
853 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
855 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
858 if (smc_result == PPSMC_Result_OK)
864 static int ci_power_control_set_level(struct amdgpu_device *adev)
866 struct ci_power_info *pi = ci_get_pi(adev);
867 struct amdgpu_cac_tdp_table *cac_tdp_table =
868 adev->pm.dpm.dyn_state.cac_tdp_table;
872 bool adjust_polarity = false; /* ??? */
874 if (pi->caps_power_containment) {
875 adjust_percent = adjust_polarity ?
876 adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
877 target_tdp = ((100 + adjust_percent) *
878 (s32)cac_tdp_table->configurable_tdp) / 100;
880 ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
886 static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
888 struct ci_power_info *pi = ci_get_pi(adev);
890 if (pi->uvd_power_gated == gate)
893 pi->uvd_power_gated = gate;
895 ci_update_uvd_dpm(adev, gate);
898 static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
900 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
901 u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
903 if (vblank_time < switch_limit)
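/* Clamp the requested power state against the AC/DC limits, force the VCE
 * clocks when VCE is active, and disable mclk switching when more than one
 * CRTC is active or the vblank period is too short to hide the switch.
 */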
910 static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
911 struct amdgpu_ps *rps)
913 struct ci_ps *ps = ci_get_ps(rps);
914 struct ci_power_info *pi = ci_get_pi(adev);
915 struct amdgpu_clock_and_voltage_limits *max_limits;
916 bool disable_mclk_switching;
920 if (rps->vce_active) {
921 rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
922 rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
928 if ((adev->pm.dpm.new_active_crtc_count > 1) ||
929 ci_dpm_vblank_too_short(adev))
930 disable_mclk_switching = true;
932 disable_mclk_switching = false;
934 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
935 pi->battery_state = true;
937 pi->battery_state = false;
939 if (adev->pm.dpm.ac_power)
940 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
942 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
944 if (adev->pm.dpm.ac_power == false) {
945 for (i = 0; i < ps->performance_level_count; i++) {
946 if (ps->performance_levels[i].mclk > max_limits->mclk)
947 ps->performance_levels[i].mclk = max_limits->mclk;
948 if (ps->performance_levels[i].sclk > max_limits->sclk)
949 ps->performance_levels[i].sclk = max_limits->sclk;
953 /* XXX validate the min clocks required for display */
955 if (disable_mclk_switching) {
956 mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
957 sclk = ps->performance_levels[0].sclk;
959 mclk = ps->performance_levels[0].mclk;
960 sclk = ps->performance_levels[0].sclk;
963 if (rps->vce_active) {
964 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
965 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
966 if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
967 mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
970 ps->performance_levels[0].sclk = sclk;
971 ps->performance_levels[0].mclk = mclk;
973 if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
974 ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
976 if (disable_mclk_switching) {
977 if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
978 ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
980 if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
981 ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
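/* Program the DIG_THERM_INTH/INTL thermal interrupt thresholds (in degrees C)
 * and record the range in adev->pm.dpm.thermal.
 */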
985 static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
986 int min_temp, int max_temp)
988 int low_temp = 0 * 1000;
989 int high_temp = 255 * 1000;
992 if (low_temp < min_temp)
994 if (high_temp > max_temp)
995 high_temp = max_temp;
996 if (high_temp < low_temp) {
997 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1001 tmp = RREG32_SMC(ixCG_THERMAL_INT);
1002 tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
1003 tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
1004 ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
1005 WREG32_SMC(ixCG_THERMAL_INT, tmp);
1008 /* XXX: need to figure out how to handle this properly */
1009 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1010 tmp &= ~DIG_THERM_DPM_MASK;
1011 tmp |= DIG_THERM_DPM(high_temp / 1000);
1012 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1015 adev->pm.dpm.thermal.min_temp = low_temp;
1016 adev->pm.dpm.thermal.max_temp = high_temp;
1020 static int ci_thermal_enable_alert(struct amdgpu_device *adev,
1023 u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
1024 PPSMC_Result result;
1027 thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1028 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1029 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1030 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
1031 if (result != PPSMC_Result_OK) {
1032 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1036 thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
1037 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
1038 WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
1039 result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
1040 if (result != PPSMC_Result_OK) {
1041 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
1049 static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
1051 struct ci_power_info *pi = ci_get_pi(adev);
1054 if (pi->fan_ctrl_is_in_default_mode) {
1055 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
1056 >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1057 pi->fan_ctrl_default_mode = tmp;
1058 tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
1059 >> CG_FDO_CTRL2__TMIN__SHIFT;
1061 pi->fan_ctrl_is_in_default_mode = false;
1064 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1065 tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
1066 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1068 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1069 tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1070 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
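/* Build an SMU7_Discrete_FanTable from the fan profile (temperature/PWM trip
 * points, slopes, hysteresis) and upload it to the SMC; fall back to manual
 * fan control if anything fails.
 */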
1073 static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
1075 struct ci_power_info *pi = ci_get_pi(adev);
1076 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
1078 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
1079 u16 fdo_min, slope1, slope2;
1080 u32 reference_clock, tmp;
1084 if (!pi->fan_table_start) {
1085 adev->pm.dpm.fan.ucode_fan_control = false;
1089 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1090 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1093 adev->pm.dpm.fan.ucode_fan_control = false;
1097 tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
1098 do_div(tmp64, 10000);
1099 fdo_min = (u16)tmp64;
1101 t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
1102 t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
1104 pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
1105 pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
1107 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
1108 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
1110 fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
1111 fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
1112 fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
1114 fan_table.Slope1 = cpu_to_be16(slope1);
1115 fan_table.Slope2 = cpu_to_be16(slope2);
1117 fan_table.FdoMin = cpu_to_be16(fdo_min);
1119 fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
1121 fan_table.HystUp = cpu_to_be16(1);
1123 fan_table.HystSlope = cpu_to_be16(1);
1125 fan_table.TempRespLim = cpu_to_be16(5);
1127 reference_clock = amdgpu_asic_get_xclk(adev);
1129 fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
1130 reference_clock) / 1600);
1132 fan_table.FdoMax = cpu_to_be16((u16)duty100);
1134 tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
1135 >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
1136 fan_table.TempSrc = (uint8_t)tmp;
1138 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1139 pi->fan_table_start,
1145 DRM_ERROR("Failed to load fan table to the SMC.");
1146 adev->pm.dpm.fan.ucode_fan_control = false;
1152 static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
1154 struct ci_power_info *pi = ci_get_pi(adev);
1157 if (pi->caps_od_fuzzy_fan_control_support) {
1158 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1159 PPSMC_StartFanControl,
1161 if (ret != PPSMC_Result_OK)
1163 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1164 PPSMC_MSG_SetFanPwmMax,
1165 adev->pm.dpm.fan.default_max_fan_pwm);
1166 if (ret != PPSMC_Result_OK)
1169 ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
1170 PPSMC_StartFanControl,
1172 if (ret != PPSMC_Result_OK)
1176 pi->fan_is_controlled_by_smc = true;
1181 static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
1184 struct ci_power_info *pi = ci_get_pi(adev);
1186 ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
1187 if (ret == PPSMC_Result_OK) {
1188 pi->fan_is_controlled_by_smc = false;
1195 static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
1201 if (adev->pm.no_fan)
1204 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1205 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1206 duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
1207 >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
1212 tmp64 = (u64)duty * 100;
1213 do_div(tmp64, duty100);
1214 *speed = (u32)tmp64;
1222 static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
1228 struct ci_power_info *pi = ci_get_pi(adev);
1230 if (adev->pm.no_fan)
1233 if (pi->fan_is_controlled_by_smc)
1239 duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
1240 >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
1245 tmp64 = (u64)speed * duty100;
1249 tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
1250 tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
1251 WREG32_SMC(ixCG_FDO_CTRL0, tmp);
1256 static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
1259 /* stop auto-manage */
1260 if (adev->pm.dpm.fan.ucode_fan_control)
1261 ci_fan_ctrl_stop_smc_fan_control(adev);
1262 ci_fan_ctrl_set_static_mode(adev, mode);
1264 /* restart auto-manage */
1265 if (adev->pm.dpm.fan.ucode_fan_control)
1266 ci_thermal_start_smc_fan_control(adev);
1268 ci_fan_ctrl_set_default_mode(adev);
1272 static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
1274 struct ci_power_info *pi = ci_get_pi(adev);
1277 if (pi->fan_is_controlled_by_smc)
1280 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1281 return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
1285 static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
1289 u32 xclk = amdgpu_asic_get_xclk(adev);
1291 if (adev->pm.no_fan)
1294 if (adev->pm.fan_pulses_per_revolution == 0)
1297 tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
1298 >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
1299 if (tach_period == 0)
1302 *speed = 60 * xclk * 10000 / tach_period;
1307 static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
1310 u32 tach_period, tmp;
1311 u32 xclk = amdgpu_asic_get_xclk(adev);
1313 if (adev->pm.no_fan)
1316 if (adev->pm.fan_pulses_per_revolution == 0)
1319 if ((speed < adev->pm.fan_min_rpm) ||
1320 (speed > adev->pm.fan_max_rpm))
1323 if (adev->pm.dpm.fan.ucode_fan_control)
1324 ci_fan_ctrl_stop_smc_fan_control(adev);
1326 tach_period = 60 * xclk * 10000 / (8 * speed);
1327 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
1328 tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1329 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1331 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1337 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1339 struct ci_power_info *pi = ci_get_pi(adev);
1342 if (!pi->fan_ctrl_is_in_default_mode) {
1343 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1344 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1345 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1347 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1348 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1349 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1350 pi->fan_ctrl_is_in_default_mode = true;
1354 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1356 if (adev->pm.dpm.fan.ucode_fan_control) {
1357 ci_fan_ctrl_start_smc_fan_control(adev);
1358 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1362 static void ci_thermal_initialize(struct amdgpu_device *adev)
1366 if (adev->pm.fan_pulses_per_revolution) {
1367 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1368 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1369 << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1370 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1373 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1374 tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1375 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1378 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1382 ci_thermal_initialize(adev);
1383 ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1386 ret = ci_thermal_enable_alert(adev, true);
1389 if (adev->pm.dpm.fan.ucode_fan_control) {
1390 ret = ci_thermal_setup_fan_table(adev);
1393 ci_thermal_start_smc_fan_control(adev);
1399 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1401 if (!adev->pm.no_fan)
1402 ci_fan_ctrl_set_default_mode(adev);
1405 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1406 u16 reg_offset, u32 *value)
1408 struct ci_power_info *pi = ci_get_pi(adev);
1410 return amdgpu_ci_read_smc_sram_dword(adev,
1411 pi->soft_regs_start + reg_offset,
1412 value, pi->sram_end);
1415 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1416 u16 reg_offset, u32 value)
1418 struct ci_power_info *pi = ci_get_pi(adev);
1420 return amdgpu_ci_write_smc_sram_dword(adev,
1421 pi->soft_regs_start + reg_offset,
1422 value, pi->sram_end);
1425 static void ci_init_fps_limits(struct amdgpu_device *adev)
1427 struct ci_power_info *pi = ci_get_pi(adev);
1428 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1434 table->FpsHighT = cpu_to_be16(tmp);
1437 table->FpsLowT = cpu_to_be16(tmp);
1441 static int ci_update_sclk_t(struct amdgpu_device *adev)
1443 struct ci_power_info *pi = ci_get_pi(adev);
1445 u32 low_sclk_interrupt_t = 0;
1447 if (pi->caps_sclk_throttle_low_notification) {
1448 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1450 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1451 pi->dpm_table_start +
1452 offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1453 (u8 *)&low_sclk_interrupt_t,
1454 sizeof(u32), pi->sram_end);
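/* Query the VBIOS for the actual voltages behind the virtual (leakage)
 * voltage IDs, either via EVV or via the leakage-parameter tables.
 */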
1461 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1463 struct ci_power_info *pi = ci_get_pi(adev);
1464 u16 leakage_id, virtual_voltage_id;
1468 pi->vddc_leakage.count = 0;
1469 pi->vddci_leakage.count = 0;
1471 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1472 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1473 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1474 if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1476 if (vddc != 0 && vddc != virtual_voltage_id) {
1477 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1478 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1479 pi->vddc_leakage.count++;
1482 } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1483 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1484 virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1485 if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1488 if (vddc != 0 && vddc != virtual_voltage_id) {
1489 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1490 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1491 pi->vddc_leakage.count++;
1493 if (vddci != 0 && vddci != virtual_voltage_id) {
1494 pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1495 pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1496 pi->vddci_leakage.count++;
1503 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1505 struct ci_power_info *pi = ci_get_pi(adev);
1506 bool want_thermal_protection;
1507 enum amdgpu_dpm_event_src dpm_event_src;
1513 want_thermal_protection = false;
1515 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1516 want_thermal_protection = true;
1517 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1519 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1520 want_thermal_protection = true;
1521 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1523 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1524 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1525 want_thermal_protection = true;
1526 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1530 if (want_thermal_protection) {
1532 /* XXX: need to figure out how to handle this properly */
1533 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1534 tmp &= ~DPM_EVENT_SRC_MASK;
1535 tmp |= DPM_EVENT_SRC(dpm_event_src);
1536 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1539 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1540 if (pi->thermal_protection)
1541 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1543 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1544 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1546 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1547 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1548 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1552 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1553 enum amdgpu_dpm_auto_throttle_src source,
1556 struct ci_power_info *pi = ci_get_pi(adev);
1559 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1560 pi->active_auto_throttle_sources |= 1 << source;
1561 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1564 if (pi->active_auto_throttle_sources & (1 << source)) {
1565 pi->active_auto_throttle_sources &= ~(1 << source);
1566 ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1571 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1573 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1574 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1577 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1579 struct ci_power_info *pi = ci_get_pi(adev);
1580 PPSMC_Result smc_result;
1582 if (!pi->need_update_smu7_dpm_table)
1585 if ((!pi->sclk_dpm_key_disabled) &&
1586 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1587 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1588 if (smc_result != PPSMC_Result_OK)
1592 if ((!pi->mclk_dpm_key_disabled) &&
1593 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1594 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1595 if (smc_result != PPSMC_Result_OK)
1599 pi->need_update_smu7_dpm_table = 0;
1603 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1605 struct ci_power_info *pi = ci_get_pi(adev);
1606 PPSMC_Result smc_result;
1609 if (!pi->sclk_dpm_key_disabled) {
1610 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1611 if (smc_result != PPSMC_Result_OK)
1615 if (!pi->mclk_dpm_key_disabled) {
1616 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1617 if (smc_result != PPSMC_Result_OK)
1620 WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1621 ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1623 WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1624 WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1625 WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1629 WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1630 WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1631 WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1634 if (!pi->sclk_dpm_key_disabled) {
1635 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1636 if (smc_result != PPSMC_Result_OK)
1640 if (!pi->mclk_dpm_key_disabled) {
1641 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1642 if (smc_result != PPSMC_Result_OK)
1650 static int ci_start_dpm(struct amdgpu_device *adev)
1652 struct ci_power_info *pi = ci_get_pi(adev);
1653 PPSMC_Result smc_result;
1657 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1658 tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1659 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1661 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1662 tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1663 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1665 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1667 WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1669 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1670 if (smc_result != PPSMC_Result_OK)
1673 ret = ci_enable_sclk_mclk_dpm(adev, true);
1677 if (!pi->pcie_dpm_key_disabled) {
1678 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1679 if (smc_result != PPSMC_Result_OK)
1686 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1688 struct ci_power_info *pi = ci_get_pi(adev);
1689 PPSMC_Result smc_result;
1691 if (!pi->need_update_smu7_dpm_table)
1694 if ((!pi->sclk_dpm_key_disabled) &&
1695 (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1696 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1697 if (smc_result != PPSMC_Result_OK)
1701 if ((!pi->mclk_dpm_key_disabled) &&
1702 (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1703 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1704 if (smc_result != PPSMC_Result_OK)
1711 static int ci_stop_dpm(struct amdgpu_device *adev)
1713 struct ci_power_info *pi = ci_get_pi(adev);
1714 PPSMC_Result smc_result;
1718 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1719 tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1720 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1722 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1723 tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1724 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1726 if (!pi->pcie_dpm_key_disabled) {
1727 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1728 if (smc_result != PPSMC_Result_OK)
1732 ret = ci_enable_sclk_mclk_dpm(adev, false);
1736 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1737 if (smc_result != PPSMC_Result_OK)
1743 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1745 u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1748 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1750 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1751 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1755 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1758 struct ci_power_info *pi = ci_get_pi(adev);
1759 struct amdgpu_cac_tdp_table *cac_tdp_table =
1760 adev->pm.dpm.dyn_state.cac_tdp_table;
1764 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1766 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1768 ci_set_power_limit(adev, power_limit);
1770 if (pi->caps_automatic_dc_transition) {
1772 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1774 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1781 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1782 PPSMC_Msg msg, u32 parameter)
1784 WREG32(mmSMC_MSG_ARG_0, parameter);
1785 return amdgpu_ci_send_msg_to_smc(adev, msg);
1788 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1789 PPSMC_Msg msg, u32 *parameter)
1791 PPSMC_Result smc_result;
1793 smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1795 if ((smc_result == PPSMC_Result_OK) && parameter)
1796 *parameter = RREG32(mmSMC_MSG_ARG_0);
1801 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1803 struct ci_power_info *pi = ci_get_pi(adev);
1805 if (!pi->sclk_dpm_key_disabled) {
1806 PPSMC_Result smc_result =
1807 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1808 if (smc_result != PPSMC_Result_OK)
1815 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1817 struct ci_power_info *pi = ci_get_pi(adev);
1819 if (!pi->mclk_dpm_key_disabled) {
1820 PPSMC_Result smc_result =
1821 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1822 if (smc_result != PPSMC_Result_OK)
1829 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1831 struct ci_power_info *pi = ci_get_pi(adev);
1833 if (!pi->pcie_dpm_key_disabled) {
1834 PPSMC_Result smc_result =
1835 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1836 if (smc_result != PPSMC_Result_OK)
1843 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1845 struct ci_power_info *pi = ci_get_pi(adev);
1847 if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1848 PPSMC_Result smc_result =
1849 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1850 if (smc_result != PPSMC_Result_OK)
1857 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1860 PPSMC_Result smc_result =
1861 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1862 if (smc_result != PPSMC_Result_OK)
1868 static int ci_set_boot_state(struct amdgpu_device *adev)
1870 return ci_enable_sclk_mclk_dpm(adev, false);
1874 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1877 PPSMC_Result smc_result =
1878 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1879 PPSMC_MSG_API_GetSclkFrequency,
1881 if (smc_result != PPSMC_Result_OK)
1887 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1890 PPSMC_Result smc_result =
1891 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1892 PPSMC_MSG_API_GetMclkFrequency,
1894 if (smc_result != PPSMC_Result_OK)
1900 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1904 amdgpu_ci_program_jump_on_start(adev);
1905 amdgpu_ci_start_smc_clock(adev);
1906 amdgpu_ci_start_smc(adev);
1907 for (i = 0; i < adev->usec_timeout; i++) {
1908 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1913 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1915 amdgpu_ci_reset_smc(adev);
1916 amdgpu_ci_stop_smc_clock(adev);
1919 static int ci_process_firmware_header(struct amdgpu_device *adev)
1921 struct ci_power_info *pi = ci_get_pi(adev);
1925 ret = amdgpu_ci_read_smc_sram_dword(adev,
1926 SMU7_FIRMWARE_HEADER_LOCATION +
1927 offsetof(SMU7_Firmware_Header, DpmTable),
1928 &tmp, pi->sram_end);
1932 pi->dpm_table_start = tmp;
1934 ret = amdgpu_ci_read_smc_sram_dword(adev,
1935 SMU7_FIRMWARE_HEADER_LOCATION +
1936 offsetof(SMU7_Firmware_Header, SoftRegisters),
1937 &tmp, pi->sram_end);
1941 pi->soft_regs_start = tmp;
1943 ret = amdgpu_ci_read_smc_sram_dword(adev,
1944 SMU7_FIRMWARE_HEADER_LOCATION +
1945 offsetof(SMU7_Firmware_Header, mcRegisterTable),
1946 &tmp, pi->sram_end);
1950 pi->mc_reg_table_start = tmp;
1952 ret = amdgpu_ci_read_smc_sram_dword(adev,
1953 SMU7_FIRMWARE_HEADER_LOCATION +
1954 offsetof(SMU7_Firmware_Header, FanTable),
1955 &tmp, pi->sram_end);
1959 pi->fan_table_start = tmp;
1961 ret = amdgpu_ci_read_smc_sram_dword(adev,
1962 SMU7_FIRMWARE_HEADER_LOCATION +
1963 offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1964 &tmp, pi->sram_end);
1968 pi->arb_table_start = tmp;
1973 static void ci_read_clock_registers(struct amdgpu_device *adev)
1975 struct ci_power_info *pi = ci_get_pi(adev);
1977 pi->clock_registers.cg_spll_func_cntl =
1978 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1979 pi->clock_registers.cg_spll_func_cntl_2 =
1980 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1981 pi->clock_registers.cg_spll_func_cntl_3 =
1982 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1983 pi->clock_registers.cg_spll_func_cntl_4 =
1984 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1985 pi->clock_registers.cg_spll_spread_spectrum =
1986 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1987 pi->clock_registers.cg_spll_spread_spectrum_2 =
1988 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1989 pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1990 pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1991 pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1992 pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1993 pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1994 pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1995 pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1996 pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
1997 pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
2000 static void ci_init_sclk_t(struct amdgpu_device *adev)
2002 struct ci_power_info *pi = ci_get_pi(adev);
2004 pi->low_sclk_interrupt_t = 0;
2007 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2010 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2013 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2015 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2016 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2019 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2021 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2023 tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2025 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2029 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2032 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2039 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2043 WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2047 for (i = 0; i < adev->usec_timeout; i++) {
2048 if (RREG32(mmSMC_RESP_0) == 1)
2057 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2060 PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2062 return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
2065 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2068 struct ci_power_info *pi = ci_get_pi(adev);
2071 if (pi->caps_sclk_ds) {
2072 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2075 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2079 if (pi->caps_sclk_ds) {
2080 if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2088 static void ci_program_display_gap(struct amdgpu_device *adev)
2090 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2091 u32 pre_vbi_time_in_us;
2092 u32 frame_time_in_us;
2093 u32 ref_clock = adev->clock.spll.reference_freq;
2094 u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2095 u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2097 tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2098 if (adev->pm.dpm.new_active_crtc_count > 0)
2099 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2101 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2102 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2104 if (refresh_rate == 0)
2106 if (vblank_time == 0xffffffff)
2108 frame_time_in_us = 1000000 / refresh_rate;
2109 pre_vbi_time_in_us =
2110 frame_time_in_us - 200 - vblank_time;
2111 tmp = pre_vbi_time_in_us * (ref_clock / 100);
2113 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2114 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2115 ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2118 ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
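/* Enable or disable dynamic spread spectrum on the engine PLL: set
 * DYN_SPREAD_SPECTRUM_EN when the board supports sclk SS, or clear both the
 * SPLL SSEN bit and the dynamic enable when turning it off.
 */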
2122 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2124 struct ci_power_info *pi = ci_get_pi(adev);
2128 if (pi->caps_sclk_ss_support) {
2129 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2130 tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2131 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2134 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2135 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2136 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2138 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2139 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2140 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2144 static void ci_program_sstp(struct amdgpu_device *adev)
2146 WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2147 ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2148 (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2151 static void ci_enable_display_gap(struct amdgpu_device *adev)
2153 u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2155 tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2156 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2157 tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2158 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2160 WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2163 static void ci_program_vc(struct amdgpu_device *adev)
2167 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2168 tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2169 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2171 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2172 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2173 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2174 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2175 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2176 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2177 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2178 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2181 static void ci_clear_vc(struct amdgpu_device *adev)
2185 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2186 tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2187 WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2189 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2190 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2191 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2192 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2193 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2194 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2195 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2196 WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
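/* Wait for the SMC boot sequence to complete, stop the SMC clock and hold the
 * SMC in reset, then load the SMC microcode into SMC SRAM.
 */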
2199 static int ci_upload_firmware(struct amdgpu_device *adev)
2201 struct ci_power_info *pi = ci_get_pi(adev);
2204 for (i = 0; i < adev->usec_timeout; i++) {
2205 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2208 WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2210 amdgpu_ci_stop_smc_clock(adev);
2211 amdgpu_ci_reset_smc(adev);
2213 ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2219 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2220 struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2221 struct atom_voltage_table *voltage_table)
2225 if (voltage_dependency_table == NULL)
2228 voltage_table->mask_low = 0;
2229 voltage_table->phase_delay = 0;
2231 voltage_table->count = voltage_dependency_table->count;
2232 for (i = 0; i < voltage_table->count; i++) {
2233 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2234 voltage_table->entries[i].smio_low = 0;
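/* Build the VDDC/VDDCI/MVDD voltage tables either from the ATOM GPIO lookup
 * tables or, for SVI2 control, from the clock/voltage dependency tables, and
 * trim each table to the number of levels the SMU state table can hold.
 */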
2240 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2242 struct ci_power_info *pi = ci_get_pi(adev);
2245 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2246 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2247 VOLTAGE_OBJ_GPIO_LUT,
2248 &pi->vddc_voltage_table);
2251 } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2252 ret = ci_get_svi2_voltage_table(adev,
2253 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2254 &pi->vddc_voltage_table);
2259 if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2260 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2261 &pi->vddc_voltage_table);
2263 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2264 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2265 VOLTAGE_OBJ_GPIO_LUT,
2266 &pi->vddci_voltage_table);
2269 } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2270 ret = ci_get_svi2_voltage_table(adev,
2271 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2272 &pi->vddci_voltage_table);
2277 if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2278 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2279 &pi->vddci_voltage_table);
2281 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2282 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2283 VOLTAGE_OBJ_GPIO_LUT,
2284 &pi->mvdd_voltage_table);
2287 } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2288 ret = ci_get_svi2_voltage_table(adev,
2289 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2290 &pi->mvdd_voltage_table);
2295 if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2296 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2297 &pi->mvdd_voltage_table);
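/* Fill one SMC voltage level: look up the standard hi/lo SIDD values for the
 * entry (falling back to the raw voltage on failure) and store the results in
 * the big-endian form the SMC expects.
 */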
2302 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2303 struct atom_voltage_table_entry *voltage_table,
2304 SMU7_Discrete_VoltageLevel *smc_voltage_table)
2308 ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2309 &smc_voltage_table->StdVoltageHiSidd,
2310 &smc_voltage_table->StdVoltageLoSidd);
2313 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2314 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2317 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2318 smc_voltage_table->StdVoltageHiSidd =
2319 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2320 smc_voltage_table->StdVoltageLoSidd =
2321 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2324 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2325 SMU7_Discrete_DpmTable *table)
2327 struct ci_power_info *pi = ci_get_pi(adev);
2330 table->VddcLevelCount = pi->vddc_voltage_table.count;
2331 for (count = 0; count < table->VddcLevelCount; count++) {
2332 ci_populate_smc_voltage_table(adev,
2333 &pi->vddc_voltage_table.entries[count],
2334 &table->VddcLevel[count]);
2336 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2337 table->VddcLevel[count].Smio |=
2338 pi->vddc_voltage_table.entries[count].smio_low;
2340 table->VddcLevel[count].Smio = 0;
2342 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2347 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2348 SMU7_Discrete_DpmTable *table)
2351 struct ci_power_info *pi = ci_get_pi(adev);
2353 table->VddciLevelCount = pi->vddci_voltage_table.count;
2354 for (count = 0; count < table->VddciLevelCount; count++) {
2355 ci_populate_smc_voltage_table(adev,
2356 &pi->vddci_voltage_table.entries[count],
2357 &table->VddciLevel[count]);
2359 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2360 table->VddciLevel[count].Smio |=
2361 pi->vddci_voltage_table.entries[count].smio_low;
2363 table->VddciLevel[count].Smio = 0;
2365 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2370 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2371 SMU7_Discrete_DpmTable *table)
2373 struct ci_power_info *pi = ci_get_pi(adev);
2376 table->MvddLevelCount = pi->mvdd_voltage_table.count;
2377 for (count = 0; count < table->MvddLevelCount; count++) {
2378 ci_populate_smc_voltage_table(adev,
2379 &pi->mvdd_voltage_table.entries[count],
2380 &table->MvddLevel[count]);
2382 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2383 table->MvddLevel[count].Smio |=
2384 pi->mvdd_voltage_table.entries[count].smio_low;
2386 table->MvddLevel[count].Smio = 0;
2388 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2393 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2394 SMU7_Discrete_DpmTable *table)
2398 ret = ci_populate_smc_vddc_table(adev, table);
2402 ret = ci_populate_smc_vddci_table(adev, table);
2406 ret = ci_populate_smc_mvdd_table(adev, table);
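/* Select the MVDD level for a memory clock: use the first entry of the
 * mvdd-vs-mclk dependency table whose clock covers the requested mclk and take
 * the matching value from the MVDD voltage table.
 */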
2413 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2414 SMU7_Discrete_VoltageLevel *voltage)
2416 struct ci_power_info *pi = ci_get_pi(adev);
2419 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2420 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2421 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2422 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2427 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2434 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2435 struct atom_voltage_table_entry *voltage_table,
2436 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2439 bool voltage_found = false;
2440 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2441 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2443 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2446 if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2447 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2448 if (voltage_table->value ==
2449 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2450 voltage_found = true;
2451 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2454 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2455 *std_voltage_lo_sidd =
2456 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2457 *std_voltage_hi_sidd =
2458 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2463 if (!voltage_found) {
2464 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2465 if (voltage_table->value <=
2466 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2467 voltage_found = true;
2468 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2471 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2472 *std_voltage_lo_sidd =
2473 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2474 *std_voltage_hi_sidd =
2475 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2485 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2486 const struct amdgpu_phase_shedding_limits_table *limits,
2488 u32 *phase_shedding)
2492 *phase_shedding = 1;
2494 for (i = 0; i < limits->count; i++) {
2495 if (sclk < limits->entries[i].sclk) {
2496 *phase_shedding = i;
2502 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2503 const struct amdgpu_phase_shedding_limits_table *limits,
2505 u32 *phase_shedding)
2509 *phase_shedding = 1;
2511 for (i = 0; i < limits->count; i++) {
2512 if (mclk < limits->entries[i].mclk) {
2513 *phase_shedding = i;
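/* Point the MC arbitration table index in SMC SRAM at the F1 register set by
 * writing MC_CG_ARB_FREQ_F1 into the top byte of the index word.
 */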
2519 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2521 struct ci_power_info *pi = ci_get_pi(adev);
2525 ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2526 &tmp, pi->sram_end);
2531 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2533 return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2537 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2538 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2539 u32 clock, u32 *voltage)
2543 if (allowed_clock_voltage_table->count == 0)
2546 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2547 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2548 *voltage = allowed_clock_voltage_table->entries[i].v;
2553 *voltage = allowed_clock_voltage_table->entries[i-1].v;
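/* Return the deepest sleep divider ID that still keeps the divided engine
 * clock at or above the minimum (the larger of min_sclk_in_sr and
 * CISLAND_MINIMUM_ENGINE_CLOCK).
 */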
2558 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2562 u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2567 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2569 if (tmp >= min || i == 0)
2576 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2578 return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2581 static int ci_reset_to_default(struct amdgpu_device *adev)
2583 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2587 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2591 tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2593 if (tmp == MC_CG_ARB_FREQ_F0)
2596 return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2599 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2600 const u32 engine_clock,
2601 const u32 memory_clock,
2607 tmp = RREG32(mmMC_SEQ_MISC0);
2608 patch = ((tmp & 0x0000f00) == 0x300);
2611 ((adev->pdev->device == 0x67B0) ||
2612 (adev->pdev->device == 0x67B1))) {
2613 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2614 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2615 *dram_timimg2 &= ~0x00ff0000;
2616 *dram_timimg2 |= tmp2 << 16;
2617 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2618 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2619 *dram_timimg2 &= ~0x00ff0000;
2620 *dram_timimg2 |= tmp2 << 16;
2625 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2628 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2634 amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2636 dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
2637 dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2638 burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2640 ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2642 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2643 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2644 arb_regs->McArbBurstTime = (u8)burst_time;
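/* Build an MC arbitration DRAM timing entry for every sclk/mclk pair in the
 * DPM tables and copy the resulting table into SMC SRAM.
 */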
2649 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2651 struct ci_power_info *pi = ci_get_pi(adev);
2652 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2656 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2658 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2659 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2660 ret = ci_populate_memory_timing_parameters(adev,
2661 pi->dpm_table.sclk_table.dpm_levels[i].value,
2662 pi->dpm_table.mclk_table.dpm_levels[j].value,
2663 &arb_regs.entries[i][j]);
2670 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2671 pi->arb_table_start,
2673 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2679 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2681 struct ci_power_info *pi = ci_get_pi(adev);
2683 if (pi->need_update_smu7_dpm_table == 0)
2686 return ci_do_program_memory_timing_parameters(adev);
2689 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2690 struct amdgpu_ps *amdgpu_boot_state)
2692 struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2693 struct ci_power_info *pi = ci_get_pi(adev);
2696 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2697 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2698 boot_state->performance_levels[0].sclk) {
2699 pi->smc_state_table.GraphicsBootLevel = level;
2704 for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2705 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2706 boot_state->performance_levels[0].mclk) {
2707 pi->smc_state_table.MemoryBootLevel = level;
2713 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2718 for (i = dpm_table->count; i > 0; i--) {
2719 mask_value = mask_value << 1;
2720 if (dpm_table->dpm_levels[i-1].enabled)
2723 mask_value &= 0xFFFFFFFE;
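/* Fill the PCIe link DPM levels (gen speed plus encoded lane count) and derive
 * the PCIe DPM level enable mask.
 */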
2729 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2730 SMU7_Discrete_DpmTable *table)
2732 struct ci_power_info *pi = ci_get_pi(adev);
2733 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2736 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2737 table->LinkLevel[i].PcieGenSpeed =
2738 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2739 table->LinkLevel[i].PcieLaneCount =
2740 amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2741 table->LinkLevel[i].EnabledForActivity = 1;
2742 table->LinkLevel[i].DownT = cpu_to_be32(5);
2743 table->LinkLevel[i].UpT = cpu_to_be32(30);
2746 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2747 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2748 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
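/* The UVD/VCE/ACP/SAMU level populators below share one pattern: copy the
 * clock/voltage pairs from the matching dependency table, query the VBIOS for
 * each clock's post divider, and convert the result to big-endian for the SMC.
 */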
2751 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2752 SMU7_Discrete_DpmTable *table)
2755 struct atom_clock_dividers dividers;
2758 table->UvdLevelCount =
2759 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2761 for (count = 0; count < table->UvdLevelCount; count++) {
2762 table->UvdLevel[count].VclkFrequency =
2763 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2764 table->UvdLevel[count].DclkFrequency =
2765 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2766 table->UvdLevel[count].MinVddc =
2767 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2768 table->UvdLevel[count].MinVddcPhases = 1;
2770 ret = amdgpu_atombios_get_clock_dividers(adev,
2771 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2772 table->UvdLevel[count].VclkFrequency, false, &dividers);
2776 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2778 ret = amdgpu_atombios_get_clock_dividers(adev,
2779 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2780 table->UvdLevel[count].DclkFrequency, false, &dividers);
2784 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2786 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2787 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2788 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2794 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2795 SMU7_Discrete_DpmTable *table)
2798 struct atom_clock_dividers dividers;
2801 table->VceLevelCount =
2802 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2804 for (count = 0; count < table->VceLevelCount; count++) {
2805 table->VceLevel[count].Frequency =
2806 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2807 table->VceLevel[count].MinVoltage =
2808 (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2809 table->VceLevel[count].MinPhases = 1;
2811 ret = amdgpu_atombios_get_clock_dividers(adev,
2812 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2813 table->VceLevel[count].Frequency, false, &dividers);
2817 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2819 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2820 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2827 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2828 SMU7_Discrete_DpmTable *table)
2831 struct atom_clock_dividers dividers;
2834 table->AcpLevelCount = (u8)
2835 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2837 for (count = 0; count < table->AcpLevelCount; count++) {
2838 table->AcpLevel[count].Frequency =
2839 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2840 table->AcpLevel[count].MinVoltage =
2841 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2842 table->AcpLevel[count].MinPhases = 1;
2844 ret = amdgpu_atombios_get_clock_dividers(adev,
2845 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2846 table->AcpLevel[count].Frequency, false, &dividers);
2850 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2852 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2853 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2859 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2860 SMU7_Discrete_DpmTable *table)
2863 struct atom_clock_dividers dividers;
2866 table->SamuLevelCount =
2867 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2869 for (count = 0; count < table->SamuLevelCount; count++) {
2870 table->SamuLevel[count].Frequency =
2871 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2872 table->SamuLevel[count].MinVoltage =
2873 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2874 table->SamuLevel[count].MinPhases = 1;
2876 ret = amdgpu_atombios_get_clock_dividers(adev,
2877 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2878 table->SamuLevel[count].Frequency, false, &dividers);
2882 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2884 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2885 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
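/* Compute the MPLL register set for a target memory clock: program the
 * feedback and post dividers returned by the VBIOS, optionally add memory
 * spread spectrum, and set the DLL speed and MRDCK power-down bits.
 */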
2891 static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2893 SMU7_Discrete_MemoryLevel *mclk,
2897 struct ci_power_info *pi = ci_get_pi(adev);
2898 u32 dll_cntl = pi->clock_registers.dll_cntl;
2899 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2900 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2901 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2902 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2903 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2904 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2905 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2906 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2907 struct atom_mpll_param mpll_param;
2910 ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2914 mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2915 mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2917 mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2918 MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2919 mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
2920 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2921 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2923 mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2924 mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2926 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2927 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2928 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2929 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2930 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2933 if (pi->caps_mclk_ss_support) {
2934 struct amdgpu_atom_ss ss;
2937 u32 reference_clock = adev->clock.mpll.reference_freq;
2939 if (mpll_param.qdr == 1)
2940 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2942 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2944 tmp = (freq_nom / reference_clock);
2946 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2947 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2948 u32 clks = reference_clock * 5 / ss.rate;
2949 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2951 mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2952 mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2954 mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2955 mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2959 mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2960 mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2963 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2964 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2966 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2967 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2969 mclk->MclkFrequency = memory_clock;
2970 mclk->MpllFuncCntl = mpll_func_cntl;
2971 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2972 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2973 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2974 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2975 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2976 mclk->DllCntl = dll_cntl;
2977 mclk->MpllSs1 = mpll_ss1;
2978 mclk->MpllSs2 = mpll_ss2;
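/* Fill one SMC memory DPM level: resolve the minimum VDDC/VDDCI/MVDD for the
 * clock, derive the stutter/strobe/EDC flags from the configured thresholds,
 * compute the MPLL registers and convert the level to big-endian.
 */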
2983 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2985 SMU7_Discrete_MemoryLevel *memory_level)
2987 struct ci_power_info *pi = ci_get_pi(adev);
2991 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2992 ret = ci_get_dependency_volt_by_clk(adev,
2993 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2994 memory_clock, &memory_level->MinVddc);
2999 if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
3000 ret = ci_get_dependency_volt_by_clk(adev,
3001 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3002 memory_clock, &memory_level->MinVddci);
3007 if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3008 ret = ci_get_dependency_volt_by_clk(adev,
3009 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3010 memory_clock, &memory_level->MinMvdd);
3015 memory_level->MinVddcPhases = 1;
3017 if (pi->vddc_phase_shed_control)
3018 ci_populate_phase_value_based_on_mclk(adev,
3019 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3021 &memory_level->MinVddcPhases);
3023 memory_level->EnabledForThrottle = 1;
3024 memory_level->UpH = 0;
3025 memory_level->DownH = 100;
3026 memory_level->VoltageDownH = 0;
3027 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3029 memory_level->StutterEnable = false;
3030 memory_level->StrobeEnable = false;
3031 memory_level->EdcReadEnable = false;
3032 memory_level->EdcWriteEnable = false;
3033 memory_level->RttEnable = false;
3035 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3037 if (pi->mclk_stutter_mode_threshold &&
3038 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3039 (!pi->uvd_enabled) &&
3040 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3041 (adev->pm.dpm.new_active_crtc_count <= 2))
3042 memory_level->StutterEnable = true;
3044 if (pi->mclk_strobe_mode_threshold &&
3045 (memory_clock <= pi->mclk_strobe_mode_threshold))
3046 memory_level->StrobeEnable = 1;
3048 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3049 memory_level->StrobeRatio =
3050 ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3051 if (pi->mclk_edc_enable_threshold &&
3052 (memory_clock > pi->mclk_edc_enable_threshold))
3053 memory_level->EdcReadEnable = true;
3055 if (pi->mclk_edc_wr_enable_threshold &&
3056 (memory_clock > pi->mclk_edc_wr_enable_threshold))
3057 memory_level->EdcWriteEnable = true;
3059 if (memory_level->StrobeEnable) {
3060 if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3061 ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3062 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3064 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
3066 dll_state_on = pi->dll_default_on;
3069 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3070 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
3073 ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3077 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3078 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3079 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3080 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3082 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3083 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3084 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3085 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3086 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3087 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3088 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3089 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3090 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3091 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3092 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
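/* Populate the ACPI (lowest power) graphics and memory levels used while DPM
 * is inactive: the SPLL is powered down and held in reset, sclk falls back to
 * the reference clock, and the memory MRDCK pads are held in reset.
 */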
3097 static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3098 SMU7_Discrete_DpmTable *table)
3100 struct ci_power_info *pi = ci_get_pi(adev);
3101 struct atom_clock_dividers dividers;
3102 SMU7_Discrete_VoltageLevel voltage_level;
3103 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3104 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3105 u32 dll_cntl = pi->clock_registers.dll_cntl;
3106 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3109 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3112 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3114 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3116 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3118 table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3120 ret = amdgpu_atombios_get_clock_dividers(adev,
3121 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3122 table->ACPILevel.SclkFrequency, false, &dividers);
3126 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3127 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3128 table->ACPILevel.DeepSleepDivId = 0;
3130 spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3131 spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3133 spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3134 spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3136 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3137 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3138 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3139 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3140 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3141 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3142 table->ACPILevel.CcPwrDynRm = 0;
3143 table->ACPILevel.CcPwrDynRm1 = 0;
3145 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3146 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3147 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3148 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3149 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3150 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3151 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3152 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3153 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3154 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3155 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3157 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3158 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3160 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3162 table->MemoryACPILevel.MinVddci =
3163 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3165 table->MemoryACPILevel.MinVddci =
3166 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3169 if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3170 table->MemoryACPILevel.MinMvdd = 0;
3172 table->MemoryACPILevel.MinMvdd =
3173 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3175 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3176 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3177 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3178 MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3180 dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3182 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3183 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3184 table->MemoryACPILevel.MpllAdFuncCntl =
3185 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3186 table->MemoryACPILevel.MpllDqFuncCntl =
3187 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3188 table->MemoryACPILevel.MpllFuncCntl =
3189 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3190 table->MemoryACPILevel.MpllFuncCntl_1 =
3191 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3192 table->MemoryACPILevel.MpllFuncCntl_2 =
3193 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3194 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3195 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3197 table->MemoryACPILevel.EnabledForThrottle = 0;
3198 table->MemoryACPILevel.EnabledForActivity = 0;
3199 table->MemoryACPILevel.UpH = 0;
3200 table->MemoryACPILevel.DownH = 100;
3201 table->MemoryACPILevel.VoltageDownH = 0;
3202 table->MemoryACPILevel.ActivityLevel =
3203 cpu_to_be16((u16)pi->mclk_activity_target);
3205 table->MemoryACPILevel.StutterEnable = false;
3206 table->MemoryACPILevel.StrobeEnable = false;
3207 table->MemoryACPILevel.EdcReadEnable = false;
3208 table->MemoryACPILevel.EdcWriteEnable = false;
3209 table->MemoryACPILevel.RttEnable = false;
3215 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3217 struct ci_power_info *pi = ci_get_pi(adev);
3218 struct ci_ulv_parm *ulv = &pi->ulv;
3220 if (ulv->supported) {
3222 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3225 return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3232 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3233 SMU7_Discrete_Ulv *state)
3235 struct ci_power_info *pi = ci_get_pi(adev);
3236 u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3238 state->CcPwrDynRm = 0;
3239 state->CcPwrDynRm1 = 0;
3241 if (ulv_voltage == 0) {
3242 pi->ulv.supported = false;
3246 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3247 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3248 state->VddcOffset = 0;
3251 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3253 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3254 state->VddcOffsetVid = 0;
3256 state->VddcOffsetVid = (u8)
3257 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3258 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3260 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3262 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3263 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3264 state->VddcOffset = cpu_to_be16(state->VddcOffset);
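/* Compute the SPLL settings for a target engine clock: program the fractional
 * feedback divider returned by the VBIOS and, when supported, the engine
 * spread-spectrum CLKS/CLKV values.
 */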
3269 static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3271 SMU7_Discrete_GraphicsLevel *sclk)
3273 struct ci_power_info *pi = ci_get_pi(adev);
3274 struct atom_clock_dividers dividers;
3275 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3276 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3277 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3278 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3279 u32 reference_clock = adev->clock.spll.reference_freq;
3280 u32 reference_divider;
3284 ret = amdgpu_atombios_get_clock_dividers(adev,
3285 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3286 engine_clock, false, &dividers);
3290 reference_divider = 1 + dividers.ref_div;
3291 fbdiv = dividers.fb_div & 0x3FFFFFF;
3293 spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3294 spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3295 spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3297 if (pi->caps_sclk_ss_support) {
3298 struct amdgpu_atom_ss ss;
3299 u32 vco_freq = engine_clock * dividers.post_div;
3301 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3302 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3303 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3304 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3306 cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3307 cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3308 cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3310 cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3311 cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3315 sclk->SclkFrequency = engine_clock;
3316 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3317 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3318 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3319 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3320 sclk->SclkDid = (u8)dividers.post_divider;
3325 static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3327 u16 sclk_activity_level_t,
3328 SMU7_Discrete_GraphicsLevel *graphic_level)
3330 struct ci_power_info *pi = ci_get_pi(adev);
3333 ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3337 ret = ci_get_dependency_volt_by_clk(adev,
3338 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3339 engine_clock, &graphic_level->MinVddc);
3343 graphic_level->SclkFrequency = engine_clock;
3345 graphic_level->Flags = 0;
3346 graphic_level->MinVddcPhases = 1;
3348 if (pi->vddc_phase_shed_control)
3349 ci_populate_phase_value_based_on_sclk(adev,
3350 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3352 &graphic_level->MinVddcPhases);
3354 graphic_level->ActivityLevel = sclk_activity_level_t;
3356 graphic_level->CcPwrDynRm = 0;
3357 graphic_level->CcPwrDynRm1 = 0;
3358 graphic_level->EnabledForThrottle = 1;
3359 graphic_level->UpH = 0;
3360 graphic_level->DownH = 0;
3361 graphic_level->VoltageDownH = 0;
3362 graphic_level->PowerThrottle = 0;
3364 if (pi->caps_sclk_ds)
3365 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3366 CISLAND_MINIMUM_ENGINE_CLOCK);
3368 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3370 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3371 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3372 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3373 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3374 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3375 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3376 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3377 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3378 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3379 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3380 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
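/* Build every graphics DPM level from the sclk DPM table, give the highest
 * level the high display watermark, and copy the level array into SMC SRAM.
 */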
3385 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3387 struct ci_power_info *pi = ci_get_pi(adev);
3388 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3389 u32 level_array_address = pi->dpm_table_start +
3390 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3391 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3392 SMU7_MAX_LEVELS_GRAPHICS;
3393 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3396 memset(levels, 0, level_array_size);
3398 for (i = 0; i < dpm_table->sclk_table.count; i++) {
3399 ret = ci_populate_single_graphic_level(adev,
3400 dpm_table->sclk_table.dpm_levels[i].value,
3401 (u16)pi->activity_target[i],
3402 &pi->smc_state_table.GraphicsLevel[i]);
3406 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3407 if (i == (dpm_table->sclk_table.count - 1))
3408 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3409 PPSMC_DISPLAY_WATERMARK_HIGH;
3411 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3413 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3414 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3415 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3417 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3418 (u8 *)levels, level_array_size,
3426 static int ci_populate_ulv_state(struct amdgpu_device *adev,
3427 SMU7_Discrete_Ulv *ulv_level)
3429 return ci_populate_ulv_level(adev, ulv_level);
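/* Build every memory DPM level from the mclk DPM table, apply the minimum
 * voltage fixup for 0x67B0/0x67B1 boards, and copy the level array into SMC
 * SRAM.
 */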
3432 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3434 struct ci_power_info *pi = ci_get_pi(adev);
3435 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3436 u32 level_array_address = pi->dpm_table_start +
3437 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3438 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3439 SMU7_MAX_LEVELS_MEMORY;
3440 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3443 memset(levels, 0, level_array_size);
3445 for (i = 0; i < dpm_table->mclk_table.count; i++) {
3446 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3448 ret = ci_populate_single_memory_level(adev,
3449 dpm_table->mclk_table.dpm_levels[i].value,
3450 &pi->smc_state_table.MemoryLevel[i]);
3455 pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3457 if ((dpm_table->mclk_table.count >= 2) &&
3458 ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3459 pi->smc_state_table.MemoryLevel[1].MinVddc =
3460 pi->smc_state_table.MemoryLevel[0].MinVddc;
3461 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3462 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3465 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3467 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3468 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3469 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3471 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3472 PPSMC_DISPLAY_WATERMARK_HIGH;
3474 ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3475 (u8 *)levels, level_array_size,
3483 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3484 struct ci_single_dpm_table* dpm_table,
3489 dpm_table->count = count;
3490 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3491 dpm_table->dpm_levels[i].enabled = false;
3494 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
3495 u32 index, u32 pcie_gen, u32 pcie_lanes)
3497 dpm_table->dpm_levels[index].value = pcie_gen;
3498 dpm_table->dpm_levels[index].param1 = pcie_lanes;
3499 dpm_table->dpm_levels[index].enabled = true;
3502 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3504 struct ci_power_info *pi = ci_get_pi(adev);
3506 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3509 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3510 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3511 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3512 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3513 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3514 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3517 ci_reset_single_dpm_table(adev,
3518 &pi->dpm_table.pcie_speed_table,
3519 SMU7_MAX_LEVELS_LINK);
3521 if (adev->asic_type == CHIP_BONAIRE)
3522 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3523 pi->pcie_gen_powersaving.min,
3524 pi->pcie_lane_powersaving.max);
3526 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3527 pi->pcie_gen_powersaving.min,
3528 pi->pcie_lane_powersaving.min);
3529 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3530 pi->pcie_gen_performance.min,
3531 pi->pcie_lane_performance.min);
3532 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3533 pi->pcie_gen_powersaving.min,
3534 pi->pcie_lane_powersaving.max);
3535 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3536 pi->pcie_gen_performance.min,
3537 pi->pcie_lane_performance.max);
3538 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3539 pi->pcie_gen_powersaving.max,
3540 pi->pcie_lane_powersaving.max);
3541 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3542 pi->pcie_gen_performance.max,
3543 pi->pcie_lane_performance.max);
3545 pi->dpm_table.pcie_speed_table.count = 6;
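/* Build the default DPM tables from the VBIOS dependency tables: distinct sclk
 * and mclk levels, the VDDC/VDDCI/MVDD levels, the PCIe table, and a golden
 * copy of the result for later restoration.
 */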
3550 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3552 struct ci_power_info *pi = ci_get_pi(adev);
3553 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3554 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3555 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3556 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3557 struct amdgpu_cac_leakage_table *std_voltage_table =
3558 &adev->pm.dpm.dyn_state.cac_leakage_table;
3561 if (allowed_sclk_vddc_table == NULL)
3563 if (allowed_sclk_vddc_table->count < 1)
3565 if (allowed_mclk_table == NULL)
3567 if (allowed_mclk_table->count < 1)
3570 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3572 ci_reset_single_dpm_table(adev,
3573 &pi->dpm_table.sclk_table,
3574 SMU7_MAX_LEVELS_GRAPHICS);
3575 ci_reset_single_dpm_table(adev,
3576 &pi->dpm_table.mclk_table,
3577 SMU7_MAX_LEVELS_MEMORY);
3578 ci_reset_single_dpm_table(adev,
3579 &pi->dpm_table.vddc_table,
3580 SMU7_MAX_LEVELS_VDDC);
3581 ci_reset_single_dpm_table(adev,
3582 &pi->dpm_table.vddci_table,
3583 SMU7_MAX_LEVELS_VDDCI);
3584 ci_reset_single_dpm_table(adev,
3585 &pi->dpm_table.mvdd_table,
3586 SMU7_MAX_LEVELS_MVDD);
3588 pi->dpm_table.sclk_table.count = 0;
3589 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3591 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3592 allowed_sclk_vddc_table->entries[i].clk)) {
3593 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3594 allowed_sclk_vddc_table->entries[i].clk;
3595 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3596 (i == 0) ? true : false;
3597 pi->dpm_table.sclk_table.count++;
3601 pi->dpm_table.mclk_table.count = 0;
3602 for (i = 0; i < allowed_mclk_table->count; i++) {
3604 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3605 allowed_mclk_table->entries[i].clk)) {
3606 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3607 allowed_mclk_table->entries[i].clk;
3608 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3609 (i == 0) ? true : false;
3610 pi->dpm_table.mclk_table.count++;
3614 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3615 pi->dpm_table.vddc_table.dpm_levels[i].value =
3616 allowed_sclk_vddc_table->entries[i].v;
3617 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3618 std_voltage_table->entries[i].leakage;
3619 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3621 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3623 allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3624 if (allowed_mclk_table) {
3625 for (i = 0; i < allowed_mclk_table->count; i++) {
3626 pi->dpm_table.vddci_table.dpm_levels[i].value =
3627 allowed_mclk_table->entries[i].v;
3628 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3630 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3633 allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3634 if (allowed_mclk_table) {
3635 for (i = 0; i < allowed_mclk_table->count; i++) {
3636 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3637 allowed_mclk_table->entries[i].v;
3638 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3640 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3643 ci_setup_default_pcie_tables(adev);
3645 /* save a copy of the default DPM table */
3646 memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3647 sizeof(struct ci_dpm_table));
3652 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3653 u32 value, u32 *boot_level)
3658 for(i = 0; i < table->count; i++) {
3659 if (value == table->dpm_levels[i].value) {
3668 static int ci_init_smc_table(struct amdgpu_device *adev)
3670 struct ci_power_info *pi = ci_get_pi(adev);
3671 struct ci_ulv_parm *ulv = &pi->ulv;
3672 struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3673 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3676 ret = ci_setup_default_dpm_tables(adev);
3680 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3681 ci_populate_smc_voltage_tables(adev, table);
3683 ci_init_fps_limits(adev);
3685 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3686 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3688 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3689 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3691 if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3692 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3694 if (ulv->supported) {
3695 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3698 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3701 ret = ci_populate_all_graphic_levels(adev);
3705 ret = ci_populate_all_memory_levels(adev);
3709 ci_populate_smc_link_level(adev, table);
3711 ret = ci_populate_smc_acpi_level(adev, table);
3715 ret = ci_populate_smc_vce_level(adev, table);
3719 ret = ci_populate_smc_acp_level(adev, table);
3723 ret = ci_populate_smc_samu_level(adev, table);
3727 ret = ci_do_program_memory_timing_parameters(adev);
3731 ret = ci_populate_smc_uvd_level(adev, table);
3735 table->UvdBootLevel = 0;
3736 table->VceBootLevel = 0;
3737 table->AcpBootLevel = 0;
3738 table->SamuBootLevel = 0;
3739 table->GraphicsBootLevel = 0;
3740 table->MemoryBootLevel = 0;
3742 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3743 pi->vbios_boot_state.sclk_bootup_value,
3744 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3746 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3747 pi->vbios_boot_state.mclk_bootup_value,
3748 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3750 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3751 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3752 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3754 ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3756 ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3760 table->UVDInterval = 1;
3761 table->VCEInterval = 1;
3762 table->ACPInterval = 1;
3763 table->SAMUInterval = 1;
3764 table->GraphicsVoltageChangeEnable = 1;
3765 table->GraphicsThermThrottleEnable = 1;
3766 table->GraphicsInterval = 1;
3767 table->VoltageInterval = 1;
3768 table->ThermalInterval = 1;
3769 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3770 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3771 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3772 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3773 table->MemoryVoltageChangeEnable = 1;
3774 table->MemoryInterval = 1;
3775 table->VoltageResponseTime = 0;
3776 table->VddcVddciDelta = 4000;
3777 table->PhaseResponseTime = 0;
3778 table->MemoryThermThrottleEnable = 1;
3779 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3780 table->PCIeGenInterval = 1;
3781 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3782 table->SVI2Enable = 1;
3784 table->SVI2Enable = 0;
3786 table->ThermGpio = 17;
3787 table->SclkStepSize = 0x4000;
3789 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3790 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3791 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3792 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3793 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3794 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3795 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3796 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3797 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3798 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3799 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3800 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3801 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3802 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3804 ret = amdgpu_ci_copy_bytes_to_smc(adev,
3805 pi->dpm_table_start +
3806 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3807 (u8 *)&table->SystemFlags,
3808 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3816 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3817 struct ci_single_dpm_table *dpm_table,
3818 u32 low_limit, u32 high_limit)
3822 for (i = 0; i < dpm_table->count; i++) {
3823 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3824 (dpm_table->dpm_levels[i].value > high_limit))
3825 dpm_table->dpm_levels[i].enabled = false;
3827 dpm_table->dpm_levels[i].enabled = true;
3831 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3832 u32 speed_low, u32 lanes_low,
3833 u32 speed_high, u32 lanes_high)
3835 struct ci_power_info *pi = ci_get_pi(adev);
3836 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3839 for (i = 0; i < pcie_table->count; i++) {
3840 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3841 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3842 (pcie_table->dpm_levels[i].value > speed_high) ||
3843 (pcie_table->dpm_levels[i].param1 > lanes_high))
3844 pcie_table->dpm_levels[i].enabled = false;
3846 pcie_table->dpm_levels[i].enabled = true;
3849 for (i = 0; i < pcie_table->count; i++) {
3850 if (pcie_table->dpm_levels[i].enabled) {
3851 for (j = i + 1; j < pcie_table->count; j++) {
3852 if (pcie_table->dpm_levels[j].enabled) {
3853 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3854 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3855 pcie_table->dpm_levels[j].enabled = false;
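/* Restrict the sclk, mclk and PCIe DPM tables to the clock and link ranges
 * spanned by the requested power state's performance levels.
 */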
3862 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3863 struct amdgpu_ps *amdgpu_state)
3865 struct ci_ps *state = ci_get_ps(amdgpu_state);
3866 struct ci_power_info *pi = ci_get_pi(adev);
3867 u32 high_limit_count;
3869 if (state->performance_level_count < 1)
3872 if (state->performance_level_count == 1)
3873 high_limit_count = 0;
3875 high_limit_count = 1;
3877 ci_trim_single_dpm_states(adev,
3878 &pi->dpm_table.sclk_table,
3879 state->performance_levels[0].sclk,
3880 state->performance_levels[high_limit_count].sclk);
3882 ci_trim_single_dpm_states(adev,
3883 &pi->dpm_table.mclk_table,
3884 state->performance_levels[0].mclk,
3885 state->performance_levels[high_limit_count].mclk);
3887 ci_trim_pcie_dpm_states(adev,
3888 state->performance_levels[0].pcie_gen,
3889 state->performance_levels[0].pcie_lane,
3890 state->performance_levels[high_limit_count].pcie_gen,
3891 state->performance_levels[high_limit_count].pcie_lane);
3896 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3898 struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3899 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3900 struct amdgpu_clock_voltage_dependency_table *vddc_table =
3901 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3902 u32 requested_voltage = 0;
3905 if (disp_voltage_table == NULL)
3907 if (!disp_voltage_table->count)
3910 for (i = 0; i < disp_voltage_table->count; i++) {
3911 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3912 requested_voltage = disp_voltage_table->entries[i].v;
3915 for (i = 0; i < vddc_table->count; i++) {
3916 if (requested_voltage <= vddc_table->entries[i].v) {
3917 requested_voltage = vddc_table->entries[i].v;
3918 return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3919 PPSMC_MSG_VddC_Request,
3920 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3928 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3930 struct ci_power_info *pi = ci_get_pi(adev);
3931 PPSMC_Result result;
3933 ci_apply_disp_minimum_voltage_request(adev);
3935 if (!pi->sclk_dpm_key_disabled) {
3936 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3937 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3938 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3939 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3940 if (result != PPSMC_Result_OK)
3941 return -EINVAL;
3945 if (!pi->mclk_dpm_key_disabled) {
3946 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3947 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3948 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3949 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3950 if (result != PPSMC_Result_OK)
3951 return -EINVAL;
3956 if (!pi->pcie_dpm_key_disabled) {
3957 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3958 result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3959 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3960 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3961 if (result != PPSMC_Result_OK)
3962 return -EINVAL;
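/*
 * Compare the requested state's highest sclk/mclk against the current DPM
 * tables and record what must be re-uploaded in pi->need_update_smu7_dpm_table:
 * DPMTABLE_OD_UPDATE_SCLK/MCLK when the clock is not present in the table,
 * and DPMTABLE_UPDATE_MCLK when the active crtc count changes.
 */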
3970 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3971 struct amdgpu_ps *amdgpu_state)
3973 struct ci_power_info *pi = ci_get_pi(adev);
3974 struct ci_ps *state = ci_get_ps(amdgpu_state);
3975 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3976 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3977 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3978 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3981 pi->need_update_smu7_dpm_table = 0;
3983 for (i = 0; i < sclk_table->count; i++) {
3984 if (sclk == sclk_table->dpm_levels[i].value)
3985 break;
3988 if (i >= sclk_table->count) {
3989 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3991 /* XXX check display min clock requirements; the self-comparison below is a placeholder that always evaluates false */
3992 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3993 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3996 for (i = 0; i < mclk_table->count; i++) {
3997 if (mclk == mclk_table->dpm_levels[i].value)
3998 break;
4001 if (i >= mclk_table->count)
4002 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4004 if (adev->pm.dpm.current_active_crtc_count !=
4005 adev->pm.dpm.new_active_crtc_count)
4006 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4009 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4010 struct amdgpu_ps *amdgpu_state)
4012 struct ci_power_info *pi = ci_get_pi(adev);
4013 struct ci_ps *state = ci_get_ps(amdgpu_state);
4014 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4015 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4016 struct ci_dpm_table *dpm_table = &pi->dpm_table;
4019 if (!pi->need_update_smu7_dpm_table)
4022 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4023 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4025 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4026 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4028 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4029 ret = ci_populate_all_graphic_levels(adev);
4034 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4035 ret = ci_populate_all_memory_levels(adev);
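/*
 * The UVD/VCE/SAMU/ACP enable helpers below share one pattern: walk the
 * block's clock-voltage dependency table from the highest entry down, set a
 * bit for every level whose voltage fits under the AC or DC limit, send the
 * resulting mask with the matching *_SetEnabledMask message, then send
 * *_Enable or *_Disable. The UVD path additionally drops mclk level 0 from
 * the mclk mask while UVD is active.
 */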
4043 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4045 struct ci_power_info *pi = ci_get_pi(adev);
4046 const struct amdgpu_clock_and_voltage_limits *max_limits;
4049 if (adev->pm.dpm.ac_power)
4050 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4051 else
4052 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4055 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4057 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4058 if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4059 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4061 if (!pi->caps_uvd_dpm)
4066 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4067 PPSMC_MSG_UVDDPM_SetEnabledMask,
4068 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4070 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4071 pi->uvd_enabled = true;
4072 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4073 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4074 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4075 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4078 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4079 pi->uvd_enabled = false;
4080 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4081 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4082 PPSMC_MSG_MCLKDPM_SetEnabledMask,
4083 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4087 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4088 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4089 0 : -EINVAL;
4092 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4094 struct ci_power_info *pi = ci_get_pi(adev);
4095 const struct amdgpu_clock_and_voltage_limits *max_limits;
4098 if (adev->pm.dpm.ac_power)
4099 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4100 else
4101 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4104 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4105 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4106 if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4107 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4109 if (!pi->caps_vce_dpm)
4114 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4115 PPSMC_MSG_VCEDPM_SetEnabledMask,
4116 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4119 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4120 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4121 0 : -EINVAL;
4125 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4127 struct ci_power_info *pi = ci_get_pi(adev);
4128 const struct amdgpu_clock_and_voltage_limits *max_limits;
4131 if (adev->pm.dpm.ac_power)
4132 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4133 else
4134 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4137 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4138 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4139 if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4140 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4142 if (!pi->caps_samu_dpm)
4147 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4148 PPSMC_MSG_SAMUDPM_SetEnabledMask,
4149 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4151 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4152 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4153 0 : -EINVAL;
4156 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4158 struct ci_power_info *pi = ci_get_pi(adev);
4159 const struct amdgpu_clock_and_voltage_limits *max_limits;
4162 if (adev->pm.dpm.ac_power)
4163 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4164 else
4165 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4168 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4169 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4170 if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4171 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4173 if (!pi->caps_acp_dpm)
4178 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4179 PPSMC_MSG_ACPDPM_SetEnabledMask,
4180 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4183 return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4184 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4185 0 : -EINVAL;
4189 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4191 struct ci_power_info *pi = ci_get_pi(adev);
4195 if (pi->caps_uvd_dpm ||
4196 (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4197 pi->smc_state_table.UvdBootLevel = 0;
4198 else
4199 pi->smc_state_table.UvdBootLevel =
4200 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4202 tmp = RREG32_SMC(ixDPM_TABLE_475);
4203 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4204 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4205 WREG32_SMC(ixDPM_TABLE_475, tmp);
4208 return ci_enable_uvd_dpm(adev, !gate);
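/*
 * VCE DPM follows the evclk requested by the new state: when encoding starts
 * the VCE clocks are ungated, the boot level is set from the first dependency
 * table entry whose evclk covers min_evclk, and VCE DPM is enabled; when
 * evclk drops back to 0 the clocks are gated and VCE DPM is disabled again.
 */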
4211 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4214 u32 min_evclk = 30000; /* ??? */
4215 struct amdgpu_vce_clock_voltage_dependency_table *table =
4216 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4218 for (i = 0; i < table->count; i++) {
4219 if (table->entries[i].evclk >= min_evclk)
4220 return i;
4223 return table->count - 1;
4226 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4227 struct amdgpu_ps *amdgpu_new_state,
4228 struct amdgpu_ps *amdgpu_current_state)
4230 struct ci_power_info *pi = ci_get_pi(adev);
4234 if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4235 if (amdgpu_new_state->evclk) {
4236 /* turn the clocks on when encoding */
4237 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4238 AMD_CG_STATE_UNGATE);
4242 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4243 tmp = RREG32_SMC(ixDPM_TABLE_475);
4244 tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4245 tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4246 WREG32_SMC(ixDPM_TABLE_475, tmp);
4248 ret = ci_enable_vce_dpm(adev, true);
4250 /* turn the clocks off when not encoding */
4251 ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4256 ret = ci_enable_vce_dpm(adev, false);
4263 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4265 return ci_enable_samu_dpm(adev, gate);
4268 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4270 struct ci_power_info *pi = ci_get_pi(adev);
4274 pi->smc_state_table.AcpBootLevel = 0;
4276 tmp = RREG32_SMC(ixDPM_TABLE_475);
4277 tmp &= ~AcpBootLevel_MASK;
4278 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4279 WREG32_SMC(ixDPM_TABLE_475, tmp);
4282 return ci_enable_acp_dpm(adev, !gate);
4286 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4287 struct amdgpu_ps *amdgpu_state)
4289 struct ci_power_info *pi = ci_get_pi(adev);
4292 ret = ci_trim_dpm_states(adev, amdgpu_state);
4296 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4297 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4298 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4299 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4300 pi->last_mclk_dpm_enable_mask =
4301 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4302 if (pi->uvd_enabled) {
4303 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4304 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4306 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4307 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4312 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4317 while ((level_mask & (1 << level)) == 0)
4318 level++;
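/*
 * ci_get_lowest_enabled_level() returns the index of the least significant
 * set bit, e.g. a mask of 0b01100 yields level 2. Forcing HIGH pins each
 * domain to its highest enabled level and polls
 * TARGET_AND_CURRENT_PROFILE_INDEX until the SMC reports that index; forcing
 * LOW uses the lowest enabled level; AUTO unforces and re-uploads the masks.
 */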
4324 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4325 enum amdgpu_dpm_forced_level level)
4327 struct ci_power_info *pi = ci_get_pi(adev);
4331 if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4332 if ((!pi->pcie_dpm_key_disabled) &&
4333 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4335 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4339 ret = ci_dpm_force_state_pcie(adev, level);
4342 for (i = 0; i < adev->usec_timeout; i++) {
4343 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4344 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4345 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4352 if ((!pi->sclk_dpm_key_disabled) &&
4353 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4355 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4359 ret = ci_dpm_force_state_sclk(adev, levels);
4362 for (i = 0; i < adev->usec_timeout; i++) {
4363 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4364 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4365 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4372 if ((!pi->mclk_dpm_key_disabled) &&
4373 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4375 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4379 ret = ci_dpm_force_state_mclk(adev, levels);
4382 for (i = 0; i < adev->usec_timeout; i++) {
4383 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4384 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4385 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4392 } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4393 if ((!pi->sclk_dpm_key_disabled) &&
4394 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4395 levels = ci_get_lowest_enabled_level(adev,
4396 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4397 ret = ci_dpm_force_state_sclk(adev, levels);
4400 for (i = 0; i < adev->usec_timeout; i++) {
4401 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4402 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4403 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4409 if ((!pi->mclk_dpm_key_disabled) &&
4410 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4411 levels = ci_get_lowest_enabled_level(adev,
4412 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4413 ret = ci_dpm_force_state_mclk(adev, levels);
4416 for (i = 0; i < adev->usec_timeout; i++) {
4417 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4418 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4419 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4425 if ((!pi->pcie_dpm_key_disabled) &&
4426 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4427 levels = ci_get_lowest_enabled_level(adev,
4428 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4429 ret = ci_dpm_force_state_pcie(adev, levels);
4432 for (i = 0; i < adev->usec_timeout; i++) {
4433 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4434 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4435 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4441 } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4442 if (!pi->pcie_dpm_key_disabled) {
4443 PPSMC_Result smc_result;
4445 smc_result = amdgpu_ci_send_msg_to_smc(adev,
4446 PPSMC_MSG_PCIeDPM_UnForceLevel);
4447 if (smc_result != PPSMC_Result_OK)
4450 ret = ci_upload_dpm_level_enable_mask(adev);
4455 adev->pm.dpm.forced_level = level;
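/*
 * Derive the "special" MC registers: for each MISC1/RESERVE_M entry in the
 * VBIOS table, synthesized EMRS/MRS/MRS1 (and PMG_AUTO_CMD for non-GDDR5)
 * entries are appended after table->last, built from the current register
 * contents combined with the per-level table data.
 */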
4460 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4461 struct ci_mc_reg_table *table)
4466 for (i = 0, j = table->last; i < table->last; i++) {
4467 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4469 switch(table->mc_reg_address[i].s1) {
4470 case mmMC_SEQ_MISC1:
4471 temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4472 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4473 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4474 for (k = 0; k < table->num_entries; k++) {
4475 table->mc_reg_table_entry[k].mc_data[j] =
4476 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4479 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4482 temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4483 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4484 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4485 for (k = 0; k < table->num_entries; k++) {
4486 table->mc_reg_table_entry[k].mc_data[j] =
4487 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4488 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4489 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4492 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4495 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4496 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4497 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4498 for (k = 0; k < table->num_entries; k++) {
4499 table->mc_reg_table_entry[k].mc_data[j] =
4500 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4503 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4507 case mmMC_SEQ_RESERVE_M:
4508 temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4509 table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4510 table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4511 for (k = 0; k < table->num_entries; k++) {
4512 table->mc_reg_table_entry[k].mc_data[j] =
4513 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4516 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4530 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4535 case mmMC_SEQ_RAS_TIMING:
4536 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4538 case mmMC_SEQ_DLL_STBY:
4539 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4541 case mmMC_SEQ_G5PDX_CMD0:
4542 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4544 case mmMC_SEQ_G5PDX_CMD1:
4545 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4547 case mmMC_SEQ_G5PDX_CTRL:
4548 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4550 case mmMC_SEQ_CAS_TIMING:
4551 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4553 case mmMC_SEQ_MISC_TIMING:
4554 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4556 case mmMC_SEQ_MISC_TIMING2:
4557 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4559 case mmMC_SEQ_PMG_DVS_CMD:
4560 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4562 case mmMC_SEQ_PMG_DVS_CTL:
4563 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4565 case mmMC_SEQ_RD_CTL_D0:
4566 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4568 case mmMC_SEQ_RD_CTL_D1:
4569 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4571 case mmMC_SEQ_WR_CTL_D0:
4572 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4574 case mmMC_SEQ_WR_CTL_D1:
4575 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4577 case mmMC_PMG_CMD_EMRS:
4578 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4580 case mmMC_PMG_CMD_MRS:
4581 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4583 case mmMC_PMG_CMD_MRS1:
4584 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4586 case mmMC_SEQ_PMG_TIMING:
4587 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4589 case mmMC_PMG_CMD_MRS2:
4590 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4592 case mmMC_SEQ_WR_CTL_2:
4593 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4603 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4607 for (i = 0; i < table->last; i++) {
4608 for (j = 1; j < table->num_entries; j++) {
4609 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4610 table->mc_reg_table_entry[j].mc_data[i]) {
4611 table->valid_flag |= 1 << i;
4618 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4623 for (i = 0; i < table->last; i++) {
4624 table->mc_reg_address[i].s0 =
4625 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4626 address : table->mc_reg_address[i].s1;
4630 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4631 struct ci_mc_reg_table *ci_table)
4635 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4637 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4640 for (i = 0; i < table->last; i++)
4641 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4643 ci_table->last = table->last;
4645 for (i = 0; i < table->num_entries; i++) {
4646 ci_table->mc_reg_table_entry[i].mclk_max =
4647 table->mc_reg_table_entry[i].mclk_max;
4648 for (j = 0; j < table->last; j++)
4649 ci_table->mc_reg_table_entry[i].mc_data[j] =
4650 table->mc_reg_table_entry[i].mc_data[j];
4652 ci_table->num_entries = table->num_entries;
4657 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4658 struct ci_mc_reg_table *table)
4664 tmp = RREG32(mmMC_SEQ_MISC0);
4665 patch = (tmp & 0x0000f00) == 0x300;
4668 ((adev->pdev->device == 0x67B0) ||
4669 (adev->pdev->device == 0x67B1))) {
4670 for (i = 0; i < table->last; i++) {
4671 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4673 switch (table->mc_reg_address[i].s1) {
4674 case mmMC_SEQ_MISC1:
4675 for (k = 0; k < table->num_entries; k++) {
4676 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4677 (table->mc_reg_table_entry[k].mclk_max == 137500))
4678 table->mc_reg_table_entry[k].mc_data[i] =
4679 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4683 case mmMC_SEQ_WR_CTL_D0:
4684 for (k = 0; k < table->num_entries; k++) {
4685 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4686 (table->mc_reg_table_entry[k].mclk_max == 137500))
4687 table->mc_reg_table_entry[k].mc_data[i] =
4688 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4692 case mmMC_SEQ_WR_CTL_D1:
4693 for (k = 0; k < table->num_entries; k++) {
4694 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4695 (table->mc_reg_table_entry[k].mclk_max == 137500))
4696 table->mc_reg_table_entry[k].mc_data[i] =
4697 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4701 case mmMC_SEQ_WR_CTL_2:
4702 for (k = 0; k < table->num_entries; k++) {
4703 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4704 (table->mc_reg_table_entry[k].mclk_max == 137500))
4705 table->mc_reg_table_entry[k].mc_data[i] = 0;
4708 case mmMC_SEQ_CAS_TIMING:
4709 for (k = 0; k < table->num_entries; k++) {
4710 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4711 table->mc_reg_table_entry[k].mc_data[i] =
4712 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4714 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4715 table->mc_reg_table_entry[k].mc_data[i] =
4716 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4720 case mmMC_SEQ_MISC_TIMING:
4721 for (k = 0; k < table->num_entries; k++) {
4722 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4723 table->mc_reg_table_entry[k].mc_data[i] =
4724 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4726 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4727 table->mc_reg_table_entry[k].mc_data[i] =
4728 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4737 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4738 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4739 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4740 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4741 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
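/*
 * MC register table setup: mirror the live sequencer registers into their
 * _LP shadows, pull the AC timing table for this memory module from the
 * VBIOS, copy and patch it, add the derived special registers, and flag
 * which registers actually vary between entries.
 */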
4747 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4749 struct ci_power_info *pi = ci_get_pi(adev);
4750 struct atom_mc_reg_table *table;
4751 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4752 u8 module_index = ci_get_memory_module_index(adev);
4755 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4759 WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4760 WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4761 WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4762 WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4763 WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4764 WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4765 WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4766 WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4767 WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4768 WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4769 WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4770 WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4771 WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4772 WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4773 WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4774 WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4775 WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4776 WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4777 WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4778 WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4780 ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4784 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4788 ci_set_s0_mc_reg_index(ci_table);
4790 ret = ci_register_patching_mc_seq(adev, ci_table);
4794 ret = ci_set_mc_special_registers(adev, ci_table);
4798 ci_set_valid_flag(ci_table);
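/*
 * Conversion to SMC format: only registers flagged in valid_flag are copied,
 * with addresses as big-endian 16-bit pairs and values as big-endian 32-bit
 * words. The initial upload sends the whole SMU7_Discrete_MCRegisters block;
 * later updates rewrite only the per-mclk-level data[] array.
 */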
4806 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4807 SMU7_Discrete_MCRegisters *mc_reg_table)
4809 struct ci_power_info *pi = ci_get_pi(adev);
4812 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4813 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4814 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4816 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4817 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4822 mc_reg_table->last = (u8)i;
4827 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4828 SMU7_Discrete_MCRegisterSet *data,
4829 u32 num_entries, u32 valid_flag)
4833 for (i = 0, j = 0; j < num_entries; j++) {
4834 if (valid_flag & (1 << j)) {
4835 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4841 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4842 const u32 memory_clock,
4843 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4845 struct ci_power_info *pi = ci_get_pi(adev);
4848 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4849 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4850 break;
4853 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4854 i--;
4856 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4857 mc_reg_table_data, pi->mc_reg_table.last,
4858 pi->mc_reg_table.valid_flag);
4861 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4862 SMU7_Discrete_MCRegisters *mc_reg_table)
4864 struct ci_power_info *pi = ci_get_pi(adev);
4867 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4868 ci_convert_mc_reg_table_entry_to_smc(adev,
4869 pi->dpm_table.mclk_table.dpm_levels[i].value,
4870 &mc_reg_table->data[i]);
4873 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4875 struct ci_power_info *pi = ci_get_pi(adev);
4878 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4880 ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4883 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4885 return amdgpu_ci_copy_bytes_to_smc(adev,
4886 pi->mc_reg_table_start,
4887 (u8 *)&pi->smc_mc_reg_table,
4888 sizeof(SMU7_Discrete_MCRegisters),
4892 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4894 struct ci_power_info *pi = ci_get_pi(adev);
4896 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4899 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4901 ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4903 return amdgpu_ci_copy_bytes_to_smc(adev,
4904 pi->mc_reg_table_start +
4905 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4906 (u8 *)&pi->smc_mc_reg_table.data[0],
4907 sizeof(SMU7_Discrete_MCRegisterSet) *
4908 pi->dpm_table.mclk_table.count,
4912 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4914 u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4916 tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4917 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4920 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4921 struct amdgpu_ps *amdgpu_state)
4923 struct ci_ps *state = ci_get_ps(amdgpu_state);
4925 u16 pcie_speed, max_speed = 0;
4927 for (i = 0; i < state->performance_level_count; i++) {
4928 pcie_speed = state->performance_levels[i].pcie_gen;
4929 if (max_speed < pcie_speed)
4930 max_speed = pcie_speed;
4936 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4940 speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4941 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4942 speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4944 return (u16)speed_cntl;
4947 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4951 link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4952 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4953 link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4955 switch (link_width) {
4971 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4972 struct amdgpu_ps *amdgpu_new_state,
4973 struct amdgpu_ps *amdgpu_current_state)
4975 struct ci_power_info *pi = ci_get_pi(adev);
4976 enum amdgpu_pcie_gen target_link_speed =
4977 ci_get_maximum_link_speed(adev, amdgpu_new_state);
4978 enum amdgpu_pcie_gen current_link_speed;
4980 if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4981 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4982 else
4983 current_link_speed = pi->force_pcie_gen;
4985 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
4986 pi->pspp_notify_required = false;
4987 if (target_link_speed > current_link_speed) {
4988 switch (target_link_speed) {
4990 case AMDGPU_PCIE_GEN3:
4991 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4993 pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
4994 if (current_link_speed == AMDGPU_PCIE_GEN2)
4996 case AMDGPU_PCIE_GEN2:
4997 if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
5001 pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5005 if (target_link_speed < current_link_speed)
5006 pi->pspp_notify_required = true;
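/*
 * Second half of the PCIe speed power policy handshake: if the state change
 * lowered the required link speed, the ACPI performance request
 * (PCIE_PERF_REQ_PECI_GEN1/2/3) is issued afterwards so the platform can
 * relax the link; raising the speed was already requested above, before the
 * state change.
 */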
5010 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5011 struct amdgpu_ps *amdgpu_new_state,
5012 struct amdgpu_ps *amdgpu_current_state)
5014 struct ci_power_info *pi = ci_get_pi(adev);
5015 enum amdgpu_pcie_gen target_link_speed =
5016 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5019 if (pi->pspp_notify_required) {
5020 if (target_link_speed == AMDGPU_PCIE_GEN3)
5021 request = PCIE_PERF_REQ_PECI_GEN3;
5022 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5023 request = PCIE_PERF_REQ_PECI_GEN2;
5025 request = PCIE_PERF_REQ_PECI_GEN1;
5027 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5028 (ci_get_current_pcie_speed(adev) > 0))
5032 amdgpu_acpi_pcie_performance_request(adev, request, false);
5037 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5039 struct ci_power_info *pi = ci_get_pi(adev);
5040 struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5041 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5042 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5043 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5044 struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5045 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5047 if (allowed_sclk_vddc_table == NULL)
5049 if (allowed_sclk_vddc_table->count < 1)
5051 if (allowed_mclk_vddc_table == NULL)
5053 if (allowed_mclk_vddc_table->count < 1)
5055 if (allowed_mclk_vddci_table == NULL)
5057 if (allowed_mclk_vddci_table->count < 1)
5060 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5061 pi->max_vddc_in_pp_table =
5062 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5064 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5065 pi->max_vddci_in_pp_table =
5066 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5068 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5069 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5070 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5071 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5072 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5073 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5074 adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5075 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
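/*
 * VBIOS dependency tables may carry virtual "leakage" voltage IDs instead of
 * real voltages. The patch helpers below replace any ID found in the vddc or
 * vddci leakage tables with the measured voltage, across every clock-voltage
 * dependency, phase-shedding, clock/voltage limit and CAC table.
 */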
5080 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5082 struct ci_power_info *pi = ci_get_pi(adev);
5083 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5086 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5087 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5088 *vddc = leakage_table->actual_voltage[leakage_index];
5094 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5096 struct ci_power_info *pi = ci_get_pi(adev);
5097 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5100 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5101 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5102 *vddci = leakage_table->actual_voltage[leakage_index];
5108 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5109 struct amdgpu_clock_voltage_dependency_table *table)
5114 for (i = 0; i < table->count; i++)
5115 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5119 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5120 struct amdgpu_clock_voltage_dependency_table *table)
5125 for (i = 0; i < table->count; i++)
5126 ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5130 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5131 struct amdgpu_vce_clock_voltage_dependency_table *table)
5136 for (i = 0; i < table->count; i++)
5137 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5141 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5142 struct amdgpu_uvd_clock_voltage_dependency_table *table)
5147 for (i = 0; i < table->count; i++)
5148 ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5152 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5153 struct amdgpu_phase_shedding_limits_table *table)
5158 for (i = 0; i < table->count; i++)
5159 ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5163 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5164 struct amdgpu_clock_and_voltage_limits *table)
5167 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5168 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5172 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5173 struct amdgpu_cac_leakage_table *table)
5178 for (i = 0; i < table->count; i++)
5179 ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5183 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5186 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5187 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5188 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5189 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5190 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5191 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5192 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5193 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5194 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5195 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5196 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5197 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5198 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5199 &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5200 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5201 &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5202 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5203 &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5204 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5205 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5206 ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5207 &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5208 ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5209 &adev->pm.dpm.dyn_state.cac_leakage_table);
5213 static void ci_update_current_ps(struct amdgpu_device *adev,
5214 struct amdgpu_ps *rps)
5216 struct ci_ps *new_ps = ci_get_ps(rps);
5217 struct ci_power_info *pi = ci_get_pi(adev);
5219 pi->current_rps = *rps;
5220 pi->current_ps = *new_ps;
5221 pi->current_rps.ps_priv = &pi->current_ps;
5224 static void ci_update_requested_ps(struct amdgpu_device *adev,
5225 struct amdgpu_ps *rps)
5227 struct ci_ps *new_ps = ci_get_ps(rps);
5228 struct ci_power_info *pi = ci_get_pi(adev);
5230 pi->requested_rps = *rps;
5231 pi->requested_ps = *new_ps;
5232 pi->requested_rps.ps_priv = &pi->requested_ps;
5235 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5237 struct ci_power_info *pi = ci_get_pi(adev);
5238 struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5239 struct amdgpu_ps *new_ps = &requested_ps;
5241 ci_update_requested_ps(adev, new_ps);
5243 ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5248 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5250 struct ci_power_info *pi = ci_get_pi(adev);
5251 struct amdgpu_ps *new_ps = &pi->requested_rps;
5253 ci_update_current_ps(adev, new_ps);
5257 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5259 ci_read_clock_registers(adev);
5260 ci_enable_acpi_power_management(adev);
5261 ci_init_sclk_t(adev);
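/*
 * DPM bring-up order: voltage tables and MC registers first, then spread
 * spectrum/thermal protection/display gap, firmware upload and header
 * parsing, SMC tables, SMC start, ULV and deep sleep, DPM start, DIDT/CAC/
 * power containment, and finally the thermal controller; each failing step
 * logs a DRM_ERROR.
 */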
5264 static int ci_dpm_enable(struct amdgpu_device *adev)
5266 struct ci_power_info *pi = ci_get_pi(adev);
5267 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5270 if (amdgpu_ci_is_smc_running(adev))
5272 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5273 ci_enable_voltage_control(adev);
5274 ret = ci_construct_voltage_tables(adev);
5276 DRM_ERROR("ci_construct_voltage_tables failed\n");
5280 if (pi->caps_dynamic_ac_timing) {
5281 ret = ci_initialize_mc_reg_table(adev);
5283 pi->caps_dynamic_ac_timing = false;
5286 ci_enable_spread_spectrum(adev, true);
5287 if (pi->thermal_protection)
5288 ci_enable_thermal_protection(adev, true);
5289 ci_program_sstp(adev);
5290 ci_enable_display_gap(adev);
5291 ci_program_vc(adev);
5292 ret = ci_upload_firmware(adev);
5294 DRM_ERROR("ci_upload_firmware failed\n");
5297 ret = ci_process_firmware_header(adev);
5299 DRM_ERROR("ci_process_firmware_header failed\n");
5302 ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5304 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5307 ret = ci_init_smc_table(adev);
5309 DRM_ERROR("ci_init_smc_table failed\n");
5312 ret = ci_init_arb_table_index(adev);
5314 DRM_ERROR("ci_init_arb_table_index failed\n");
5317 if (pi->caps_dynamic_ac_timing) {
5318 ret = ci_populate_initial_mc_reg_table(adev);
5320 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5324 ret = ci_populate_pm_base(adev);
5326 DRM_ERROR("ci_populate_pm_base failed\n");
5329 ci_dpm_start_smc(adev);
5330 ci_enable_vr_hot_gpio_interrupt(adev);
5331 ret = ci_notify_smc_display_change(adev, false);
5333 DRM_ERROR("ci_notify_smc_display_change failed\n");
5336 ci_enable_sclk_control(adev, true);
5337 ret = ci_enable_ulv(adev, true);
5339 DRM_ERROR("ci_enable_ulv failed\n");
5342 ret = ci_enable_ds_master_switch(adev, true);
5344 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5347 ret = ci_start_dpm(adev);
5349 DRM_ERROR("ci_start_dpm failed\n");
5352 ret = ci_enable_didt(adev, true);
5354 DRM_ERROR("ci_enable_didt failed\n");
5357 ret = ci_enable_smc_cac(adev, true);
5359 DRM_ERROR("ci_enable_smc_cac failed\n");
5362 ret = ci_enable_power_containment(adev, true);
5364 DRM_ERROR("ci_enable_power_containment failed\n");
5368 ret = ci_power_control_set_level(adev);
5370 DRM_ERROR("ci_power_control_set_level failed\n");
5374 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5376 ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5378 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5382 ci_thermal_start_thermal_controller(adev);
5384 ci_update_current_ps(adev, boot_ps);
5389 static void ci_dpm_disable(struct amdgpu_device *adev)
5391 struct ci_power_info *pi = ci_get_pi(adev);
5392 struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5394 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5395 AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5396 amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5397 AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5399 ci_dpm_powergate_uvd(adev, false);
5401 if (!amdgpu_ci_is_smc_running(adev))
5404 ci_thermal_stop_thermal_controller(adev);
5406 if (pi->thermal_protection)
5407 ci_enable_thermal_protection(adev, false);
5408 ci_enable_power_containment(adev, false);
5409 ci_enable_smc_cac(adev, false);
5410 ci_enable_didt(adev, false);
5411 ci_enable_spread_spectrum(adev, false);
5412 ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5414 ci_enable_ds_master_switch(adev, false);
5415 ci_enable_ulv(adev, false);
5417 ci_reset_to_default(adev);
5418 ci_dpm_stop_smc(adev);
5419 ci_force_switch_to_arb_f0(adev);
5420 ci_enable_thermal_based_sclk_dpm(adev, false);
5422 ci_update_current_ps(adev, boot_ps);
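/*
 * Power state switch: work out which tables changed, optionally raise the
 * PCIe link speed, freeze sclk/mclk DPM while the graphics/memory levels and
 * enable masks are re-uploaded, update VCE DPM and memory timings, unfreeze,
 * push the final enable masks and, if needed, notify the lower link speed.
 */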
5425 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5427 struct ci_power_info *pi = ci_get_pi(adev);
5428 struct amdgpu_ps *new_ps = &pi->requested_rps;
5429 struct amdgpu_ps *old_ps = &pi->current_rps;
5432 ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5433 if (pi->pcie_performance_request)
5434 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5435 ret = ci_freeze_sclk_mclk_dpm(adev);
5437 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5440 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5442 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5445 ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5447 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5451 ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5453 DRM_ERROR("ci_update_vce_dpm failed\n");
5457 ret = ci_update_sclk_t(adev);
5459 DRM_ERROR("ci_update_sclk_t failed\n");
5462 if (pi->caps_dynamic_ac_timing) {
5463 ret = ci_update_and_upload_mc_reg_table(adev);
5465 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5469 ret = ci_program_memory_timing_parameters(adev);
5471 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5474 ret = ci_unfreeze_sclk_mclk_dpm(adev);
5476 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5479 ret = ci_upload_dpm_level_enable_mask(adev);
5481 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5484 if (pi->pcie_performance_request)
5485 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5491 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5493 ci_set_boot_state(adev);
5497 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5499 ci_program_display_gap(adev);
5503 struct _ATOM_POWERPLAY_INFO info;
5504 struct _ATOM_POWERPLAY_INFO_V2 info_2;
5505 struct _ATOM_POWERPLAY_INFO_V3 info_3;
5506 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5507 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5508 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5511 union pplib_clock_info {
5512 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5513 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5514 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5515 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5516 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5517 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5520 union pplib_power_state {
5521 struct _ATOM_PPLIB_STATE v1;
5522 struct _ATOM_PPLIB_STATE_V2 v2;
5525 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5526 struct amdgpu_ps *rps,
5527 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5530 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5531 rps->class = le16_to_cpu(non_clock_info->usClassification);
5532 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5534 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5535 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5536 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5542 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5543 adev->pm.dpm.boot_ps = rps;
5544 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5545 adev->pm.dpm.uvd_ps = rps;
5548 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5549 struct amdgpu_ps *rps, int index,
5550 union pplib_clock_info *clock_info)
5552 struct ci_power_info *pi = ci_get_pi(adev);
5553 struct ci_ps *ps = ci_get_ps(rps);
5554 struct ci_pl *pl = &ps->performance_levels[index];
5556 ps->performance_level_count = index + 1;
5558 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5559 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5560 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5561 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5563 pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5565 pi->vbios_boot_state.pcie_gen_bootup_value,
5566 clock_info->ci.ucPCIEGen);
5567 pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5568 pi->vbios_boot_state.pcie_lane_bootup_value,
5569 le16_to_cpu(clock_info->ci.usPCIELane));
5571 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5572 pi->acpi_pcie_gen = pl->pcie_gen;
5575 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5576 pi->ulv.supported = true;
5578 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5581 /* patch up boot state */
5582 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5583 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5584 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5585 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5586 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5589 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5590 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5591 pi->use_pcie_powersaving_levels = true;
5592 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5593 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5594 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5595 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5596 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5597 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5598 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5599 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5601 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5602 pi->use_pcie_performance_levels = true;
5603 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5604 pi->pcie_gen_performance.max = pl->pcie_gen;
5605 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5606 pi->pcie_gen_performance.min = pl->pcie_gen;
5607 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5608 pi->pcie_lane_performance.max = pl->pcie_lane;
5609 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5610 pi->pcie_lane_performance.min = pl->pcie_lane;
5617 static int ci_parse_power_table(struct amdgpu_device *adev)
5619 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5620 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5621 union pplib_power_state *power_state;
5622 int i, j, k, non_clock_array_index, clock_array_index;
5623 union pplib_clock_info *clock_info;
5624 struct _StateArray *state_array;
5625 struct _ClockInfoArray *clock_info_array;
5626 struct _NonClockInfoArray *non_clock_info_array;
5627 union power_info *power_info;
5628 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5631 u8 *power_state_offset;
5634 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5635 &frev, &crev, &data_offset))
5637 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5639 amdgpu_add_thermal_controller(adev);
5641 state_array = (struct _StateArray *)
5642 (mode_info->atom_context->bios + data_offset +
5643 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5644 clock_info_array = (struct _ClockInfoArray *)
5645 (mode_info->atom_context->bios + data_offset +
5646 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5647 non_clock_info_array = (struct _NonClockInfoArray *)
5648 (mode_info->atom_context->bios + data_offset +
5649 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5651 adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
5652 state_array->ucNumEntries, GFP_KERNEL);
5653 if (!adev->pm.dpm.ps)
5655 power_state_offset = (u8 *)state_array->states;
5656 for (i = 0; i < state_array->ucNumEntries; i++) {
5658 power_state = (union pplib_power_state *)power_state_offset;
5659 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5660 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5661 &non_clock_info_array->nonClockInfo[non_clock_array_index];
5662 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5664 kfree(adev->pm.dpm.ps);
5667 adev->pm.dpm.ps[i].ps_priv = ps;
5668 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5670 non_clock_info_array->ucEntrySize);
5672 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5673 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5674 clock_array_index = idx[j];
5675 if (clock_array_index >= clock_info_array->ucNumEntries)
5677 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5679 clock_info = (union pplib_clock_info *)
5680 ((u8 *)&clock_info_array->clockInfo[0] +
5681 (clock_array_index * clock_info_array->ucEntrySize));
5682 ci_parse_pplib_clock_info(adev,
5683 &adev->pm.dpm.ps[i], k,
5687 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5689 adev->pm.dpm.num_ps = state_array->ucNumEntries;
5691 /* fill in the vce power states */
5692 for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
5694 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5695 clock_info = (union pplib_clock_info *)
5696 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5697 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5698 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5699 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5700 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5701 adev->pm.dpm.vce_states[i].sclk = sclk;
5702 adev->pm.dpm.vce_states[i].mclk = mclk;
5708 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5709 struct ci_vbios_boot_state *boot_state)
5711 struct amdgpu_mode_info *mode_info = &adev->mode_info;
5712 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5713 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5717 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5718 &frev, &crev, &data_offset)) {
5720 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5722 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5723 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5724 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5725 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5726 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5727 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5728 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5735 static void ci_dpm_fini(struct amdgpu_device *adev)
5739 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5740 kfree(adev->pm.dpm.ps[i].ps_priv);
5742 kfree(adev->pm.dpm.ps);
5743 kfree(adev->pm.dpm.priv);
5744 kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5745 amdgpu_free_extended_power_table(adev);
5749 * ci_dpm_init_microcode - load ucode images from disk
5751 * @adev: amdgpu_device pointer
5753 * Use the firmware interface to load the ucode images into
5754 * the driver (not loaded into hw).
5755 * Returns 0 on success, error on failure.
5757 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5759 const char *chip_name;
5765 switch (adev->asic_type) {
5767 if ((adev->pdev->revision == 0x80) ||
5768 (adev->pdev->revision == 0x81) ||
5769 (adev->pdev->device == 0x665f))
5770 chip_name = "bonaire_k";
5772 chip_name = "bonaire";
5775 if (adev->pdev->revision == 0x80)
5776 chip_name = "hawaii_k";
5778 chip_name = "hawaii";
5785 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5786 err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5789 err = amdgpu_ucode_validate(adev->pm.fw);
5794 "cik_smc: Failed to load firmware \"%s\"\n",
5796 release_firmware(adev->pm.fw);
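/*
 * ci_dpm_init() allocates ci_power_info, reads boot values and power tables
 * from the VBIOS, patches leakage voltages, fills in defaults (activity
 * targets, mclk thresholds, thermal trip points), probes the VRHot/AC-DC/PCC
 * GPIOs and selects GPIO vs SVID2 voltage control for VDDC/VDDCI/MVDD.
 */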
5802 static int ci_dpm_init(struct amdgpu_device *adev)
5804 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5805 SMU7_Discrete_DpmTable *dpm_table;
5806 struct amdgpu_gpio_rec gpio;
5807 u16 data_offset, size;
5809 struct ci_power_info *pi;
5812 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5815 adev->pm.dpm.priv = pi;
5818 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5819 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5821 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5823 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5824 pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5825 pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5826 pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5828 pi->pcie_lane_performance.max = 0;
5829 pi->pcie_lane_performance.min = 16;
5830 pi->pcie_lane_powersaving.max = 0;
5831 pi->pcie_lane_powersaving.min = 16;
5833 ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5839 ret = amdgpu_get_platform_caps(adev);
5845 ret = amdgpu_parse_extended_power_table(adev);
5851 ret = ci_parse_power_table(adev);
5857 pi->dll_default_on = false;
5858 pi->sram_end = SMC_RAM_END;
5860 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5861 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5862 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5863 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5864 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5865 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5866 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5867 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5869 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5871 pi->sclk_dpm_key_disabled = 0;
5872 pi->mclk_dpm_key_disabled = 0;
5873 pi->pcie_dpm_key_disabled = 0;
5874 pi->thermal_sclk_dpm_enabled = 0;
5876 pi->caps_sclk_ds = true;
5878 pi->mclk_strobe_mode_threshold = 40000;
5879 pi->mclk_stutter_mode_threshold = 40000;
5880 pi->mclk_edc_enable_threshold = 40000;
5881 pi->mclk_edc_wr_enable_threshold = 40000;
5883 ci_initialize_powertune_defaults(adev);
5885 pi->caps_fps = false;
5887 pi->caps_sclk_throttle_low_notification = false;
5889 pi->caps_uvd_dpm = true;
5890 pi->caps_vce_dpm = true;
5892 ci_get_leakage_voltages(adev);
5893 ci_patch_dependency_tables_with_leakage(adev);
5894 ci_set_private_data_variables_based_on_pptable(adev);
5896 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5897 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5898 if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5902 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5903 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5904 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5905 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5906 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5907 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5908 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5909 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5910 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5912 adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5913 adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5914 adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5916 adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5917 adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5918 adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5919 adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5921 if (adev->asic_type == CHIP_HAWAII) {
5922 pi->thermal_temp_setting.temperature_low = 94500;
5923 pi->thermal_temp_setting.temperature_high = 95000;
5924 pi->thermal_temp_setting.temperature_shutdown = 104000;
5926 pi->thermal_temp_setting.temperature_low = 99500;
5927 pi->thermal_temp_setting.temperature_high = 100000;
5928 pi->thermal_temp_setting.temperature_shutdown = 104000;
5931 pi->uvd_enabled = false;
5933 dpm_table = &pi->smc_state_table;
5935 gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5937 dpm_table->VRHotGpio = gpio.shift;
5938 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5940 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5941 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5944 gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5946 dpm_table->AcDcGpio = gpio.shift;
5947 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5949 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5950 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 1:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 2:
			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
			break;
		case 3:
			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
			break;
		case 4:
			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
			break;
		default:
			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
	}

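	/*
	 * Note: the VDDC PCC pin (presumably the regulator's peak-current
	 * control signal) is routed into CNB_PWRMGT_CNTL above; which GNB
	 * slow-down / NB PS1 / DPM bit gets set depends on the GPIO index the
	 * board wired up.  The board-level meaning of each index is an
	 * assumption here; the register programming itself mirrors the switch
	 * above.
	 */
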
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

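	/*
	 * Voltage control selection per rail: prefer a GPIO voltage LUT when
	 * the vbios describes one, otherwise fall back to SVID2; rails with
	 * neither stay at CISLANDS_VOLTAGE_CONTROL_NONE, and VDDCI/MVDD also
	 * drop their platform caps above so later code treats them as
	 * uncontrolled.
	 */
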
	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	pi->pcie_performance_request = false;
#endif

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

static void
ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(adev);
	u32 mclk = ci_get_average_mclk_freq(adev);
	u32 activity_percent = 50;
	int ret;

	ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
					&activity_percent);

	if (ret == 0) {
		activity_percent += 0x80;
		activity_percent >>= 8;
		activity_percent = activity_percent > 100 ? 100 : activity_percent;
	}

	seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
	seq_printf(m, "GPU load: %u %%\n", activity_percent);
}

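/*
 * The AverageGraphicsA soft register read in the function above is assumed to
 * be an 8.8 fixed-point percentage: adding 0x80 before shifting right by 8
 * rounds to the nearest integer, e.g. 0x3C80 (60.5) + 0x80 = 0x3D00, >> 8 = 61.
 */
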
static void ci_dpm_print_power_state(struct amdgpu_device *adev,
				     struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}

/* get temperature in millidegrees */
static int ci_dpm_get_temp(struct amdgpu_device *adev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

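/*
 * CTF_TEMP is treated above as a 9-bit temperature in degrees Celsius; bit
 * 0x200 appears to act as a saturation/overflow flag, in which case the
 * reading is clamped to 255 C before being scaled to millidegrees.
 */
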
static int ci_set_temperature_range(struct amdgpu_device *adev)
{
	int ret;

	ret = ci_thermal_enable_alert(adev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
					       CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;

	return ret;
}

static int ci_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_dpm_set_dpm_funcs(adev);
	ci_dpm_set_irq_funcs(adev);

	return 0;
}

static int ci_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	/* init the sysfs and debugfs files late */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	ret = ci_set_temperature_range(adev);
	if (ret)
		return ret;

	ci_dpm_powergate_uvd(adev, true);

	return 0;
}

static int ci_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	ret = ci_dpm_init_microcode(adev);
	if (ret)
		return ret;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = ci_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int ci_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}

static int ci_dpm_hw_init(void *handle)
{
	int ret;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ci_dpm_setup_asic(adev);
	ret = ci_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int ci_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		ci_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int ci_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		ci_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}
	return 0;
}

static int ci_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		ci_dpm_setup_asic(adev);
		ret = ci_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}

static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return 0;
}

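/*
 * THERM_INTH/THERM_INTL are the high- and low-threshold thermal interrupt
 * mask bits in CG_THERMAL_INT: setting a mask bit masks (disables) that
 * interrupt and clearing it enables delivery, which is why the DISABLE cases
 * above OR the mask in and the ENABLE cases clear it.
 */
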
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
		enum pp_clock_type type, char *buf)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;

	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = ci_get_current_pcie_speed(adev);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

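/*
 * The sclk/mclk DPM level values are stored in 10 kHz units, hence the
 * "/ 100" when printing MHz for the pp_dpm_sclk / pp_dpm_mclk sysfs files;
 * the entry marked '*' is the first level at or above the frequency the SMC
 * currently reports.
 */
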
static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
		enum pp_clock_type type, uint32_t mask)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.dpm.forced_level
			!= AMDGPU_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;

	case PP_MCLK:
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;

	case PP_PCIE:
	{
		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
		uint32_t level = 0;

		while (tmp >>= 1)
			level++;

		if (!pi->pcie_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					level);
		break;
	}
	default:
		break;
	}

	return 0;
}

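/*
 * In the PP_PCIE case above the mask is collapsed to a single forced level:
 * the while loop computes the index of the highest set bit, so e.g. a mask of
 * 0x4 forces PCIe DPM level 2.
 */
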
static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}

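/*
 * Overdrive is reported as a percentage over the default (golden) top sclk
 * level: od = (current_max - golden_max) * 100 / golden_max, so a golden
 * 1000 MHz table pushed to 1050 MHz reads back as 5.
 */
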
static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].sclk =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}

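/*
 * The requested overdrive percentage (clamped to 20 above) rescales only the
 * highest performance level:
 * new_top_sclk = golden_top_sclk * (100 + value) / 100.
 */
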
static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}

static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].mclk =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}

const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
	.get_temperature = &ci_dpm_get_temp,
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
};

static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
	if (adev->pm.funcs == NULL)
		adev->pm.funcs = &ci_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}