 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
24 #define SWSMU_CODE_LAYER_L2
27 #include "amdgpu_smu.h"
28 #include "smu_v13_0.h"
29 #include "smu13_driver_if_yellow_carp.h"
30 #include "yellow_carp_ppt.h"
31 #include "smu_v13_0_1_ppsmc.h"
32 #include "smu_v13_0_1_pmfw.h"
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
/* SMUIO register (and its field) polled to read back the GFXOFF power status. */
#define regSMUIO_GFX_MISC_CNTL						0x00c5
#define regSMUIO_GFX_MISC_CNTL_BASE_IDX					0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK			0x00000006L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT			0x1L

/* UMD pstate (profile_standard) clock levels in MHz, per MP1 IP version. */
#define SMU_13_0_8_UMD_PSTATE_GFXCLK					533
#define SMU_13_0_8_UMD_PSTATE_SOCCLK					533
#define SMU_13_0_8_UMD_PSTATE_FCLK					800

#define SMU_13_0_1_UMD_PSTATE_GFXCLK					700
#define SMU_13_0_1_UMD_PSTATE_SOCCLK					678
#define SMU_13_0_1_UMD_PSTATE_FCLK					1800

#define FEATURE_MASK(feature) (1ULL << feature)

/* Set of PMFW feature bits that indicate some form of DPM is active. */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT)	 | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT)	 | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)	 | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)	 | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT)	 | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT)	 | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT)| \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))
70 static struct cmn2asic_msg_mapping yellow_carp_message_map[SMU_MSG_MAX_COUNT] = {
71 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
72 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
73 MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
74 MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
75 MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
76 MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
77 MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
78 MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
79 MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
80 MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
81 MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
82 MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
83 MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
84 MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
85 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
86 MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
87 MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1),
88 MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
89 MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1),
90 MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 1),
91 MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
92 MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
93 MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1),
94 MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1),
95 MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
96 MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 1),
97 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
98 MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
99 MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1),
100 MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1),
103 static struct cmn2asic_mapping yellow_carp_feature_mask_map[SMU_FEATURE_COUNT] = {
105 FEA_MAP(FAN_CONTROLLER),
111 FEA_MAP_REVERSE(FCLK),
112 FEA_MAP_REVERSE(SOCCLK),
114 FEA_MAP(SHUBCLK_DPM),
116 FEA_MAP_HALF_REVERSE(GFX),
132 FEA_MAP(RSMU_LOW_POWER),
133 FEA_MAP(SMN_LOW_POWER),
134 FEA_MAP(THM_LOW_POWER),
135 FEA_MAP(SMUIO_LOW_POWER),
136 FEA_MAP(MP1_LOW_POWER),
140 FEA_MAP(MSMU_LOW_POWER),
144 static struct cmn2asic_mapping yellow_carp_table_map[SMU_TABLE_COUNT] = {
145 TAB_MAP_VALID(WATERMARKS),
146 TAB_MAP_VALID(SMU_METRICS),
147 TAB_MAP_VALID(CUSTOM_DPM),
148 TAB_MAP_VALID(DPMCLOCKS),
151 static int yellow_carp_init_smc_tables(struct smu_context *smu)
153 struct smu_table_context *smu_table = &smu->smu_table;
154 struct smu_table *tables = smu_table->tables;
156 SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
157 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
158 SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
159 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
160 SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
161 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
163 smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
164 if (!smu_table->clocks_table)
167 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
168 if (!smu_table->metrics_table)
170 smu_table->metrics_time = 0;
172 smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
173 if (!smu_table->watermarks_table)
176 smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
177 smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
178 if (!smu_table->gpu_metrics_table)
184 kfree(smu_table->watermarks_table);
186 kfree(smu_table->metrics_table);
188 kfree(smu_table->clocks_table);
193 static int yellow_carp_fini_smc_tables(struct smu_context *smu)
195 struct smu_table_context *smu_table = &smu->smu_table;
197 kfree(smu_table->clocks_table);
198 smu_table->clocks_table = NULL;
200 kfree(smu_table->metrics_table);
201 smu_table->metrics_table = NULL;
203 kfree(smu_table->watermarks_table);
204 smu_table->watermarks_table = NULL;
206 kfree(smu_table->gpu_metrics_table);
207 smu_table->gpu_metrics_table = NULL;
212 static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
214 struct amdgpu_device *adev = smu->adev;
217 if (!en && !adev->in_s0ix)
218 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
223 static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
227 /* vcn dpm on is a prerequisite for vcn power gate messages */
229 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
232 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
238 static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
243 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg,
246 ret = smu_cmn_send_smc_msg_with_param(smu,
247 SMU_MSG_PowerDownJpeg, 0,
254 static bool yellow_carp_is_dpm_running(struct smu_context *smu)
257 uint64_t feature_enabled;
259 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
264 return !!(feature_enabled & SMC_DPM_FEATURE);
267 static int yellow_carp_post_smu_init(struct smu_context *smu)
269 struct amdgpu_device *adev = smu->adev;
272 /* allow message will be sent after enable message on Yellow Carp*/
273 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
275 dev_err(adev->dev, "Failed to Enable GfxOff!\n");
279 static int yellow_carp_mode_reset(struct smu_context *smu, int type)
283 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
285 dev_err(smu->adev->dev, "Failed to mode reset!\n");
290 static int yellow_carp_mode2_reset(struct smu_context *smu)
292 return yellow_carp_mode_reset(smu, SMU_RESET_MODE_2);
296 static void yellow_carp_get_ss_power_percent(SmuMetrics_t *metrics,
297 uint32_t *apu_percent, uint32_t *dgpu_percent)
299 uint32_t apu_boost = 0;
300 uint32_t dgpu_boost = 0;
301 uint16_t apu_limit = 0;
302 uint16_t dgpu_limit = 0;
303 uint16_t apu_power = 0;
304 uint16_t dgpu_power = 0;
306 /* APU and dGPU power values are reported in milli Watts
307 * and STAPM power limits are in Watts */
308 apu_power = metrics->ApuPower/1000;
309 apu_limit = metrics->StapmOpnLimit;
310 if (apu_power > apu_limit && apu_limit != 0)
311 apu_boost = ((apu_power - apu_limit) * 100) / apu_limit;
312 apu_boost = (apu_boost > 100) ? 100 : apu_boost;
314 dgpu_power = metrics->dGpuPower/1000;
315 if (metrics->StapmCurrentLimit > metrics->StapmOpnLimit)
316 dgpu_limit = metrics->StapmCurrentLimit - metrics->StapmOpnLimit;
317 if (dgpu_power > dgpu_limit && dgpu_limit != 0)
318 dgpu_boost = ((dgpu_power - dgpu_limit) * 100) / dgpu_limit;
319 dgpu_boost = (dgpu_boost > 100) ? 100 : dgpu_boost;
321 if (dgpu_boost >= apu_boost)
326 *apu_percent = apu_boost;
327 *dgpu_percent = dgpu_boost;
331 static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
332 MetricsMember_t member,
335 struct smu_table_context *smu_table = &smu->smu_table;
337 SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
339 uint32_t apu_percent = 0;
340 uint32_t dgpu_percent = 0;
342 ret = smu_cmn_get_metrics_table(smu, NULL, false);
347 case METRICS_AVERAGE_GFXCLK:
348 *value = metrics->GfxclkFrequency;
350 case METRICS_AVERAGE_SOCCLK:
351 *value = metrics->SocclkFrequency;
353 case METRICS_AVERAGE_VCLK:
354 *value = metrics->VclkFrequency;
356 case METRICS_AVERAGE_DCLK:
357 *value = metrics->DclkFrequency;
359 case METRICS_AVERAGE_UCLK:
360 *value = metrics->MemclkFrequency;
362 case METRICS_AVERAGE_GFXACTIVITY:
363 *value = metrics->GfxActivity / 100;
365 case METRICS_AVERAGE_VCNACTIVITY:
366 *value = metrics->UvdActivity;
368 case METRICS_AVERAGE_SOCKETPOWER:
369 *value = (metrics->CurrentSocketPower << 8) / 1000;
371 case METRICS_TEMPERATURE_EDGE:
372 *value = metrics->GfxTemperature / 100 *
373 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
375 case METRICS_TEMPERATURE_HOTSPOT:
376 *value = metrics->SocTemperature / 100 *
377 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
379 case METRICS_THROTTLER_STATUS:
380 *value = metrics->ThrottlerStatus;
382 case METRICS_VOLTAGE_VDDGFX:
383 *value = metrics->Voltage[0];
385 case METRICS_VOLTAGE_VDDSOC:
386 *value = metrics->Voltage[1];
388 case METRICS_SS_APU_SHARE:
389 /* return the percentage of APU power boost
390 * with respect to APU's power limit.
392 yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
393 *value = apu_percent;
395 case METRICS_SS_DGPU_SHARE:
396 /* return the percentage of dGPU power boost
397 * with respect to dGPU's power limit.
399 yellow_carp_get_ss_power_percent(metrics, &apu_percent, &dgpu_percent);
400 *value = dgpu_percent;
410 static int yellow_carp_read_sensor(struct smu_context *smu,
411 enum amd_pp_sensors sensor,
412 void *data, uint32_t *size)
420 case AMDGPU_PP_SENSOR_GPU_LOAD:
421 ret = yellow_carp_get_smu_metrics_data(smu,
422 METRICS_AVERAGE_GFXACTIVITY,
426 case AMDGPU_PP_SENSOR_GPU_POWER:
427 ret = yellow_carp_get_smu_metrics_data(smu,
428 METRICS_AVERAGE_SOCKETPOWER,
432 case AMDGPU_PP_SENSOR_EDGE_TEMP:
433 ret = yellow_carp_get_smu_metrics_data(smu,
434 METRICS_TEMPERATURE_EDGE,
438 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
439 ret = yellow_carp_get_smu_metrics_data(smu,
440 METRICS_TEMPERATURE_HOTSPOT,
444 case AMDGPU_PP_SENSOR_GFX_MCLK:
445 ret = yellow_carp_get_smu_metrics_data(smu,
446 METRICS_AVERAGE_UCLK,
448 *(uint32_t *)data *= 100;
451 case AMDGPU_PP_SENSOR_GFX_SCLK:
452 ret = yellow_carp_get_smu_metrics_data(smu,
453 METRICS_AVERAGE_GFXCLK,
455 *(uint32_t *)data *= 100;
458 case AMDGPU_PP_SENSOR_VDDGFX:
459 ret = yellow_carp_get_smu_metrics_data(smu,
460 METRICS_VOLTAGE_VDDGFX,
464 case AMDGPU_PP_SENSOR_VDDNB:
465 ret = yellow_carp_get_smu_metrics_data(smu,
466 METRICS_VOLTAGE_VDDSOC,
470 case AMDGPU_PP_SENSOR_SS_APU_SHARE:
471 ret = yellow_carp_get_smu_metrics_data(smu,
472 METRICS_SS_APU_SHARE,
476 case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
477 ret = yellow_carp_get_smu_metrics_data(smu,
478 METRICS_SS_DGPU_SHARE,
490 static int yellow_carp_set_watermarks_table(struct smu_context *smu,
491 struct pp_smu_wm_range_sets *clock_ranges)
495 Watermarks_t *table = smu->smu_table.watermarks_table;
497 if (!table || !clock_ranges)
501 if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
502 clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
505 for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
506 table->WatermarkRow[WM_DCFCLK][i].MinClock =
507 clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
508 table->WatermarkRow[WM_DCFCLK][i].MaxClock =
509 clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
510 table->WatermarkRow[WM_DCFCLK][i].MinMclk =
511 clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
512 table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
513 clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
515 table->WatermarkRow[WM_DCFCLK][i].WmSetting =
516 clock_ranges->reader_wm_sets[i].wm_inst;
519 for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
520 table->WatermarkRow[WM_SOCCLK][i].MinClock =
521 clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
522 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
523 clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
524 table->WatermarkRow[WM_SOCCLK][i].MinMclk =
525 clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
526 table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
527 clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
529 table->WatermarkRow[WM_SOCCLK][i].WmSetting =
530 clock_ranges->writer_wm_sets[i].wm_inst;
533 smu->watermarks_bitmap |= WATERMARKS_EXIST;
536 /* pass data to smu controller */
537 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
538 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
539 ret = smu_cmn_write_watermarks_table(smu);
541 dev_err(smu->adev->dev, "Failed to update WMTABLE!");
544 smu->watermarks_bitmap |= WATERMARKS_LOADED;
550 static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
553 struct smu_table_context *smu_table = &smu->smu_table;
554 struct gpu_metrics_v2_1 *gpu_metrics =
555 (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
556 SmuMetrics_t metrics;
559 ret = smu_cmn_get_metrics_table(smu, &metrics, true);
563 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
565 gpu_metrics->temperature_gfx = metrics.GfxTemperature;
566 gpu_metrics->temperature_soc = metrics.SocTemperature;
567 memcpy(&gpu_metrics->temperature_core[0],
568 &metrics.CoreTemperature[0],
569 sizeof(uint16_t) * 8);
570 gpu_metrics->temperature_l3[0] = metrics.L3Temperature;
572 gpu_metrics->average_gfx_activity = metrics.GfxActivity;
573 gpu_metrics->average_mm_activity = metrics.UvdActivity;
575 gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
576 gpu_metrics->average_gfx_power = metrics.Power[0];
577 gpu_metrics->average_soc_power = metrics.Power[1];
578 memcpy(&gpu_metrics->average_core_power[0],
579 &metrics.CorePower[0],
580 sizeof(uint16_t) * 8);
582 gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
583 gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
584 gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
585 gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
586 gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
587 gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
589 memcpy(&gpu_metrics->current_coreclk[0],
590 &metrics.CoreFrequency[0],
591 sizeof(uint16_t) * 8);
592 gpu_metrics->current_l3clk[0] = metrics.L3Frequency;
594 gpu_metrics->throttle_status = metrics.ThrottlerStatus;
596 gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
598 *table = (void *)gpu_metrics;
600 return sizeof(struct gpu_metrics_v2_1);
604 * yellow_carp_get_gfxoff_status - get gfxoff status
606 * @smu: smu_context pointer
608 * This function will be used to get gfxoff status
610 * Returns 0=GFXOFF(default).
611 * Returns 1=Transition out of GFX State.
612 * Returns 2=Not in GFXOFF.
613 * Returns 3=Transition into GFXOFF.
615 static uint32_t yellow_carp_get_gfxoff_status(struct smu_context *smu)
618 uint32_t gfxoff_status = 0;
619 struct amdgpu_device *adev = smu->adev;
621 reg = RREG32_SOC15(SMUIO, 0, regSMUIO_GFX_MISC_CNTL);
622 gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
623 >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
625 return gfxoff_status;
628 static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
630 struct smu_table_context *smu_table = &smu->smu_table;
632 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
635 static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
636 long input[], uint32_t size)
638 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
641 /* Only allowed in manual mode */
642 if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
646 case PP_OD_EDIT_SCLK_VDDC_TABLE:
648 dev_err(smu->adev->dev, "Input parameter number not correct\n");
653 if (input[1] < smu->gfx_default_hard_min_freq) {
654 dev_warn(smu->adev->dev,
655 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
656 input[1], smu->gfx_default_hard_min_freq);
659 smu->gfx_actual_hard_min_freq = input[1];
660 } else if (input[0] == 1) {
661 if (input[1] > smu->gfx_default_soft_max_freq) {
662 dev_warn(smu->adev->dev,
663 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
664 input[1], smu->gfx_default_soft_max_freq);
667 smu->gfx_actual_soft_max_freq = input[1];
672 case PP_OD_RESTORE_DEFAULT_TABLE:
674 dev_err(smu->adev->dev, "Input parameter number not correct\n");
677 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
678 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
681 case PP_OD_COMMIT_DPM_TABLE:
683 dev_err(smu->adev->dev, "Input parameter number not correct\n");
686 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
687 dev_err(smu->adev->dev,
688 "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
689 smu->gfx_actual_hard_min_freq,
690 smu->gfx_actual_soft_max_freq);
694 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
695 smu->gfx_actual_hard_min_freq, NULL);
697 dev_err(smu->adev->dev, "Set hard min sclk failed!");
701 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
702 smu->gfx_actual_soft_max_freq, NULL);
704 dev_err(smu->adev->dev, "Set soft max sclk failed!");
716 static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
717 enum smu_clk_type clk_type,
720 MetricsMember_t member_type;
724 member_type = METRICS_AVERAGE_SOCCLK;
727 member_type = METRICS_AVERAGE_VCLK;
730 member_type = METRICS_AVERAGE_DCLK;
733 member_type = METRICS_AVERAGE_UCLK;
736 return smu_cmn_send_smc_msg_with_param(smu,
737 SMU_MSG_GetFclkFrequency, 0, value);
740 return smu_cmn_send_smc_msg_with_param(smu,
741 SMU_MSG_GetGfxclkFrequency, 0, value);
747 return yellow_carp_get_smu_metrics_data(smu, member_type, value);
750 static int yellow_carp_get_dpm_level_count(struct smu_context *smu,
751 enum smu_clk_type clk_type,
754 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
758 *count = clk_table->NumSocClkLevelsEnabled;
761 *count = clk_table->VcnClkLevelsEnabled;
764 *count = clk_table->VcnClkLevelsEnabled;
767 *count = clk_table->NumDfPstatesEnabled;
770 *count = clk_table->NumDfPstatesEnabled;
779 static int yellow_carp_get_dpm_freq_by_index(struct smu_context *smu,
780 enum smu_clk_type clk_type,
784 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
786 if (!clk_table || clk_type >= SMU_CLK_COUNT)
791 if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
793 *freq = clk_table->SocClocks[dpm_level];
796 if (dpm_level >= clk_table->VcnClkLevelsEnabled)
798 *freq = clk_table->VClocks[dpm_level];
801 if (dpm_level >= clk_table->VcnClkLevelsEnabled)
803 *freq = clk_table->DClocks[dpm_level];
807 if (dpm_level >= clk_table->NumDfPstatesEnabled)
809 *freq = clk_table->DfPstateTable[dpm_level].MemClk;
812 if (dpm_level >= clk_table->NumDfPstatesEnabled)
814 *freq = clk_table->DfPstateTable[dpm_level].FClk;
823 static bool yellow_carp_clk_dpm_is_enabled(struct smu_context *smu,
824 enum smu_clk_type clk_type)
826 enum smu_feature_mask feature_id = 0;
832 feature_id = SMU_FEATURE_DPM_FCLK_BIT;
836 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
839 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
843 feature_id = SMU_FEATURE_VCN_DPM_BIT;
849 return smu_cmn_feature_is_enabled(smu, feature_id);
852 static int yellow_carp_get_dpm_ultimate_freq(struct smu_context *smu,
853 enum smu_clk_type clk_type,
857 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
858 uint32_t clock_limit;
859 uint32_t max_dpm_level, min_dpm_level;
862 if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type)) {
866 clock_limit = smu->smu_table.boot_values.uclk;
869 clock_limit = smu->smu_table.boot_values.fclk;
873 clock_limit = smu->smu_table.boot_values.gfxclk;
876 clock_limit = smu->smu_table.boot_values.socclk;
879 clock_limit = smu->smu_table.boot_values.vclk;
882 clock_limit = smu->smu_table.boot_values.dclk;
889 /* clock in Mhz unit */
891 *min = clock_limit / 100;
893 *max = clock_limit / 100;
902 *max = clk_table->MaxGfxClk;
910 max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
914 max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
921 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
922 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
932 *min = clk_table->MinGfxClk;
937 min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
951 if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
952 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
962 static int yellow_carp_set_soft_freq_limited_range(struct smu_context *smu,
963 enum smu_clk_type clk_type,
967 enum smu_message_type msg_set_min, msg_set_max;
968 uint32_t min_clk = min;
969 uint32_t max_clk = max;
973 if (!yellow_carp_clk_dpm_is_enabled(smu, clk_type))
979 msg_set_min = SMU_MSG_SetHardMinGfxClk;
980 msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
983 msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
984 msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
987 msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
988 msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
992 msg_set_min = SMU_MSG_SetHardMinVcn;
993 msg_set_max = SMU_MSG_SetSoftMaxVcn;
999 if (clk_type == SMU_VCLK) {
1000 min_clk = min << SMU_13_VCLK_SHIFT;
1001 max_clk = max << SMU_13_VCLK_SHIFT;
1004 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL);
1009 ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max_clk, NULL);
1017 static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
1018 enum smu_clk_type clk_type)
1020 uint32_t clk_limit = 0;
1021 struct amdgpu_device *adev = smu->adev;
1026 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
1027 clk_limit = SMU_13_0_8_UMD_PSTATE_GFXCLK;
1028 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
1029 (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
1030 clk_limit = SMU_13_0_1_UMD_PSTATE_GFXCLK;
1033 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
1034 clk_limit = SMU_13_0_8_UMD_PSTATE_SOCCLK;
1035 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
1036 (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
1037 clk_limit = SMU_13_0_1_UMD_PSTATE_SOCCLK;
1040 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 8))
1041 clk_limit = SMU_13_0_8_UMD_PSTATE_FCLK;
1042 if ((adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 1) ||
1043 (adev->ip_versions[MP1_HWIP][0]) == IP_VERSION(13, 0, 3))
1044 clk_limit = SMU_13_0_1_UMD_PSTATE_FCLK;
1053 static int yellow_carp_print_clk_levels(struct smu_context *smu,
1054 enum smu_clk_type clk_type, char *buf)
1056 int i, idx, size = 0, ret = 0;
1057 uint32_t cur_value = 0, value = 0, count = 0;
1059 uint32_t clk_limit = 0;
1061 smu_cmn_get_sysfs_buf(&buf, &size);
1065 size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
1066 size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
1067 (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
1068 size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
1069 (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
1072 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
1073 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
1074 smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
1081 ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
1085 ret = yellow_carp_get_dpm_level_count(smu, clk_type, &count);
1089 for (i = 0; i < count; i++) {
1090 idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
1091 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
1095 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
1096 cur_value == value ? "*" : "");
1101 clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type);
1102 ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
1105 min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
1106 max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
1107 if (cur_value == max)
1109 else if (cur_value == min)
1113 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
1115 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
1116 i == 1 ? cur_value : clk_limit,
1118 size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
1129 static int yellow_carp_force_clk_levels(struct smu_context *smu,
1130 enum smu_clk_type clk_type, uint32_t mask)
1132 uint32_t soft_min_level = 0, soft_max_level = 0;
1133 uint32_t min_freq = 0, max_freq = 0;
1136 soft_min_level = mask ? (ffs(mask) - 1) : 0;
1137 soft_max_level = mask ? (fls(mask) - 1) : 0;
1144 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
1146 goto force_level_out;
1148 ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
1150 goto force_level_out;
1152 ret = yellow_carp_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
1154 goto force_level_out;
1165 static int yellow_carp_get_dpm_profile_freq(struct smu_context *smu,
1166 enum amd_dpm_forced_level level,
1167 enum smu_clk_type clk_type,
1172 uint32_t clk_limit = 0;
1174 clk_limit = yellow_carp_get_umd_pstate_clk_default(smu, clk_type);
1179 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1180 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
1181 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
1182 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
1185 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1186 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
1189 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1190 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
1191 else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
1192 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL);
1195 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit);
1198 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit);
1204 *min_clk = *max_clk = clk_limit;
1208 static int yellow_carp_set_performance_level(struct smu_context *smu,
1209 enum amd_dpm_forced_level level)
1211 struct amdgpu_device *adev = smu->adev;
1212 uint32_t sclk_min = 0, sclk_max = 0;
1213 uint32_t fclk_min = 0, fclk_max = 0;
1214 uint32_t socclk_min = 0, socclk_max = 0;
1215 uint32_t vclk_min = 0, vclk_max = 0;
1216 uint32_t dclk_min = 0, dclk_max = 0;
1221 case AMD_DPM_FORCED_LEVEL_HIGH:
1222 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
1223 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
1224 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
1225 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max);
1226 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max);
1227 sclk_min = sclk_max;
1228 fclk_min = fclk_max;
1229 socclk_min = socclk_max;
1230 vclk_min = vclk_max;
1231 dclk_min = dclk_max;
1233 case AMD_DPM_FORCED_LEVEL_LOW:
1234 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
1235 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
1236 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
1237 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL);
1238 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL);
1239 sclk_max = sclk_min;
1240 fclk_max = fclk_min;
1241 socclk_max = socclk_min;
1242 vclk_max = vclk_min;
1243 dclk_max = dclk_min;
1245 case AMD_DPM_FORCED_LEVEL_AUTO:
1246 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
1247 yellow_carp_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
1248 yellow_carp_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
1249 yellow_carp_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max);
1250 yellow_carp_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max);
1252 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1253 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1254 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
1255 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1256 yellow_carp_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max);
1257 yellow_carp_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max);
1258 yellow_carp_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max);
1259 yellow_carp_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max);
1260 yellow_carp_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max);
1262 case AMD_DPM_FORCED_LEVEL_MANUAL:
1263 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1266 dev_err(adev->dev, "Invalid performance level %d\n", level);
1270 if (sclk_min && sclk_max) {
1271 ret = yellow_carp_set_soft_freq_limited_range(smu,
1278 smu->gfx_actual_hard_min_freq = sclk_min;
1279 smu->gfx_actual_soft_max_freq = sclk_max;
1282 if (fclk_min && fclk_max) {
1283 ret = yellow_carp_set_soft_freq_limited_range(smu,
1291 if (socclk_min && socclk_max) {
1292 ret = yellow_carp_set_soft_freq_limited_range(smu,
1300 if (vclk_min && vclk_max) {
1301 ret = yellow_carp_set_soft_freq_limited_range(smu,
1309 if (dclk_min && dclk_max) {
1310 ret = yellow_carp_set_soft_freq_limited_range(smu,
1321 static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
1323 DpmClocks_t *clk_table = smu->smu_table.clocks_table;
1325 smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
1326 smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
1327 smu->gfx_actual_hard_min_freq = 0;
1328 smu->gfx_actual_soft_max_freq = 0;
1333 static const struct pptable_funcs yellow_carp_ppt_funcs = {
1334 .check_fw_status = smu_v13_0_check_fw_status,
1335 .check_fw_version = smu_v13_0_check_fw_version,
1336 .init_smc_tables = yellow_carp_init_smc_tables,
1337 .fini_smc_tables = yellow_carp_fini_smc_tables,
1338 .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
1339 .system_features_control = yellow_carp_system_features_control,
1340 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
1341 .send_smc_msg = smu_cmn_send_smc_msg,
1342 .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable,
1343 .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable,
1344 .set_default_dpm_table = yellow_carp_set_default_dpm_tables,
1345 .read_sensor = yellow_carp_read_sensor,
1346 .is_dpm_running = yellow_carp_is_dpm_running,
1347 .set_watermarks_table = yellow_carp_set_watermarks_table,
1348 .get_gpu_metrics = yellow_carp_get_gpu_metrics,
1349 .get_enabled_mask = smu_cmn_get_enabled_mask,
1350 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
1351 .set_driver_table_location = smu_v13_0_set_driver_table_location,
1352 .gfx_off_control = smu_v13_0_gfx_off_control,
1353 .get_gfx_off_status = yellow_carp_get_gfxoff_status,
1354 .post_init = yellow_carp_post_smu_init,
1355 .mode2_reset = yellow_carp_mode2_reset,
1356 .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
1357 .od_edit_dpm_table = yellow_carp_od_edit_dpm_table,
1358 .print_clk_levels = yellow_carp_print_clk_levels,
1359 .force_clk_levels = yellow_carp_force_clk_levels,
1360 .set_performance_level = yellow_carp_set_performance_level,
1361 .set_fine_grain_gfx_freq_parameters = yellow_carp_set_fine_grain_gfx_freq_parameters,
1364 void yellow_carp_set_ppt_funcs(struct smu_context *smu)
1366 smu->ppt_funcs = &yellow_carp_ppt_funcs;
1367 smu->message_map = yellow_carp_message_map;
1368 smu->feature_map = yellow_carp_feature_mask_map;
1369 smu->table_map = yellow_carp_table_map;
1371 smu->smc_driver_if_version = SMU13_YELLOW_CARP_DRIVER_IF_VERSION;
1372 smu_v13_0_set_smu_mailbox_registers(smu);