/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
/*
 * Although these are defined in each ASIC's specific header file,
 * they share the same definitions and values. That makes common
 * APIs for issuing SMC messages on all ASICs possible.
 */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		0

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		0

#define MP1_C2PMSG_90__CONTENT_MASK		0xFFFFFFFFL
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};
static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}
static void smu_cmn_read_arg(struct smu_context *smu,
			     uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * when the SMU has exported a unified header file containing these
 * macros; then we can simply include that header and use the SMU's
 * macros. At the moment, these error codes are defined by the SMU
 * per ASIC, unfortunately, yet we're a single driver for all ASICs.
 */
#define SMU_RESP_NONE		0
#define SMU_RESP_OK		1
#define SMU_RESP_CMD_FAIL	0xFF
#define SMU_RESP_CMD_UNKNOWN	0xFE
#define SMU_RESP_CMD_BAD_PREREQ	0xFD
#define SMU_RESP_BUSY_OTHER	0xFC
#define SMU_RESP_DEBUG_END	0xFB
/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your previous command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The values here are not defined by macros, because I'd rather we
 * include a single header file which defines them, which is
 * maintained by the SMU FW team, so that we're impervious to firmware
 * changes. At the moment those values are defined in various header
 * files, one for each ASIC, yet here we're a single ASIC-agnostic
 * interface. Such a change can be followed up by a subsequent patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int timeout = adev->usec_timeout * 20;
	u32 reg;

	for ( ; timeout > 0; timeout--) {
		reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;

		udelay(1);
	}

	return reg;
}
static void __smu_cmn_reg_print_error(struct smu_context *smu,
				      u32 reg_c2pmsg_90,
				      int msg_index,
				      u32 param,
				      enum smu_message_type msg)
{
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm not done with your previous command!");
		break;
	case SMU_RESP_OK:
		/* The SMU executed the command. It completed with a
		 * successful result.
		 */
		break;
	case SMU_RESP_CMD_FAIL:
		/* The SMU executed the command. It completed with an
		 * unsuccessful result.
		 */
		break;
	case SMU_RESP_CMD_UNKNOWN:
		dev_err_ratelimited(adev->dev,
				    "SMU: unknown command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		dev_err_ratelimited(adev->dev,
				    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_BUSY_OTHER:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
				    msg_index, param, message);
		break;
	case SMU_RESP_DEBUG_END:
		dev_err_ratelimited(adev->dev,
				    "SMU: I'm debugging!");
		break;
	default:
		dev_err_ratelimited(adev->dev,
				    "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
				    reg_c2pmsg_90, msg_index, param, message);
		break;
	}
}
static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
	int res;

	switch (reg_c2pmsg_90) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command. */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU. */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites. */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU. */
		res = -EREMOTEIO;
		break;
	}

	return res;
}
static void __smu_cmn_send_msg(struct smu_context *smu,
			       u16 msg,
			       u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
}
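/*
 * Illustrative sketch of the mailbox handshake implemented by the
 * three writes above and used by the callers further down in this
 * file: clear the response register, post the argument and message,
 * then poll for and decode the response (names as in this file):
 *
 *	__smu_cmn_send_msg(smu, msg_index, param);
 *	reg = __smu_cmn_poll_stat(smu);
 *	res = __smu_cmn_reg2errno(smu, reg);
 */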
/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
				     uint16_t msg_index,
				     uint32_t param)
{
	u32 reg;
	int res;

	if (smu->adev->no_hw_access)
		return 0;

	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO)
		goto Out;
	__smu_cmn_send_msg(smu, msg_index, param);
	res = 0;
Out:
	return res;
}
/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	u32 reg;

	reg = __smu_cmn_poll_stat(smu);
	return __smu_cmn_reg2errno(smu, reg);
}
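/*
 * Usage sketch (illustrative, assuming a caller that can overlap work
 * with the SMU while the command executes): pair the non-waiting send
 * with a later wait for the result.
 *
 *	ret = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *	if (ret)
 *		return ret;
 *	// ... do other work ...
 *	ret = smu_cmn_wait_for_response(smu);
 */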
/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * Return 0 on success, -errno on error, if we weren't able to send
 * the message or if the message completed with some kind of
 * error. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * The return value, @read_arg is read back regardless, to give back
 * more information to the client, which on error would most likely be
 * @param, but we can't assume that. This also eliminates more
 * conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	int res, index;
	u32 reg;

	if (smu->adev->no_hw_access)
		return 0;

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       msg);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (reg == SMU_RESP_NONE ||
	    reg == SMU_RESP_BUSY_OTHER ||
	    res == -EREMOTEIO) {
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		goto Out;
	}
	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
	if (res == -EREMOTEIO)
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
	if (read_arg)
		smu_cmn_read_arg(smu, read_arg);
Out:
	mutex_unlock(&smu->message_lock);
	return res;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}
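/*
 * Example (illustrative): querying the SMU firmware version through
 * this interface. SMU_MSG_GetSmuVersion is a message type used later
 * in this file; the surrounding code is only a sketch.
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
 *	if (ret)
 *		return ret;
 */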
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->message_map)
			return -EINVAL;

		msg_mapping = smu->message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !msg_mapping.valid_in_vf)
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index > PP_SMC_POWER_PROFILE_CUSTOM ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}
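/*
 * Example (illustrative): translating a common message index into the
 * ASIC-specific message number before sending, mirroring what
 * smu_cmn_send_smc_msg_with_param() does above:
 *
 *	int index = smu_cmn_to_asic_specific_index(smu,
 *						   CMN2ASIC_MAPPING_MSG,
 *						   SMU_MSG_GetSmuVersion);
 *	if (index < 0)
 *		return index == -EACCES ? 0 : index;
 */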
int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	int feature_id;
	int ret = 0;

	if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}
int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint32_t *feature_mask,
			     uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}
int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
				     uint32_t *feature_mask,
				     uint32_t num)
{
	uint32_t feature_mask_en_low = 0;
	uint32_t feature_mask_en_high = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
						      &feature_mask_en_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
						      &feature_mask_en_high);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_en_low;
		feature_mask[1] = feature_mask_en_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}
uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
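/*
 * Worked example with a hypothetical map (ASIC code supplies the real
 * one): if throttler_map[0] == 10 and dep_status has only bit 0 set,
 * the returned independent status is 1ULL << 10.
 *
 *	static const uint8_t example_throttler_map[32] = { 10, 11, ... };
 *	uint64_t status = smu_cmn_get_indep_throttler_status(raw_status,
 *							     example_throttler_map);
 */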
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
			  (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}
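/*
 * Example (illustrative): disabling a single DPM feature by its common
 * mask bit; the bit is translated and routed through
 * smu_cmn_feature_update_enable_state() above.
 *
 *	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT, false);
 */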
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	uint32_t feature_mask[2] = { 0 };
	int feature_index = 0;
	uint32_t count = 0;
	int8_t sort_feature[SMU_FEATURE_COUNT];
	size_t size = 0;
	int ret = 0, i;

	if (!smu->is_apu) {
		ret = smu_cmn_get_enabled_mask(smu,
					       feature_mask,
					       2);
		if (ret)
			return 0;
	} else {
		ret = smu_cmn_get_enabled_32_bits_mask(smu,
						       feature_mask,
						       2);
		if (ret)
			return 0;
	}

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     feature_mask[1], feature_mask[0]);

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s  %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		if (sort_feature[i] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[i]),
				      i,
				      !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
				      "enabled" : "disabled");
	}

	return size;
}
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	ret = smu_cmn_get_enabled_mask(smu,
				       feature_mask,
				       2);
	if (ret)
		return ret;

	feature_enables = ((uint64_t)feature_mask[1] << 32 |
			   (uint64_t)feature_mask[0]);

	feature_2_enabled  = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}
/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:               smu_context pointer
 * @no_hw_disablement: whether real dpm disablement should be performed
 *                     true: update the cache (about dpm enablement state) only
 *                     false: real dpm disablement plus cache update
 * @mask:              the dpm feature which should not be disabled
 *                     SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						bool no_hw_disablement,
						enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	if (no_hw_disablement) {
		mutex_lock(&feature->mutex);
		bitmap_andnot(feature->enabled, feature->enabled,
			      (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
		mutex_unlock(&feature->mutex);

		return 0;
	} else {
		return smu_cmn_feature_update_enable_state(smu,
							   features_to_disable,
							   false);
	}
}
int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}
int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush hdp cache: to guarantee the content seen by
		 * the GPU is consistent with the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_invalidate_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}
int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}
int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
				     void *metrics_table,
				     bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}
int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	int ret = 0;

	mutex_lock(&smu->metrics_lock);
	ret = smu_cmn_get_metrics_table_locked(smu,
					       metrics_table,
					       bypass_cache);
	mutex_unlock(&smu->metrics_lock);

	return ret;
}
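/*
 * Example (illustrative): callers usually copy the cached table into a
 * locally declared, ASIC-specific metrics structure. SmuMetrics_t here
 * stands in for whichever per-ASIC type applies.
 *
 *	SmuMetrics_t metrics;
 *	int ret;
 *
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
 *	if (ret)
 *		return ret;
 */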
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
	struct metrics_table_header *header = (struct metrics_table_header *)table;
	uint16_t structure_size;

#define METRICS_VERSION(a, b)	((a << 16) | b)

	switch (METRICS_VERSION(frev, crev)) {
	case METRICS_VERSION(1, 0):
		structure_size = sizeof(struct gpu_metrics_v1_0);
		break;
	case METRICS_VERSION(1, 1):
		structure_size = sizeof(struct gpu_metrics_v1_1);
		break;
	case METRICS_VERSION(1, 2):
		structure_size = sizeof(struct gpu_metrics_v1_2);
		break;
	case METRICS_VERSION(1, 3):
		structure_size = sizeof(struct gpu_metrics_v1_3);
		break;
	case METRICS_VERSION(2, 0):
		structure_size = sizeof(struct gpu_metrics_v2_0);
		break;
	case METRICS_VERSION(2, 1):
		structure_size = sizeof(struct gpu_metrics_v2_1);
		break;
	case METRICS_VERSION(2, 2):
		structure_size = sizeof(struct gpu_metrics_v2_2);
		break;
	default:
		return;
	}

#undef METRICS_VERSION

	memset(header, 0xFF, structure_size);

	header->format_revision = frev;
	header->content_revision = crev;
	header->structure_size = structure_size;
}
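/*
 * Example (illustrative): an ASIC backend initializing a v2.2 metrics
 * table header before filling in the per-ASIC fields:
 *
 *	struct gpu_metrics_v2_2 *gpu_metrics = table;
 *
 *	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
 *	gpu_metrics->temperature_gfx = ...;
 */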
int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}