/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"
/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
        SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...) \
        ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
                             (smu)->ppt_funcs->intf(smu, ##args) : \
                             -ENOTSUPP) : \
                            -EINVAL)

static const char *smu_get_message_name(struct smu_context *smu,
                                        enum smu_message_type type)
{
        if (type < 0 || type >= SMU_MSG_MAX_COUNT)
                return "unknown smu message";

        return __smu_message_names[type];
}

static void smu_cmn_read_arg(struct smu_context *smu,
                             uint32_t *arg)
{
        struct amdgpu_device *adev = smu->adev;

        *arg = RREG32(smu->param_reg);
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU exports a unified header file containing them; we
 * could then simply include that header and use the SMU's own
 * macros. At the moment the SMU unfortunately defines these error
 * codes per ASIC, yet this is a single driver for all ASICs.
 */
#define SMU_RESP_NONE           0
#define SMU_RESP_OK             1
#define SMU_RESP_CMD_FAIL       0xFF
#define SMU_RESP_CMD_UNKNOWN    0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER     0xFC
#define SMU_RESP_DEBUG_END      0xFB

/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be:
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * Ideally these values would come from a single header file defined
 * and maintained by the SMU FW team, so that the driver is impervious
 * to firmware changes. At the moment they are defined in various
 * header files, one for each ASIC, while this is a single,
 * ASIC-agnostic interface. Unifying them can be done in a follow-up
 * patch.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
{
        struct amdgpu_device *adev = smu->adev;
        int timeout = adev->usec_timeout * 20;
        u32 reg;

        for ( ; timeout > 0; timeout--) {
                reg = RREG32(smu->resp_reg);
                if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
                        break;

                udelay(1);
        }

        return reg;
}

static void __smu_cmn_reg_print_error(struct smu_context *smu,
                                      u32 reg_c2pmsg_90,
                                      int msg_index,
                                      u32 param,
                                      enum smu_message_type msg)
{
        struct amdgpu_device *adev = smu->adev;
        const char *message = smu_get_message_name(smu, msg);
        u32 msg_idx, prm;

        switch (reg_c2pmsg_90) {
        case SMU_RESP_NONE: {
                msg_idx = RREG32(smu->msg_reg);
                prm     = RREG32(smu->param_reg);
                dev_err_ratelimited(adev->dev,
                                    "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
                                    msg_idx, prm);
                }
                break;
        case SMU_RESP_OK:
                /* The SMU executed the command. It completed with a
                 * successful result.
                 */
                break;
        case SMU_RESP_CMD_FAIL:
                /* The SMU executed the command. It completed with an
                 * unsuccessful result.
                 */
                break;
        case SMU_RESP_CMD_UNKNOWN:
                dev_err_ratelimited(adev->dev,
                                    "SMU: unknown command: index:%d param:0x%08X message:%s",
                                    msg_index, param, message);
                break;
        case SMU_RESP_CMD_BAD_PREREQ:
                dev_err_ratelimited(adev->dev,
                                    "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
                                    msg_index, param, message);
                break;
        case SMU_RESP_BUSY_OTHER:
                dev_err_ratelimited(adev->dev,
                                    "SMU: busy with another command: index:%d param:0x%08X message:%s",
                                    msg_index, param, message);
                break;
        case SMU_RESP_DEBUG_END:
                dev_err_ratelimited(adev->dev,
                                    "SMU: I'm debugging!");
                break;
        default:
                dev_err_ratelimited(adev->dev,
                                    "SMU: unknown response 0x%08X for index:%d param:0x%08X message:%s",
                                    reg_c2pmsg_90, msg_index, param, message);
                break;
        }
}

static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
{
        int res;

        switch (reg_c2pmsg_90) {
        case SMU_RESP_NONE:
                /* The SMU is busy--still executing your command. */
                res = -ETIME;
                break;
        case SMU_RESP_OK:
                res = 0;
                break;
        case SMU_RESP_CMD_FAIL:
                /* The SMU executed the command, but the command's
                 * result status was failure.
                 */
                res = -EIO;
                break;
        case SMU_RESP_CMD_UNKNOWN:
                /* Unknown command--ignored by the SMU. */
                res = -EOPNOTSUPP;
                break;
        case SMU_RESP_CMD_BAD_PREREQ:
                /* Valid command--bad prerequisites. */
                res = -EINVAL;
                break;
        case SMU_RESP_BUSY_OTHER:
                /* The SMU is busy with other commands. The client
                 * should retry in 10 us.
                 */
                res = -EBUSY;
                break;
        default:
                /* Unknown or debug response from the SMU. */
                res = -EREMOTEIO;
                break;
        }

        return res;
}

static void __smu_cmn_send_msg(struct smu_context *smu,
                               u16 msg,
                               u32 param)
{
        struct amdgpu_device *adev = smu->adev;

        /* Clear the response register first, so that a poll of
         * resp_reg reads back 0 (SMU_RESP_NONE) until the SMU posts
         * a new status; writing msg_reg last is what triggers the
         * command.
         */
        WREG32(smu->resp_reg, 0);
        WREG32(smu->param_reg, param);
        WREG32(smu->msg_reg, msg);
}

static int __smu_cmn_send_debug_msg(struct smu_context *smu,
                                    u32 msg,
                                    u32 param)
{
        struct amdgpu_device *adev = smu->adev;

        WREG32(smu->debug_param_reg, param);
        WREG32(smu->debug_msg_reg, msg);
        WREG32(smu->debug_resp_reg, 0);

        return 0;
}

/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 *
 * Return 0 on success, -errno on error if we weren't able to _send_
 * the message for some reason. See __smu_cmn_reg2errno() for details
 * of the -errno.
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
                                     uint16_t msg_index,
                                     uint32_t param)
{
        struct amdgpu_device *adev = smu->adev;
        u32 reg;
        int res;

        if (adev->no_hw_access)
                return 0;

        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);
        if (reg == SMU_RESP_NONE ||
            res == -EREMOTEIO)
                goto Out;
        __smu_cmn_send_msg(smu, msg_index, param);
        res = 0;
Out:
        if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
            res && (res != -ETIME)) {
                amdgpu_device_halt(adev);
                WARN_ON(1);
        }

        return res;
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * __smu_cmn_reg2errno() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
        u32 reg;
        int res;

        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);

        if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
            res && (res != -ETIME)) {
                amdgpu_device_halt(smu->adev);
                WARN_ON(1);
        }

        return res;
}

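/*
 * Example (an illustrative sketch only, not part of the driver): the
 * two helpers above are meant to be paired when a caller wants to
 * fire a message and collect the status later. The split between the
 * two calls and the surrounding code are hypothetical:
 *
 *      res = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *      if (res)
 *              return res;
 *      ... do other work while the SMU executes the command ...
 *      res = smu_cmn_wait_for_response(smu);
 */
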
/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 in which to return a value from the SMU
 *            back to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return a value from the SMU in
 * @read_arg.
 *
 * Return 0 on success, -errno when a problem is encountered sending
 * the message or receiving the reply. If there is a PCI bus recovery
 * or the destination is a virtual GPU which does not allow this
 * message type, the message is simply dropped and success is also
 * returned. See __smu_cmn_reg2errno() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned back an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined, not printed, but instead given back to the client to
 * decide what further to do.
 *
 * @read_arg is read back regardless, to give the client more
 * information, which on error would most likely be @param, but we
 * can't assume that. This also eliminates more conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
                                    enum smu_message_type msg,
                                    uint32_t param,
                                    uint32_t *read_arg)
{
        struct amdgpu_device *adev = smu->adev;
        int res, index;
        u32 reg;

        if (adev->no_hw_access)
                return 0;

        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               msg);
        if (index < 0)
                return index == -EACCES ? 0 : index;

        mutex_lock(&smu->message_lock);
        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);
        if (reg == SMU_RESP_NONE ||
            res == -EREMOTEIO) {
                __smu_cmn_reg_print_error(smu, reg, index, param, msg);
                goto Out;
        }
        __smu_cmn_send_msg(smu, (uint16_t) index, param);
        reg = __smu_cmn_poll_stat(smu);
        res = __smu_cmn_reg2errno(smu, reg);
        if (res != 0)
                __smu_cmn_reg_print_error(smu, reg, index, param, msg);
        if (read_arg)
                smu_cmn_read_arg(smu, read_arg);
Out:
        if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
                amdgpu_device_halt(adev);
                WARN_ON(1);
        }

        mutex_unlock(&smu->message_lock);
        return res;
}

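/*
 * Example (an illustrative sketch only): querying the SMU firmware
 * version through this helper. SMU_MSG_GetSmuVersion is a real
 * message type; the surrounding snippet is hypothetical:
 *
 *      uint32_t smu_version;
 *      int ret;
 *
 *      ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
 *                                            0, &smu_version);
 *      if (ret)
 *              return ret;
 */
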
int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg)
{
        return smu_cmn_send_smc_msg_with_param(smu,
                                               msg,
                                               0,
                                               read_arg);
}

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
                               uint32_t msg)
{
        return __smu_cmn_send_debug_msg(smu, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
                                          uint32_t msg, uint32_t param)
{
        return __smu_cmn_send_debug_msg(smu, msg, param);
}

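/*
 * Translate a common, ASIC-independent index (message, clock, feature,
 * table, power source or workload) into the ASIC-specific value from
 * the per-ASIC mapping tables installed in the SMU context. Returns
 * the mapped value, -EINVAL if no valid mapping exists, or -EACCES if
 * the message is not allowed in a virtual function (SR-IOV VF).
 */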
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index)
{
        struct cmn2asic_msg_mapping msg_mapping;
        struct cmn2asic_mapping mapping;

        switch (type) {
        case CMN2ASIC_MAPPING_MSG:
                if (index >= SMU_MSG_MAX_COUNT ||
                    !smu->message_map)
                        return -EINVAL;

                msg_mapping = smu->message_map[index];
                if (!msg_mapping.valid_mapping)
                        return -EINVAL;

                if (amdgpu_sriov_vf(smu->adev) &&
                    !msg_mapping.valid_in_vf)
                        return -EACCES;

                return msg_mapping.map_to;

        case CMN2ASIC_MAPPING_CLK:
                if (index >= SMU_CLK_COUNT ||
                    !smu->clock_map)
                        return -EINVAL;

                mapping = smu->clock_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_FEATURE:
                if (index >= SMU_FEATURE_COUNT ||
                    !smu->feature_map)
                        return -EINVAL;

                mapping = smu->feature_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_TABLE:
                if (index >= SMU_TABLE_COUNT ||
                    !smu->table_map)
                        return -EINVAL;

                mapping = smu->table_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_PWR:
                if (index >= SMU_POWER_SOURCE_COUNT ||
                    !smu->pwr_src_map)
                        return -EINVAL;

                mapping = smu->pwr_src_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        case CMN2ASIC_MAPPING_WORKLOAD:
                if (index >= PP_SMC_POWER_PROFILE_COUNT ||
                    !smu->workload_map)
                        return -EINVAL;

                mapping = smu->workload_map[index];
                if (!mapping.valid_mapping)
                        return -EINVAL;

                return mapping.map_to;

        default:
                return -EINVAL;
        }
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
                                 enum smu_feature_mask mask)
{
        struct smu_feature *feature = &smu->smu_feature;
        int feature_id;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        WARN_ON(feature_id > feature->feature_num);

        return test_bit(feature_id, feature->supported);
}

static int __smu_get_enabled_features(struct smu_context *smu,
                                      uint64_t *enabled_features)
{
        return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
                               enum smu_feature_mask mask)
{
        struct amdgpu_device *adev = smu->adev;
        uint64_t enabled_features;
        int feature_id;

        if (__smu_get_enabled_features(smu, &enabled_features)) {
                dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
                return 0;
        }

        /*
         * Renoir and Cyan Skillfish are assumed to have all features
         * enabled. Since they also come with no feature_map, this
         * check avoids the pointless feature_map lookup below.
         */
        if (enabled_features == ULLONG_MAX)
                return 1;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return 0;

        return test_bit(feature_id, (unsigned long *)&enabled_features);
}

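/*
 * Example (an illustrative sketch only): gating a UCLK operation on
 * the corresponding DPM feature. SMU_FEATURE_DPM_UCLK_BIT is a real
 * feature mask bit; the snippet itself is hypothetical:
 *
 *      if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
 *              return -EOPNOTSUPP;
 */
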
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type)
{
        enum smu_feature_mask feature_id = 0;

        switch (clk_type) {
        case SMU_MCLK:
        case SMU_UCLK:
                feature_id = SMU_FEATURE_DPM_UCLK_BIT;
                break;
        case SMU_GFXCLK:
        case SMU_SCLK:
                feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
                break;
        case SMU_SOCCLK:
                feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
                break;
        case SMU_VCLK:
                feature_id = SMU_FEATURE_DPM_VCLK_BIT;
                break;
        case SMU_DCLK:
                feature_id = SMU_FEATURE_DPM_DCLK_BIT;
                break;
        case SMU_FCLK:
                feature_id = SMU_FEATURE_DPM_FCLK_BIT;
                break;
        default:
                return true;
        }

        if (!smu_cmn_feature_is_enabled(smu, feature_id))
                return false;

        return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
                             uint64_t *feature_mask)
{
        uint32_t *feature_mask_high;
        uint32_t *feature_mask_low;
        int ret = 0, index = 0;

        if (!feature_mask)
                return -EINVAL;

        feature_mask_low = &((uint32_t *)feature_mask)[0];
        feature_mask_high = &((uint32_t *)feature_mask)[1];

        index = smu_cmn_to_asic_specific_index(smu,
                                               CMN2ASIC_MAPPING_MSG,
                                               SMU_MSG_GetEnabledSmuFeatures);
        if (index > 0) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_GetEnabledSmuFeatures,
                                                      0,
                                                      feature_mask_low);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_GetEnabledSmuFeatures,
                                                      1,
                                                      feature_mask_high);
        } else {
                ret = smu_cmn_send_smc_msg(smu,
                                           SMU_MSG_GetEnabledSmuFeaturesHigh,
                                           feature_mask_high);
                if (ret)
                        return ret;

                ret = smu_cmn_send_smc_msg(smu,
                                           SMU_MSG_GetEnabledSmuFeaturesLow,
                                           feature_mask_low);
        }

        return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
                                        const unsigned long dep_status,
                                        const uint8_t *throttler_map)
{
        uint64_t indep_status = 0;
        uint8_t dep_bit = 0;

        for_each_set_bit(dep_bit, &dep_status, 32)
                indep_status |= 1ULL << throttler_map[dep_bit];

        return indep_status;
}

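/*
 * Worked example (values are made up for illustration): with
 * throttler_map[3] == 42 and bit 3 set in dep_status, the loop above
 * sets bit 42 in the returned ASIC-independent status:
 *
 *      dep_status    = BIT(3);
 *      indep_status |= 1ULL << throttler_map[3];    // bit 42
 */
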
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                        uint64_t feature_mask,
                                        bool enabled)
{
        int ret = 0;

        if (enabled) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_EnableSmuFeaturesLow,
                                                      lower_32_bits(feature_mask),
                                                      NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_EnableSmuFeaturesHigh,
                                                      upper_32_bits(feature_mask),
                                                      NULL);
        } else {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_DisableSmuFeaturesLow,
                                                      lower_32_bits(feature_mask),
                                                      NULL);
                if (ret)
                        return ret;
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_DisableSmuFeaturesHigh,
                                                      upper_32_bits(feature_mask),
                                                      NULL);
        }

        return ret;
}

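/*
 * Example (an illustrative sketch only): several features can be
 * toggled with a single pair of messages by OR-ing their bits into
 * one mask; the feature ids below are hypothetical:
 *
 *      uint64_t mask = BIT_ULL(feature_a_id) | BIT_ULL(feature_b_id);
 *
 *      ret = smu_cmn_feature_update_enable_state(smu, mask, true);
 */
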
int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable)
{
        int feature_id;

        feature_id = smu_cmn_to_asic_specific_index(smu,
                                                    CMN2ASIC_MAPPING_FEATURE,
                                                    mask);
        if (feature_id < 0)
                return -EINVAL;

        return smu_cmn_feature_update_enable_state(smu,
                                                   1ULL << feature_id,
                                                   enable);
}

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
        SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
                                        enum smu_feature_mask feature)
{
        if (feature < 0 || feature >= SMU_FEATURE_COUNT)
                return "unknown smu feature";
        return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf)
{
        int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
        uint64_t feature_mask;
        int i, feature_index;
        uint32_t count = 0;
        size_t size = 0;

        if (__smu_get_enabled_features(smu, &feature_mask))
                return 0;

        size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
                             upper_32_bits(feature_mask), lower_32_bits(feature_mask));

        memset(sort_feature, -1, sizeof(sort_feature));

        for (i = 0; i < SMU_FEATURE_COUNT; i++) {
                feature_index = smu_cmn_to_asic_specific_index(smu,
                                                               CMN2ASIC_MAPPING_FEATURE,
                                                               i);
                if (feature_index < 0)
                        continue;

                sort_feature[feature_index] = i;
        }

        size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
                              "No", "Feature", "Bit", "State");

        for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
                if (sort_feature[feature_index] < 0)
                        continue;

                size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
                                      count++,
                                      smu_get_feature_name(smu, sort_feature[feature_index]),
                                      feature_index,
                                      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
                                      "enabled" : "disabled");
        }

        return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask)
{
        int ret = 0;
        uint64_t feature_mask;
        uint64_t feature_2_enabled = 0;
        uint64_t feature_2_disabled = 0;

        ret = __smu_get_enabled_features(smu, &feature_mask);
        if (ret)
                return ret;

        feature_2_enabled  = ~feature_mask & new_mask;
        feature_2_disabled = feature_mask & ~new_mask;

        if (feature_2_enabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_enabled,
                                                          true);
                if (ret)
                        return ret;
        }
        if (feature_2_disabled) {
                ret = smu_cmn_feature_update_enable_state(smu,
                                                          feature_2_disabled,
                                                          false);
                if (ret)
                        return ret;
        }

        return ret;
}

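/*
 * Worked example (masks are made up for illustration): if the SMU
 * reports feature_mask = 0b1010 and the caller requests
 * new_mask = 0b0110, then
 *
 *      feature_2_enabled  = ~0b1010 &  0b0110 = 0b0100
 *      feature_2_disabled =  0b1010 & ~0b0110 = 0b1000
 *
 * i.e. only the bits that actually change state are sent down.
 */
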
/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:               smu_context pointer
 * @mask:              the dpm feature which should not be disabled;
 *                     SMU_FEATURE_COUNT means no exception, disable
 *                     all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
                                                enum smu_feature_mask mask)
{
        uint64_t features_to_disable = U64_MAX;
        int skipped_feature_id;

        if (mask != SMU_FEATURE_COUNT) {
                skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
                                                                    CMN2ASIC_MAPPING_FEATURE,
                                                                    mask);
                if (skipped_feature_id < 0)
                        return -EINVAL;

                features_to_disable &= ~(1ULL << skipped_feature_id);
        }

        return smu_cmn_feature_update_enable_state(smu,
                                                   features_to_disable,
                                                   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
                            uint32_t *if_version,
                            uint32_t *smu_version)
{
        int ret = 0;

        if (!if_version && !smu_version)
                return -EINVAL;

        /* Serve from the cached values if both are already known. */
        if (smu->smc_fw_if_version && smu->smc_fw_version) {
                if (if_version)
                        *if_version = smu->smc_fw_if_version;
                if (smu_version)
                        *smu_version = smu->smc_fw_version;

                return 0;
        }

        if (if_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
                if (ret)
                        return ret;

                smu->smc_fw_if_version = *if_version;
        }

        if (smu_version) {
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
                if (ret)
                        return ret;

                smu->smc_fw_version = *smu_version;
        }

        return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
                         enum smu_table_id table_index,
                         int argument,
                         void *table_data,
                         bool drv2smu)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        struct amdgpu_device *adev = smu->adev;
        struct smu_table *table = &smu_table->driver_table;
        int table_id = smu_cmn_to_asic_specific_index(smu,
                                                      CMN2ASIC_MAPPING_TABLE,
                                                      table_index);
        uint32_t table_size;
        int ret = 0;

        if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
                return -EINVAL;

        table_size = smu_table->tables[table_index].size;

        if (drv2smu) {
                memcpy(table->cpu_addr, table_data, table_size);
                /*
                 * Flush the HDP cache so that the content seen by the
                 * GPU is consistent with what the CPU wrote.
                 */
                amdgpu_asic_flush_hdp(adev, NULL);
        }

        ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
                                              SMU_MSG_TransferTableDram2Smu :
                                              SMU_MSG_TransferTableSmu2Dram,
                                              table_id | ((argument & 0xFFFF) << 16),
                                              NULL);
        if (ret)
                return ret;

        if (!drv2smu) {
                amdgpu_asic_invalidate_hdp(adev, NULL);
                memcpy(table_data, table->cpu_addr, table_size);
        }

        return 0;
}

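/*
 * The message parameter above packs the ASIC table id into the low
 * 16 bits and @argument into the high 16 bits. For example (values
 * made up for illustration), table_id 3 with argument 1 is sent as
 * 0x00010003.
 */
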
int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
        void *watermarks_table = smu->smu_table.watermarks_table;

        if (!watermarks_table)
                return -EINVAL;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_WATERMARKS,
                                    0,
                                    watermarks_table,
                                    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
        void *pptable = smu->smu_table.driver_pptable;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_PPTABLE,
                                    0,
                                    pptable,
                                    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache)
{
        struct smu_table_context *smu_table = &smu->smu_table;
        uint32_t table_size =
                smu_table->tables[SMU_TABLE_SMU_METRICS].size;
        int ret = 0;

        /*
         * The cached metrics table is considered fresh for 1 ms;
         * refetch from the SMU only when the caller bypasses the
         * cache or the cache has expired.
         */
        if (bypass_cache ||
            !smu_table->metrics_time ||
            time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
                ret = smu_cmn_update_table(smu,
                                           SMU_TABLE_SMU_METRICS,
                                           0,
                                           smu_table->metrics_table,
                                           false);
                if (ret) {
                        dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }

        if (metrics_table)
                memcpy(metrics_table, smu_table->metrics_table, table_size);

        return 0;
}

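/*
 * Example (an illustrative sketch only): a per-ASIC metrics accessor
 * would typically wrap this helper. The MetricsTable_t type and the
 * function name here are hypothetical:
 *
 *      static int my_get_metrics(struct smu_context *smu,
 *                                MetricsTable_t *metrics)
 *      {
 *              return smu_cmn_get_metrics_table(smu, metrics, false);
 *      }
 */
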
int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
        void *pptable = smu->smu_table.combo_pptable;

        return smu_cmn_update_table(smu,
                                    SMU_TABLE_COMBO_PPTABLE,
                                    0,
                                    pptable,
                                    false);
}

void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
        struct metrics_table_header *header = (struct metrics_table_header *)table;
        uint16_t structure_size;

#define METRICS_VERSION(a, b) ((a << 16) | b)

        switch (METRICS_VERSION(frev, crev)) {
        case METRICS_VERSION(1, 0):
                structure_size = sizeof(struct gpu_metrics_v1_0);
                break;
        case METRICS_VERSION(1, 1):
                structure_size = sizeof(struct gpu_metrics_v1_1);
                break;
        case METRICS_VERSION(1, 2):
                structure_size = sizeof(struct gpu_metrics_v1_2);
                break;
        case METRICS_VERSION(1, 3):
                structure_size = sizeof(struct gpu_metrics_v1_3);
                break;
        case METRICS_VERSION(2, 0):
                structure_size = sizeof(struct gpu_metrics_v2_0);
                break;
        case METRICS_VERSION(2, 1):
                structure_size = sizeof(struct gpu_metrics_v2_1);
                break;
        case METRICS_VERSION(2, 2):
                structure_size = sizeof(struct gpu_metrics_v2_2);
                break;
        case METRICS_VERSION(2, 3):
                structure_size = sizeof(struct gpu_metrics_v2_3);
                break;
        case METRICS_VERSION(2, 4):
                structure_size = sizeof(struct gpu_metrics_v2_4);
                break;
        default:
                return;
        }

#undef METRICS_VERSION

        memset(header, 0xFF, structure_size);

        header->format_revision = frev;
        header->content_revision = crev;
        header->structure_size = structure_size;
}

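/*
 * Example (an illustrative sketch only): a per-ASIC get_gpu_metrics
 * callback would initialize its table header like this before filling
 * in the payload. gpu_metrics_v2_2 is a real structure; the snippet
 * is hypothetical:
 *
 *      struct gpu_metrics_v2_2 *gpu_metrics = ...;
 *
 *      smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);
 */
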
int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state)
{
        enum smu_message_type msg;
        int ret;

        switch (mp1_state) {
        case PP_MP1_STATE_SHUTDOWN:
                msg = SMU_MSG_PrepareMp1ForShutdown;
                break;
        case PP_MP1_STATE_UNLOAD:
                msg = SMU_MSG_PrepareMp1ForUnload;
                break;
        case PP_MP1_STATE_RESET:
                msg = SMU_MSG_PrepareMp1ForReset;
                break;
        case PP_MP1_STATE_NONE:
        default:
                return 0;
        }

        ret = smu_cmn_send_smc_msg(smu, msg, NULL);
        if (ret)
                dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

        return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
        struct pci_dev *p = NULL;
        bool snd_driver_loaded;

        /*
         * If the ASIC comes with no audio function, we always assume
         * it is "enabled".
         */
        p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
                                        adev->pdev->bus->number, 1);
        if (!p)
                return true;

        snd_driver_loaded = pci_is_enabled(p);

        pci_dev_put(p);

        return snd_driver_loaded;
}