1 /*
2  * Copyright 2020 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #define SWSMU_CODE_LAYER_L4
24
25 #include "amdgpu.h"
26 #include "amdgpu_smu.h"
27 #include "smu_cmn.h"
28 #include "soc15_common.h"
29
30 /*
31  * DO NOT use these for err/warn/info/debug messages.
32  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
33  * They are more MGPU friendly.
34  */
35 #undef pr_err
36 #undef pr_warn
37 #undef pr_info
38 #undef pr_debug
39
40 #define MP1_C2PMSG_90__CONTENT_MASK                                                                    0xFFFFFFFFL
41
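/*
 * These appear to be PCIe link speeds in units of 0.1 GT/s, indexed by PCIe
 * generation minus one (2.5, 5.0, 8.0, 16.0, 32.0 and 64.0 GT/s); the intent
 * is inferred from the values rather than documented here.
 */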
42 const int link_speed[] = {25, 50, 80, 160, 320, 640};
43
44 #undef __SMU_DUMMY_MAP
45 #define __SMU_DUMMY_MAP(type)   #type
46 static const char * const __smu_message_names[] = {
47         SMU_MESSAGE_TYPES
48 };
49
50 #define smu_cmn_call_asic_func(intf, smu, args...)                             \
51         ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                          \
52                                      (smu)->ppt_funcs->intf(smu, ##args) :     \
53                                      -ENOTSUPP) :                              \
54                             -EINVAL)
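/*
 * A minimal usage sketch of the dispatch macro above, using a callback that
 * is exercised later in this file:
 *
 *     ret = smu_cmn_call_asic_func(get_enabled_mask, smu, &feature_mask);
 *
 * expands to smu->ppt_funcs->get_enabled_mask(smu, &feature_mask) when the
 * ASIC implements the callback, evaluates to -ENOTSUPP when the callback is
 * NULL, and to -EINVAL when no ppt_funcs table is installed at all.
 */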
55
56 static const char *smu_get_message_name(struct smu_context *smu,
57                                         enum smu_message_type type)
58 {
59         if (type < 0 || type >= SMU_MSG_MAX_COUNT)
60                 return "unknown smu message";
61
62         return __smu_message_names[type];
63 }
64
65 static void smu_cmn_read_arg(struct smu_context *smu,
66                              uint32_t *arg)
67 {
68         struct amdgpu_device *adev = smu->adev;
69
70         *arg = RREG32(smu->param_reg);
71 }
72
73 /* Redefine the SMU error codes here.
74  *
75  * Note that these definitions are redundant and should be removed
76  * when the SMU has exported a unified header file containing these
77  * macros, which header file we can just include and use the SMU's
78  * macros. At the moment, these error codes are defined by the SMU
79  * per-ASIC unfortunately, yet we're one driver for all ASICs.
80  */
81 #define SMU_RESP_NONE           0
82 #define SMU_RESP_OK             1
83 #define SMU_RESP_CMD_FAIL       0xFF
84 #define SMU_RESP_CMD_UNKNOWN    0xFE
85 #define SMU_RESP_CMD_BAD_PREREQ 0xFD
86 #define SMU_RESP_BUSY_OTHER     0xFC
87 #define SMU_RESP_DEBUG_END      0xFB
88
89 /**
90  * __smu_cmn_poll_stat -- poll for a status from the SMU
91  * @smu: a pointer to SMU context
92  *
93  * Returns the status of the SMU, which could be,
94  *    0, the SMU is busy with your command;
95  *    1, execution status: success, execution result: success;
96  * 0xFF, execution status: success, execution result: failure;
97  * 0xFE, unknown command;
98  * 0xFD, valid command, but bad (command) prerequisites;
99  * 0xFC, the command was rejected as the SMU is busy;
100  * 0xFB, "SMC_Result_DebugDataDumpEnd".
101  *
102  * The values here are not defined by macros, because I'd rather we
103  * include a single header file which defines them, which is
104  * maintained by the SMU FW team, so that we're impervious to firmware
105  * changes. At the moment those values are defined in various header
106  * files, one for each ASIC, yet here we're a single ASIC-agnostic
107  * interface. Such a change can be followed-up by a subsequent patch.
108  */
109 static u32 __smu_cmn_poll_stat(struct smu_context *smu)
110 {
111         struct amdgpu_device *adev = smu->adev;
112         int timeout = adev->usec_timeout * 20;
113         u32 reg;
114
115         for ( ; timeout > 0; timeout--) {
116                 reg = RREG32(smu->resp_reg);
117                 if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
118                         break;
119
120                 udelay(1);
121         }
122
123         return reg;
124 }
125
126 static void __smu_cmn_reg_print_error(struct smu_context *smu,
127                                       u32 reg_c2pmsg_90,
128                                       int msg_index,
129                                       u32 param,
130                                       enum smu_message_type msg)
131 {
132         struct amdgpu_device *adev = smu->adev;
133         const char *message = smu_get_message_name(smu, msg);
134         u32 msg_idx, prm;
135
136         switch (reg_c2pmsg_90) {
137         case SMU_RESP_NONE: {
138                 msg_idx = RREG32(smu->msg_reg);
139                 prm     = RREG32(smu->param_reg);
140                 dev_err_ratelimited(adev->dev,
141                                     "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
142                                     msg_idx, prm);
143                 }
144                 break;
145         case SMU_RESP_OK:
146                 /* The SMU executed the command. It completed with a
147                  * successful result.
148                  */
149                 break;
150         case SMU_RESP_CMD_FAIL:
151                 /* The SMU executed the command. It completed with an
152                  * unsuccessful result.
153                  */
154                 break;
155         case SMU_RESP_CMD_UNKNOWN:
156                 dev_err_ratelimited(adev->dev,
157                                     "SMU: unknown command: index:%d param:0x%08X message:%s",
158                                     msg_index, param, message);
159                 break;
160         case SMU_RESP_CMD_BAD_PREREQ:
161                 dev_err_ratelimited(adev->dev,
162                                     "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
163                                     msg_index, param, message);
164                 break;
165         case SMU_RESP_BUSY_OTHER:
166                 dev_err_ratelimited(adev->dev,
167                                     "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
168                                     msg_index, param, message);
169                 break;
170         case SMU_RESP_DEBUG_END:
171                 dev_err_ratelimited(adev->dev,
172                                     "SMU: I'm debugging!");
173                 break;
174         default:
175                 dev_err_ratelimited(adev->dev,
176                                     "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
177                                     reg_c2pmsg_90, msg_index, param, message);
178                 break;
179         }
180 }
181
182 static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
183 {
184         int res;
185
186         switch (reg_c2pmsg_90) {
187         case SMU_RESP_NONE:
188                 /* The SMU is busy--still executing your command.
189                  */
190                 res = -ETIME;
191                 break;
192         case SMU_RESP_OK:
193                 res = 0;
194                 break;
195         case SMU_RESP_CMD_FAIL:
196                 /* Command completed successfully, but the command
197                  * status was failure.
198                  */
199                 res = -EIO;
200                 break;
201         case SMU_RESP_CMD_UNKNOWN:
202                 /* Unknown command--ignored by the SMU.
203                  */
204                 res = -EOPNOTSUPP;
205                 break;
206         case SMU_RESP_CMD_BAD_PREREQ:
207                 /* Valid command--bad prerequisites.
208                  */
209                 res = -EINVAL;
210                 break;
211         case SMU_RESP_BUSY_OTHER:
212                 /* The SMU is busy with other commands. The client
213                  * should retry in 10 us.
214                  */
215                 res = -EBUSY;
216                 break;
217         default:
218                 /* Unknown or debug response from the SMU.
219                  */
220                 res = -EREMOTEIO;
221                 break;
222         }
223
224         return res;
225 }
226
227 static void __smu_cmn_send_msg(struct smu_context *smu,
228                                u16 msg,
229                                u32 param)
230 {
231         struct amdgpu_device *adev = smu->adev;
232
233         WREG32(smu->resp_reg, 0);
234         WREG32(smu->param_reg, param);
235         WREG32(smu->msg_reg, msg);
236 }
237
238 static int __smu_cmn_send_debug_msg(struct smu_context *smu,
239                                u32 msg,
240                                u32 param)
241 {
242         struct amdgpu_device *adev = smu->adev;
243
244         WREG32(smu->debug_param_reg, param);
245         WREG32(smu->debug_msg_reg, msg);
246         WREG32(smu->debug_resp_reg, 0);
247
248         return 0;
249 }
250 /**
251  * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
252  * @smu: pointer to an SMU context
253  * @msg_index: message index
254  * @param: message parameter to send to the SMU
255  *
256  * Send a message to the SMU with the parameter passed. Do not wait
257  * for status/result of the message, thus the "without_waiting".
258  *
259  * Return 0 on success, -errno on error if we weren't able to _send_
260  * the message for some reason. See __smu_cmn_reg2errno() for details
261  * of the -errno.
262  */
263 int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
264                                      uint16_t msg_index,
265                                      uint32_t param)
266 {
267         struct amdgpu_device *adev = smu->adev;
268         u32 reg;
269         int res;
270
271         if (adev->no_hw_access)
272                 return 0;
273
274         reg = __smu_cmn_poll_stat(smu);
275         res = __smu_cmn_reg2errno(smu, reg);
276         if (reg == SMU_RESP_NONE ||
277             res == -EREMOTEIO)
278                 goto Out;
279         __smu_cmn_send_msg(smu, msg_index, param);
280         res = 0;
281 Out:
282         if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
283             res && (res != -ETIME)) {
284                 amdgpu_device_halt(adev);
285                 WARN_ON(1);
286         }
287
288         return res;
289 }
290
291 /**
292  * smu_cmn_wait_for_response -- wait for response from the SMU
293  * @smu: pointer to an SMU context
294  *
295  * Wait for status from the SMU.
296  *
297  * Return 0 on success, -errno on error, indicating the execution
298  * status and result of the message being waited for. See
299  * __smu_cmn_reg2errno() for details of the -errno.
300  */
301 int smu_cmn_wait_for_response(struct smu_context *smu)
302 {
303         u32 reg;
304         int res;
305
306         reg = __smu_cmn_poll_stat(smu);
307         res = __smu_cmn_reg2errno(smu, reg);
308
309         if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
310             res && (res != -ETIME)) {
311                 amdgpu_device_halt(smu->adev);
312                 WARN_ON(1);
313         }
314
315         return res;
316 }
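/*
 * Illustrative pairing, a sketch rather than a copy of a real caller: the
 * asynchronous flavour sends first and collects the status later,
 *
 *     ret = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
 *     if (!ret)
 *             ret = smu_cmn_wait_for_response(smu);
 *
 * Serialization against other messages (for example via smu->message_lock,
 * as the synchronous helper below does) is left to the caller.
 */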
317
318 /**
319  * smu_cmn_send_smc_msg_with_param -- send a message with parameter
320  * @smu: pointer to an SMU context
321  * @msg: message to send
322  * @param: parameter to send to the SMU
323  * @read_arg: pointer to u32 to return a value from the SMU back
324  *            to the caller
325  *
326  * Send the message @msg with parameter @param to the SMU, wait for
327  * completion of the command, and return back a value from the SMU in
328  * @read_arg pointer.
329  *
330  * Return 0 on success, -errno when a problem is encountered sending
331  * message or receiving reply. If there is a PCI bus recovery or
332  * the destination is a virtual GPU which does not allow this message
333  * type, the message is simply dropped and success is also returned.
334  * See __smu_cmn_reg2errno() for details of the -errno.
335  *
336  * If we weren't able to send the message to the SMU, we also print
337  * the error to the standard log.
338  *
339  * Command completion status is printed only if the -errno is
340  * -EREMOTEIO, indicating that the SMU returned back an
341  * undefined/unknown/unspecified result. All other cases are
342  * well-defined, not printed, but instead given back to the client to
343  * decide what further to do.
344  *
345  * The return value, @read_arg is read back regardless, to give back
346  * more information to the client, which on error would most likely be
347  * @param, but we can't assume that. This also eliminates more
348  * conditionals.
349  */
350 int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
351                                     enum smu_message_type msg,
352                                     uint32_t param,
353                                     uint32_t *read_arg)
354 {
355         struct amdgpu_device *adev = smu->adev;
356         int res, index;
357         u32 reg;
358
359         if (adev->no_hw_access)
360                 return 0;
361
362         index = smu_cmn_to_asic_specific_index(smu,
363                                                CMN2ASIC_MAPPING_MSG,
364                                                msg);
365         if (index < 0)
366                 return index == -EACCES ? 0 : index;
367
368         mutex_lock(&smu->message_lock);
369         reg = __smu_cmn_poll_stat(smu);
370         res = __smu_cmn_reg2errno(smu, reg);
371         if (reg == SMU_RESP_NONE ||
372             res == -EREMOTEIO) {
373                 __smu_cmn_reg_print_error(smu, reg, index, param, msg);
374                 goto Out;
375         }
376         __smu_cmn_send_msg(smu, (uint16_t) index, param);
377         reg = __smu_cmn_poll_stat(smu);
378         res = __smu_cmn_reg2errno(smu, reg);
379         if (res != 0)
380                 __smu_cmn_reg_print_error(smu, reg, index, param, msg);
381         if (read_arg)
382                 smu_cmn_read_arg(smu, read_arg);
383 Out:
384         if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) && res) {
385                 amdgpu_device_halt(adev);
386                 WARN_ON(1);
387         }
388
389         mutex_unlock(&smu->message_lock);
390         return res;
391 }
392
393 int smu_cmn_send_smc_msg(struct smu_context *smu,
394                          enum smu_message_type msg,
395                          uint32_t *read_arg)
396 {
397         return smu_cmn_send_smc_msg_with_param(smu,
398                                                msg,
399                                                0,
400                                                read_arg);
401 }
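/*
 * Typical synchronous usage, sketched with a message that already appears in
 * this file; any mapped message works the same way:
 *
 *     uint32_t value;
 *
 *     ret = smu_cmn_send_smc_msg_with_param(smu,
 *                                           SMU_MSG_GetEnabledSmuFeatures,
 *                                           0, &value);
 *
 * Passing NULL for read_arg skips the read-back of the response argument.
 */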
402
403 int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
404                          uint32_t msg)
405 {
406         return __smu_cmn_send_debug_msg(smu, msg, 0);
407 }
408
409 int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
410                          uint32_t msg, uint32_t param)
411 {
412         return __smu_cmn_send_debug_msg(smu, msg, param);
413 }
414
415 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
416                                    enum smu_cmn2asic_mapping_type type,
417                                    uint32_t index)
418 {
419         struct cmn2asic_msg_mapping msg_mapping;
420         struct cmn2asic_mapping mapping;
421
422         switch (type) {
423         case CMN2ASIC_MAPPING_MSG:
424                 if (index >= SMU_MSG_MAX_COUNT ||
425                     !smu->message_map)
426                         return -EINVAL;
427
428                 msg_mapping = smu->message_map[index];
429                 if (!msg_mapping.valid_mapping)
430                         return -EINVAL;
431
432                 if (amdgpu_sriov_vf(smu->adev) &&
433                     !msg_mapping.valid_in_vf)
434                         return -EACCES;
435
436                 return msg_mapping.map_to;
437
438         case CMN2ASIC_MAPPING_CLK:
439                 if (index >= SMU_CLK_COUNT ||
440                     !smu->clock_map)
441                         return -EINVAL;
442
443                 mapping = smu->clock_map[index];
444                 if (!mapping.valid_mapping)
445                         return -EINVAL;
446
447                 return mapping.map_to;
448
449         case CMN2ASIC_MAPPING_FEATURE:
450                 if (index >= SMU_FEATURE_COUNT ||
451                     !smu->feature_map)
452                         return -EINVAL;
453
454                 mapping = smu->feature_map[index];
455                 if (!mapping.valid_mapping)
456                         return -EINVAL;
457
458                 return mapping.map_to;
459
460         case CMN2ASIC_MAPPING_TABLE:
461                 if (index >= SMU_TABLE_COUNT ||
462                     !smu->table_map)
463                         return -EINVAL;
464
465                 mapping = smu->table_map[index];
466                 if (!mapping.valid_mapping)
467                         return -EINVAL;
468
469                 return mapping.map_to;
470
471         case CMN2ASIC_MAPPING_PWR:
472                 if (index >= SMU_POWER_SOURCE_COUNT ||
473                     !smu->pwr_src_map)
474                         return -EINVAL;
475
476                 mapping = smu->pwr_src_map[index];
477                 if (!mapping.valid_mapping)
478                         return -EINVAL;
479
480                 return mapping.map_to;
481
482         case CMN2ASIC_MAPPING_WORKLOAD:
483                 if (index >= PP_SMC_POWER_PROFILE_COUNT ||
484                     !smu->workload_map)
485                         return -EINVAL;
486
487                 mapping = smu->workload_map[index];
488                 if (!mapping.valid_mapping)
489                         return -ENOTSUPP;
490
491                 return mapping.map_to;
492
493         default:
494                 return -EINVAL;
495         }
496 }
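/*
 * Example of the common-to-ASIC translation above, using identifiers already
 * referenced in this file:
 *
 *     index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
 *                                            SMU_MSG_GetSmuVersion);
 *
 * A non-negative return is the ASIC-specific value (here, the message number
 * written to the message register); a negative errno means the index cannot
 * be translated (for example -EACCES when a message is not permitted in a VF).
 */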
497
498 int smu_cmn_feature_is_supported(struct smu_context *smu,
499                                  enum smu_feature_mask mask)
500 {
501         struct smu_feature *feature = &smu->smu_feature;
502         int feature_id;
503
504         feature_id = smu_cmn_to_asic_specific_index(smu,
505                                                     CMN2ASIC_MAPPING_FEATURE,
506                                                     mask);
507         if (feature_id < 0)
508                 return 0;
509
510         WARN_ON(feature_id > feature->feature_num);
511
512         return test_bit(feature_id, feature->supported);
513 }
514
515 static int __smu_get_enabled_features(struct smu_context *smu,
516                                uint64_t *enabled_features)
517 {
518         return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
519 }
520
521 int smu_cmn_feature_is_enabled(struct smu_context *smu,
522                                enum smu_feature_mask mask)
523 {
524         struct amdgpu_device *adev = smu->adev;
525         uint64_t enabled_features;
526         int feature_id;
527
528         if (__smu_get_enabled_features(smu, &enabled_features)) {
529                 dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
530                 return 0;
531         }
532
533         /*
534          * Renoir and Cyan Skillfish are assumed to have all features
535          * enabled. Since they also have no feature_map available, this
536          * check avoids the unwanted feature_map lookup below.
537          */
538         if (enabled_features == ULLONG_MAX)
539                 return 1;
540
541         feature_id = smu_cmn_to_asic_specific_index(smu,
542                                                     CMN2ASIC_MAPPING_FEATURE,
543                                                     mask);
544         if (feature_id < 0)
545                 return 0;
546
547         return test_bit(feature_id, (unsigned long *)&enabled_features);
548 }
549
550 bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
551                                 enum smu_clk_type clk_type)
552 {
553         enum smu_feature_mask feature_id = 0;
554
555         switch (clk_type) {
556         case SMU_MCLK:
557         case SMU_UCLK:
558                 feature_id = SMU_FEATURE_DPM_UCLK_BIT;
559                 break;
560         case SMU_GFXCLK:
561         case SMU_SCLK:
562                 feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
563                 break;
564         case SMU_SOCCLK:
565                 feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
566                 break;
567         case SMU_VCLK:
568         case SMU_VCLK1:
569                 feature_id = SMU_FEATURE_DPM_VCLK_BIT;
570                 break;
571         case SMU_DCLK:
572         case SMU_DCLK1:
573                 feature_id = SMU_FEATURE_DPM_DCLK_BIT;
574                 break;
575         case SMU_FCLK:
576                 feature_id = SMU_FEATURE_DPM_FCLK_BIT;
577                 break;
578         default:
579                 return true;
580         }
581
582         if (!smu_cmn_feature_is_enabled(smu, feature_id))
583                 return false;
584
585         return true;
586 }
587
588 int smu_cmn_get_enabled_mask(struct smu_context *smu,
589                              uint64_t *feature_mask)
590 {
591         uint32_t *feature_mask_high;
592         uint32_t *feature_mask_low;
593         int ret = 0, index = 0;
594
595         if (!feature_mask)
596                 return -EINVAL;
597
598         feature_mask_low = &((uint32_t *)feature_mask)[0];
599         feature_mask_high = &((uint32_t *)feature_mask)[1];
600
601         index = smu_cmn_to_asic_specific_index(smu,
602                                                 CMN2ASIC_MAPPING_MSG,
603                                                 SMU_MSG_GetEnabledSmuFeatures);
604         if (index > 0) {
605                 ret = smu_cmn_send_smc_msg_with_param(smu,
606                                                       SMU_MSG_GetEnabledSmuFeatures,
607                                                       0,
608                                                       feature_mask_low);
609                 if (ret)
610                         return ret;
611
612                 ret = smu_cmn_send_smc_msg_with_param(smu,
613                                                       SMU_MSG_GetEnabledSmuFeatures,
614                                                       1,
615                                                       feature_mask_high);
616         } else {
617                 ret = smu_cmn_send_smc_msg(smu,
618                                            SMU_MSG_GetEnabledSmuFeaturesHigh,
619                                            feature_mask_high);
620                 if (ret)
621                         return ret;
622
623                 ret = smu_cmn_send_smc_msg(smu,
624                                            SMU_MSG_GetEnabledSmuFeaturesLow,
625                                            feature_mask_low);
626         }
627
628         return ret;
629 }
630
631 uint64_t smu_cmn_get_indep_throttler_status(
632                                         const unsigned long dep_status,
633                                         const uint8_t *throttler_map)
634 {
635         uint64_t indep_status = 0;
636         uint8_t dep_bit = 0;
637
638         for_each_set_bit(dep_bit, &dep_status, 32)
639                 indep_status |= 1ULL << throttler_map[dep_bit];
640
641         return indep_status;
642 }
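/*
 * Worked example for the translation above, with made-up numbers: if bit 3 is
 * set in dep_status and throttler_map[3] == 10, then bit 10 is set in the
 * returned ASIC-independent status. Only the low 32 bits of dep_status are
 * scanned.
 */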
643
644 int smu_cmn_feature_update_enable_state(struct smu_context *smu,
645                                         uint64_t feature_mask,
646                                         bool enabled)
647 {
648         int ret = 0;
649
650         if (enabled) {
651                 ret = smu_cmn_send_smc_msg_with_param(smu,
652                                                   SMU_MSG_EnableSmuFeaturesLow,
653                                                   lower_32_bits(feature_mask),
654                                                   NULL);
655                 if (ret)
656                         return ret;
657                 ret = smu_cmn_send_smc_msg_with_param(smu,
658                                                   SMU_MSG_EnableSmuFeaturesHigh,
659                                                   upper_32_bits(feature_mask),
660                                                   NULL);
661         } else {
662                 ret = smu_cmn_send_smc_msg_with_param(smu,
663                                                   SMU_MSG_DisableSmuFeaturesLow,
664                                                   lower_32_bits(feature_mask),
665                                                   NULL);
666                 if (ret)
667                         return ret;
668                 ret = smu_cmn_send_smc_msg_with_param(smu,
669                                                   SMU_MSG_DisableSmuFeaturesHigh,
670                                                   upper_32_bits(feature_mask),
671                                                   NULL);
672         }
673
674         return ret;
675 }
676
677 int smu_cmn_feature_set_enabled(struct smu_context *smu,
678                                 enum smu_feature_mask mask,
679                                 bool enable)
680 {
681         int feature_id;
682
683         feature_id = smu_cmn_to_asic_specific_index(smu,
684                                                     CMN2ASIC_MAPPING_FEATURE,
685                                                     mask);
686         if (feature_id < 0)
687                 return -EINVAL;
688
689         return smu_cmn_feature_update_enable_state(smu,
690                                                1ULL << feature_id,
691                                                enable);
692 }
693
694 #undef __SMU_DUMMY_MAP
695 #define __SMU_DUMMY_MAP(fea)    #fea
696 static const char *__smu_feature_names[] = {
697         SMU_FEATURE_MASKS
698 };
699
700 static const char *smu_get_feature_name(struct smu_context *smu,
701                                         enum smu_feature_mask feature)
702 {
703         if (feature < 0 || feature >= SMU_FEATURE_COUNT)
704                 return "unknown smu feature";
705         return __smu_feature_names[feature];
706 }
707
708 size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
709                                    char *buf)
710 {
711         int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
712         uint64_t feature_mask;
713         int i, feature_index;
714         uint32_t count = 0;
715         size_t size = 0;
716
717         if (__smu_get_enabled_features(smu, &feature_mask))
718                 return 0;
719
720         size =  sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
721                         upper_32_bits(feature_mask), lower_32_bits(feature_mask));
722
723         memset(sort_feature, -1, sizeof(sort_feature));
724
725         for (i = 0; i < SMU_FEATURE_COUNT; i++) {
726                 feature_index = smu_cmn_to_asic_specific_index(smu,
727                                                                CMN2ASIC_MAPPING_FEATURE,
728                                                                i);
729                 if (feature_index < 0)
730                         continue;
731
732                 sort_feature[feature_index] = i;
733         }
734
735         size += sysfs_emit_at(buf, size, "%-2s. %-20s  %-3s : %-s\n",
736                         "No", "Feature", "Bit", "State");
737
738         for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
739                 if (sort_feature[feature_index] < 0)
740                         continue;
741
742                 size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
743                                 count++,
744                                 smu_get_feature_name(smu, sort_feature[feature_index]),
745                                 feature_index,
746                                 !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
747                                 "enabled" : "disabled");
748         }
749
750         return size;
751 }
752
753 int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
754                                 uint64_t new_mask)
755 {
756         int ret = 0;
757         uint64_t feature_mask;
758         uint64_t feature_2_enabled = 0;
759         uint64_t feature_2_disabled = 0;
760
761         ret = __smu_get_enabled_features(smu, &feature_mask);
762         if (ret)
763                 return ret;
764
765         feature_2_enabled  = ~feature_mask & new_mask;
766         feature_2_disabled = feature_mask & ~new_mask;
767
768         if (feature_2_enabled) {
769                 ret = smu_cmn_feature_update_enable_state(smu,
770                                                           feature_2_enabled,
771                                                           true);
772                 if (ret)
773                         return ret;
774         }
775         if (feature_2_disabled) {
776                 ret = smu_cmn_feature_update_enable_state(smu,
777                                                           feature_2_disabled,
778                                                           false);
779                 if (ret)
780                         return ret;
781         }
782
783         return ret;
784 }
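/*
 * The mask arithmetic in smu_cmn_set_pp_feature_mask(), with made-up values:
 * if the currently enabled mask is 0b1010 and new_mask is 0b0110, then
 * feature_2_enabled = ~0b1010 & 0b0110 = 0b0100 (to be switched on) and
 * feature_2_disabled = 0b1010 & ~0b0110 = 0b1000 (to be switched off), while
 * bit 1 stays enabled untouched.
 */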
785
786 /**
787  * smu_cmn_disable_all_features_with_exception - disable all dpm features
788  *                                               except this specified by
789  *                                               @mask
790  *
791  * @smu:               smu_context pointer
792  * @mask:              the dpm feature which should not be disabled
793  *                     SMU_FEATURE_COUNT: no exception, all dpm features
794  *                     to disable
795  *
796  * Returns:
797  * 0 on success or a negative error code on failure.
798  */
799 int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
800                                                 enum smu_feature_mask mask)
801 {
802         uint64_t features_to_disable = U64_MAX;
803         int skipped_feature_id;
804
805         if (mask != SMU_FEATURE_COUNT) {
806                 skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
807                                                                     CMN2ASIC_MAPPING_FEATURE,
808                                                                     mask);
809                 if (skipped_feature_id < 0)
810                         return -EINVAL;
811
812                 features_to_disable &= ~(1ULL << skipped_feature_id);
813         }
814
815         return smu_cmn_feature_update_enable_state(smu,
816                                                    features_to_disable,
817                                                    0);
818 }
819
820 int smu_cmn_get_smc_version(struct smu_context *smu,
821                             uint32_t *if_version,
822                             uint32_t *smu_version)
823 {
824         int ret = 0;
825
826         if (!if_version && !smu_version)
827                 return -EINVAL;
828
829         if (smu->smc_fw_if_version && smu->smc_fw_version)
830         {
831                 if (if_version)
832                         *if_version = smu->smc_fw_if_version;
833
834                 if (smu_version)
835                         *smu_version = smu->smc_fw_version;
836
837                 return 0;
838         }
839
840         if (if_version) {
841                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
842                 if (ret)
843                         return ret;
844
845                 smu->smc_fw_if_version = *if_version;
846         }
847
848         if (smu_version) {
849                 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
850                 if (ret)
851                         return ret;
852
853                 smu->smc_fw_version = *smu_version;
854         }
855
856         return ret;
857 }
858
859 int smu_cmn_update_table(struct smu_context *smu,
860                          enum smu_table_id table_index,
861                          int argument,
862                          void *table_data,
863                          bool drv2smu)
864 {
865         struct smu_table_context *smu_table = &smu->smu_table;
866         struct amdgpu_device *adev = smu->adev;
867         struct smu_table *table = &smu_table->driver_table;
868         int table_id = smu_cmn_to_asic_specific_index(smu,
869                                                       CMN2ASIC_MAPPING_TABLE,
870                                                       table_index);
871         uint32_t table_size;
872         int ret = 0;
873         if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
874                 return -EINVAL;
875
876         table_size = smu_table->tables[table_index].size;
877
878         if (drv2smu) {
879                 memcpy(table->cpu_addr, table_data, table_size);
880                 /*
881                  * Flush hdp cache: to guard the content seen by
882                  * Flush the HDP cache to ensure that the content seen by
883                  * the GPU is consistent with what the CPU wrote.
884                 amdgpu_asic_flush_hdp(adev, NULL);
885         }
886
887         ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
888                                           SMU_MSG_TransferTableDram2Smu :
889                                           SMU_MSG_TransferTableSmu2Dram,
890                                           table_id | ((argument & 0xFFFF) << 16),
891                                           NULL);
892         if (ret)
893                 return ret;
894
895         if (!drv2smu) {
896                 amdgpu_asic_invalidate_hdp(adev, NULL);
897                 memcpy(table_data, table->cpu_addr, table_size);
898         }
899
900         return 0;
901 }
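/*
 * The message parameter built above packs the ASIC table id into the low
 * 16 bits and @argument into the high 16 bits; for illustration only,
 * table_id 6 with argument 1 is sent as 0x00010006.
 */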
902
903 int smu_cmn_write_watermarks_table(struct smu_context *smu)
904 {
905         void *watermarks_table = smu->smu_table.watermarks_table;
906
907         if (!watermarks_table)
908                 return -EINVAL;
909
910         return smu_cmn_update_table(smu,
911                                     SMU_TABLE_WATERMARKS,
912                                     0,
913                                     watermarks_table,
914                                     true);
915 }
916
917 int smu_cmn_write_pptable(struct smu_context *smu)
918 {
919         void *pptable = smu->smu_table.driver_pptable;
920
921         return smu_cmn_update_table(smu,
922                                     SMU_TABLE_PPTABLE,
923                                     0,
924                                     pptable,
925                                     true);
926 }
927
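/*
 * Cached reads: unless bypass_cache is set, a metrics snapshot fetched within
 * the last msecs_to_jiffies(1) jiffies (about one millisecond, subject to the
 * jiffies granularity) is reused rather than re-read from the SMU.
 */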
928 int smu_cmn_get_metrics_table(struct smu_context *smu,
929                               void *metrics_table,
930                               bool bypass_cache)
931 {
932         struct smu_table_context *smu_table = &smu->smu_table;
933         uint32_t table_size =
934                 smu_table->tables[SMU_TABLE_SMU_METRICS].size;
935         int ret = 0;
936
937         if (bypass_cache ||
938             !smu_table->metrics_time ||
939             time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
940                 ret = smu_cmn_update_table(smu,
941                                        SMU_TABLE_SMU_METRICS,
942                                        0,
943                                        smu_table->metrics_table,
944                                        false);
945                 if (ret) {
946                         dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
947                         return ret;
948                 }
949                 smu_table->metrics_time = jiffies;
950         }
951
952         if (metrics_table)
953                 memcpy(metrics_table, smu_table->metrics_table, table_size);
954
955         return 0;
956 }
957
958 int smu_cmn_get_combo_pptable(struct smu_context *smu)
959 {
960         void *pptable = smu->smu_table.combo_pptable;
961
962         return smu_cmn_update_table(smu,
963                                     SMU_TABLE_COMBO_PPTABLE,
964                                     0,
965                                     pptable,
966                                     false);
967 }
968
969 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
970 {
971         struct metrics_table_header *header = (struct metrics_table_header *)table;
972         uint16_t structure_size;
973
974 #define METRICS_VERSION(a, b)   ((a << 16) | b)
975
976         switch (METRICS_VERSION(frev, crev)) {
977         case METRICS_VERSION(1, 0):
978                 structure_size = sizeof(struct gpu_metrics_v1_0);
979                 break;
980         case METRICS_VERSION(1, 1):
981                 structure_size = sizeof(struct gpu_metrics_v1_1);
982                 break;
983         case METRICS_VERSION(1, 2):
984                 structure_size = sizeof(struct gpu_metrics_v1_2);
985                 break;
986         case METRICS_VERSION(1, 3):
987                 structure_size = sizeof(struct gpu_metrics_v1_3);
988                 break;
989         case METRICS_VERSION(2, 0):
990                 structure_size = sizeof(struct gpu_metrics_v2_0);
991                 break;
992         case METRICS_VERSION(2, 1):
993                 structure_size = sizeof(struct gpu_metrics_v2_1);
994                 break;
995         case METRICS_VERSION(2, 2):
996                 structure_size = sizeof(struct gpu_metrics_v2_2);
997                 break;
998         case METRICS_VERSION(2, 3):
999                 structure_size = sizeof(struct gpu_metrics_v2_3);
1000                 break;
1001         case METRICS_VERSION(2, 4):
1002                 structure_size = sizeof(struct gpu_metrics_v2_4);
1003                 break;
1004         default:
1005                 return;
1006         }
1007
1008 #undef METRICS_VERSION
1009
1010         memset(header, 0xFF, structure_size);
1011
1012         header->format_revision = frev;
1013         header->content_revision = crev;
1014         header->structure_size = structure_size;
1015
1016 }
1017
1018 int smu_cmn_set_mp1_state(struct smu_context *smu,
1019                           enum pp_mp1_state mp1_state)
1020 {
1021         enum smu_message_type msg;
1022         int ret;
1023
1024         switch (mp1_state) {
1025         case PP_MP1_STATE_SHUTDOWN:
1026                 msg = SMU_MSG_PrepareMp1ForShutdown;
1027                 break;
1028         case PP_MP1_STATE_UNLOAD:
1029                 msg = SMU_MSG_PrepareMp1ForUnload;
1030                 break;
1031         case PP_MP1_STATE_RESET:
1032                 msg = SMU_MSG_PrepareMp1ForReset;
1033                 break;
1034         case PP_MP1_STATE_NONE:
1035         default:
1036                 return 0;
1037         }
1038
1039         ret = smu_cmn_send_smc_msg(smu, msg, NULL);
1040         if (ret)
1041                 dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
1042
1043         return ret;
1044 }
1045
1046 bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
1047 {
1048         struct pci_dev *p = NULL;
1049         bool snd_driver_loaded;
1050
1051         /*
1052          * If the ASIC comes with no audio function, we always assume
1053          * it is "enabled".
1054          */
1055         p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
1056                         adev->pdev->bus->number, 1);
1057         if (!p)
1058                 return true;
1059
1060         snd_driver_loaded = pci_is_enabled(p) ? true : false;
1061
1062         pci_dev_put(p);
1063
1064         return snd_driver_loaded;
1065 }