// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Platform Management Framework (PMF) Driver
 *
 * Copyright (c) 2022, Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
 */

#include "pmf.h"

static struct amd_pmf_static_slider_granular config_store;

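/* Debug-only helpers (CONFIG_AMD_PMF_DEBUG) for dumping the cached slider table */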
#ifdef CONFIG_AMD_PMF_DEBUG
static const char *slider_as_str(unsigned int state)
{
	switch (state) {
	case POWER_MODE_PERFORMANCE:
		return "PERFORMANCE";
	case POWER_MODE_BALANCED_POWER:
		return "BALANCED_POWER";
	case POWER_MODE_POWER_SAVER:
		return "POWER_SAVER";
	default:
		return "Unknown Slider State";
	}
}

static const char *source_as_str(unsigned int state)
{
	switch (state) {
	case POWER_SOURCE_AC:
		return "AC";
	case POWER_SOURCE_DC:
		return "DC";
	default:
		return "Unknown Power State";
	}
}

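/* Log every cached (power source, power mode) entry at debug level */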
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data)
{
	int i, j;

	pr_debug("Static Slider Data - BEGIN\n");

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			pr_debug("--- Source:%s Mode:%s ---\n", source_as_str(i), slider_as_str(j));
			pr_debug("SPL: %u mW\n", data->prop[i][j].spl);
			pr_debug("SPPT: %u mW\n", data->prop[i][j].sppt);
			pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only);
			pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt);
			pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min);
			pr_debug("STT_SkinTempLimit_APU: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_APU]);
			pr_debug("STT_SkinTempLimit_HS2: %u C\n",
				 data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]);
		}
	}

	pr_debug("Static Slider Data - END\n");
}
#else
static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {}
#endif

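/*
 * Fetch the OEM static slider limits through the APMF interface and cache
 * one entry per (power source, power mode) pair in config_store.
 */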
static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
{
	struct apmf_static_slider_granular_output output;
	int i, j, idx = 0;

	memset(&config_store, 0, sizeof(config_store));
	apmf_get_static_slider_granular(dev, &output);

	for (i = 0; i < POWER_SOURCE_MAX; i++) {
		for (j = 0; j < POWER_MODE_MAX; j++) {
			config_store.prop[i][j].spl = output.prop[idx].spl;
			config_store.prop[i][j].sppt = output.prop[idx].sppt;
			config_store.prop[i][j].sppt_apu_only = output.prop[idx].sppt_apu_only;
			config_store.prop[i][j].fppt = output.prop[idx].fppt;
			config_store.prop[i][j].stt_min = output.prop[idx].stt_min;
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_APU] =
					output.prop[idx].stt_skin_temp[STT_TEMP_APU];
			config_store.prop[i][j].stt_skin_temp[STT_TEMP_HS2] =
					output.prop[idx].stt_skin_temp[STT_TEMP_HS2];
			config_store.prop[i][j].fan_id = output.prop[idx].fan_id;
			idx++;
		}
	}
	amd_pmf_dump_sps_defaults(&config_store);
}

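/*
 * SLIDER_OP_SET pushes the cached limits for the given mode and current
 * power source to the PMF firmware; SLIDER_OP_GET reads the current values
 * back into the caller-supplied table.
 */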
void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
			   struct amd_pmf_static_slider_granular *table)
{
	int src = amd_pmf_get_power_source();

	if (op == SLIDER_OP_SET) {
		amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
		amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
		amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
				 config_store.prop[src][idx].sppt_apu_only, NULL);
		amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
				 config_store.prop[src][idx].stt_min, NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU], NULL);
		amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
				 config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2], NULL);
	} else if (op == SLIDER_OP_GET) {
		amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
		amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
		amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
		amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
				 &table->prop[src][idx].sppt_apu_only);
		amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
				 &table->prop[src][idx].stt_min);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
		amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
				 (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
	}
}

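/* Re-apply the limits that match the currently selected platform profile */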
int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
{
	int mode;

	mode = amd_pmf_get_pprof_modes(pmf);
	if (mode < 0)
		return mode;

	amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);

	return 0;
}

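/* True when the balanced platform profile is currently selected */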
bool is_pprof_balanced(struct amd_pmf_dev *pmf)
{
	return pmf->current_profile == PLATFORM_PROFILE_BALANCED;
}

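/* platform_profile ->profile_get() callback: report the cached profile */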
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
			       enum platform_profile_option *profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

	*profile = pmf->current_profile;
	return 0;
}

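/* Map the current platform profile to a PMF power mode index */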
int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
{
	int mode;

	switch (pmf->current_profile) {
	case PLATFORM_PROFILE_PERFORMANCE:
		mode = POWER_MODE_PERFORMANCE;
		break;
	case PLATFORM_PROFILE_BALANCED:
		mode = POWER_MODE_BALANCED_POWER;
		break;
	case PLATFORM_PROFILE_LOW_POWER:
		mode = POWER_MODE_POWER_SAVER;
		break;
	default:
		dev_err(pmf->dev, "Unknown Platform Profile.\n");
		return -EOPNOTSUPP;
	}

	return mode;
}

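/* platform_profile ->profile_set() callback: store the profile and apply its limits */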
static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
			       enum platform_profile_option profile)
{
	struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);

	pmf->current_profile = profile;

	return amd_pmf_set_sps_power_limits(pmf);
}

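/* Register SPS (static slider) support with the platform_profile framework */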
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
{
	int err;

	dev->current_profile = PLATFORM_PROFILE_BALANCED;
	amd_pmf_load_defaults_sps(dev);

	/* update SPS balanced power mode thermals */
	amd_pmf_set_sps_power_limits(dev);

	dev->pprof.profile_get = amd_pmf_profile_get;
	dev->pprof.profile_set = amd_pmf_profile_set;

	/* Setup supported modes */
	set_bit(PLATFORM_PROFILE_LOW_POWER, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_BALANCED, dev->pprof.choices);
	set_bit(PLATFORM_PROFILE_PERFORMANCE, dev->pprof.choices);

	/* Create platform_profile structure and register */
	err = platform_profile_register(&dev->pprof);
	if (err)
		dev_err(dev->dev, "Failed to register SPS support, this is most likely an SBIOS bug: %d\n",
			err);

	return err;
}

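/* Unregister from the platform_profile framework */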
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev)
{
	platform_profile_remove();
}