2 * Copyright 2018 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <linux/delay.h>
26 #include "dm_services.h"
27 #include "dcn20/dcn20_hubbub.h"
28 #include "dcn21_hubbub.h"
29 #include "reg_helper.h"
/*
 * reg_helper glue: shorthand macros consumed by the REG_* accessors below.
 * FN(reg, field) expands to the shift/mask pair for a register field.
 * NOTE(review): this extract dropped the "#define DC_LOGGER", "#define REG"
 * and "#undef FN" lines, which is why the logger expression is orphaned and
 * FN appears to be defined twice — confirm against the full file.
 */
34 hubbub1->base.ctx->logger
39 #define FN(reg_name, field_name) \
40 hubbub1->shifts->field_name, hubbub1->masks->field_name
49 #define FN(reg_name, field_name) \
50 hubbub1->shifts->field_name, hubbub1->masks->field_name
/*
 * convert_and_clamp() - convert a watermark in nanoseconds to refclk cycles
 * and clamp the result to the register field's maximum (clamp_value).
 * NOTE(review): lines are missing from this extract — the parameter list
 * (wm_ns, refclk_mhz, clamp_value), the scaling divide and the return
 * statement are not visible; confirm against the full file.
 */
52 static uint32_t convert_and_clamp(
58 ret_val = wm_ns * refclk_mhz;
61 if (ret_val > clamp_value)
62 ret_val = clamp_value;
/*
 * dcn21_dchvm_init() - bring up the DCHVM rIOMMU for host VM translation.
 * Requests init, polls RIOMMU_ACTIVE, reflects DCHUBBUB power status,
 * kicks off prefetch, re-enables dynamic clock gating, then waits for
 * HOSTVM_PREFETCH_DONE before marking riommu_active.
 * NOTE(review): this extract is missing lines — the early-return path when
 * prefetch is already done, the poll-loop body/delay and several closing
 * braces are not visible; confirm against the full file.
 */
67 void dcn21_dchvm_init(struct hubbub *hubbub)
69 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
70 uint32_t riommu_active, prefetch_done;
73 REG_GET(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, &prefetch_done);
/* Already prefetched on a previous init — just record the state. */
76 hubbub->riommu_active = true;
/* Request rIOMMU initialization from the hardware. */
80 REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
82 //Poll until RIOMMU_ACTIVE = 1
83 for (i = 0; i < 100; i++) {
84 REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
93 //Reflect the power status of DCHUBBUB
94 REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
96 //Start rIOMMU prefetching
97 REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
99 // Enable dynamic clock gating
100 REG_UPDATE_4(DCHVM_CLK_CTRL,
101 HVM_DISPCLK_R_GATE_DIS, 0,
102 HVM_DISPCLK_G_GATE_DIS, 0,
103 HVM_DCFCLK_R_GATE_DIS, 0,
104 HVM_DCFCLK_G_GATE_DIS, 0);
106 //Poll until HOSTVM_PREFETCH_DONE = 1
107 REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
109 hubbub->riommu_active = true;
/*
 * hubbub21_init_dchub() - program the DCHUB system aperture and GART.
 * Writes the frame-buffer and AGP aperture boundaries (in 16MB units,
 * hence the >> 24), sets up VMID 0 from the physical GART config when one
 * is provided (page-table addresses in 4KB units, >> 12), then initializes
 * the DCHVM rIOMMU.  Returns the number of VMIDs available.
 * NOTE(review): closing braces for the if-block and the function were
 * dropped from this extract.
 */
113 int hubbub21_init_dchub(struct hubbub *hubbub,
114 struct dcn_hubbub_phys_addr_config *pa_config)
116 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
117 struct dcn_vmid_page_table_config phys_config;
119 REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
120 FB_BASE, pa_config->system_aperture.fb_base >> 24);
121 REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
122 FB_TOP, pa_config->system_aperture.fb_top >> 24);
123 REG_SET(DCN_VM_FB_OFFSET, 0,
124 FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
125 REG_SET(DCN_VM_AGP_BOT, 0,
126 AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
127 REG_SET(DCN_VM_AGP_TOP, 0,
128 AGP_TOP, pa_config->system_aperture.agp_top >> 24);
129 REG_SET(DCN_VM_AGP_BASE, 0,
130 AGP_BASE, pa_config->system_aperture.agp_base >> 24);
/* A zero-length page-table range means "no GART config" — skip VMID 0 setup. */
132 if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
133 phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
134 phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
135 phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack
136 phys_config.depth = 0;
137 phys_config.block_size = 0;
138 // Init VMID 0 based on PA config
139 dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
142 dcn21_dchvm_init(hubbub);
144 return hubbub1->num_vmid;
147 bool hubbub21_program_urgent_watermarks(
148 struct hubbub *hubbub,
149 struct dcn_watermark_set *watermarks,
150 unsigned int refclk_mhz,
153 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
154 uint32_t prog_wm_value;
155 bool wm_pending = false;
157 /* Repeat for water mark set A, B, C and D. */
159 if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) {
160 hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
161 prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
162 refclk_mhz, 0x1fffff);
163 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
164 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value,
165 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value);
167 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
168 "HW register value = 0x%x\n",
169 watermarks->a.urgent_ns, prog_wm_value);
170 } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
173 /* determine the transfer time for a quantity of data for a particular requestor.*/
174 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
175 > hubbub1->watermarks.a.frac_urg_bw_flip) {
176 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
178 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
179 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
180 } else if (watermarks->a.frac_urg_bw_flip
181 < hubbub1->watermarks.a.frac_urg_bw_flip)
184 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
185 > hubbub1->watermarks.a.frac_urg_bw_nom) {
186 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
188 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
189 DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
190 } else if (watermarks->a.frac_urg_bw_nom
191 < hubbub1->watermarks.a.frac_urg_bw_nom)
194 if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
195 hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
196 prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
197 refclk_mhz, 0x1fffff);
198 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
199 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
200 } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
204 if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
205 hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
206 prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
207 refclk_mhz, 0x1fffff);
208 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
209 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value,
210 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value);
212 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
213 "HW register value = 0x%x\n",
214 watermarks->b.urgent_ns, prog_wm_value);
215 } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
218 /* determine the transfer time for a quantity of data for a particular requestor.*/
219 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
220 > hubbub1->watermarks.a.frac_urg_bw_flip) {
221 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
223 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
224 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
225 } else if (watermarks->a.frac_urg_bw_flip
226 < hubbub1->watermarks.a.frac_urg_bw_flip)
229 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
230 > hubbub1->watermarks.a.frac_urg_bw_nom) {
231 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
233 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
234 DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
235 } else if (watermarks->a.frac_urg_bw_nom
236 < hubbub1->watermarks.a.frac_urg_bw_nom)
239 if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
240 hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
241 prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
242 refclk_mhz, 0x1fffff);
243 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
244 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
245 } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
249 if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
250 hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
251 prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
252 refclk_mhz, 0x1fffff);
253 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
254 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value,
255 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value);
257 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
258 "HW register value = 0x%x\n",
259 watermarks->c.urgent_ns, prog_wm_value);
260 } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
263 /* determine the transfer time for a quantity of data for a particular requestor.*/
264 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
265 > hubbub1->watermarks.a.frac_urg_bw_flip) {
266 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
268 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
269 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
270 } else if (watermarks->a.frac_urg_bw_flip
271 < hubbub1->watermarks.a.frac_urg_bw_flip)
274 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
275 > hubbub1->watermarks.a.frac_urg_bw_nom) {
276 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
278 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
279 DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
280 } else if (watermarks->a.frac_urg_bw_nom
281 < hubbub1->watermarks.a.frac_urg_bw_nom)
284 if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
285 hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
286 prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
287 refclk_mhz, 0x1fffff);
288 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
289 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
290 } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
294 if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
295 hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
296 prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
297 refclk_mhz, 0x1fffff);
298 REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
299 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value,
300 DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value);
302 DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
303 "HW register value = 0x%x\n",
304 watermarks->d.urgent_ns, prog_wm_value);
305 } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
308 /* determine the transfer time for a quantity of data for a particular requestor.*/
309 if (safe_to_lower || watermarks->a.frac_urg_bw_flip
310 > hubbub1->watermarks.a.frac_urg_bw_flip) {
311 hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip;
313 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
314 DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
315 } else if (watermarks->a.frac_urg_bw_flip
316 < hubbub1->watermarks.a.frac_urg_bw_flip)
319 if (safe_to_lower || watermarks->a.frac_urg_bw_nom
320 > hubbub1->watermarks.a.frac_urg_bw_nom) {
321 hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom;
323 REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
324 DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
325 } else if (watermarks->a.frac_urg_bw_nom
326 < hubbub1->watermarks.a.frac_urg_bw_nom)
329 if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
330 hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
331 prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
332 refclk_mhz, 0x1fffff);
333 REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
334 DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
335 } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
341 bool hubbub21_program_stutter_watermarks(
342 struct hubbub *hubbub,
343 struct dcn_watermark_set *watermarks,
344 unsigned int refclk_mhz,
347 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
348 uint32_t prog_wm_value;
349 bool wm_pending = false;
352 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
353 > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) {
354 hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
355 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
356 prog_wm_value = convert_and_clamp(
357 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
358 refclk_mhz, 0x1fffff);
359 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
360 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value,
361 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
362 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
363 "HW register value = 0x%x\n",
364 watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
365 } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
366 < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
369 if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
370 > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
371 hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns =
372 watermarks->a.cstate_pstate.cstate_exit_ns;
373 prog_wm_value = convert_and_clamp(
374 watermarks->a.cstate_pstate.cstate_exit_ns,
375 refclk_mhz, 0x1fffff);
376 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
377 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value,
378 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
379 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
380 "HW register value = 0x%x\n",
381 watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
382 } else if (watermarks->a.cstate_pstate.cstate_exit_ns
383 < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
387 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
388 > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) {
389 hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
390 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
391 prog_wm_value = convert_and_clamp(
392 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
393 refclk_mhz, 0x1fffff);
394 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
395 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value,
396 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
397 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
398 "HW register value = 0x%x\n",
399 watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
400 } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
401 < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
404 if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
405 > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
406 hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns =
407 watermarks->b.cstate_pstate.cstate_exit_ns;
408 prog_wm_value = convert_and_clamp(
409 watermarks->b.cstate_pstate.cstate_exit_ns,
410 refclk_mhz, 0x1fffff);
411 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
412 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value,
413 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
414 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
415 "HW register value = 0x%x\n",
416 watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
417 } else if (watermarks->b.cstate_pstate.cstate_exit_ns
418 < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
422 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
423 > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) {
424 hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
425 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
426 prog_wm_value = convert_and_clamp(
427 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
428 refclk_mhz, 0x1fffff);
429 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
430 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value,
431 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
432 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
433 "HW register value = 0x%x\n",
434 watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
435 } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
436 < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
439 if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
440 > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
441 hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns =
442 watermarks->c.cstate_pstate.cstate_exit_ns;
443 prog_wm_value = convert_and_clamp(
444 watermarks->c.cstate_pstate.cstate_exit_ns,
445 refclk_mhz, 0x1fffff);
446 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
447 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value,
448 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
449 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
450 "HW register value = 0x%x\n",
451 watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
452 } else if (watermarks->c.cstate_pstate.cstate_exit_ns
453 < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
457 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
458 > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) {
459 hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
460 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
461 prog_wm_value = convert_and_clamp(
462 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
463 refclk_mhz, 0x1fffff);
464 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
465 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value,
466 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
467 DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
468 "HW register value = 0x%x\n",
469 watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
470 } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
471 < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
474 if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
475 > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
476 hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns =
477 watermarks->d.cstate_pstate.cstate_exit_ns;
478 prog_wm_value = convert_and_clamp(
479 watermarks->d.cstate_pstate.cstate_exit_ns,
480 refclk_mhz, 0x1fffff);
481 REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
482 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value,
483 DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
484 DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
485 "HW register value = 0x%x\n",
486 watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
487 } else if (watermarks->d.cstate_pstate.cstate_exit_ns
488 < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
/*
 * hubbub21_program_pstate_watermarks() - program the DRAM clock change
 * (p-state) watermarks for clock states A-D, converting each ns value to
 * refclk cycles and clamping to 0x1fffff.  When safe_to_lower is false a
 * requested lowering is deferred and reported via wm_pending.
 * NOTE(review): this extract dropped the "bool safe_to_lower)" parameter
 * line, the "wm_pending = true;" else-branch bodies, the final
 * "return wm_pending;" and the braces — confirm against the full file.
 */
494 bool hubbub21_program_pstate_watermarks(
495 struct hubbub *hubbub,
496 struct dcn_watermark_set *watermarks,
497 unsigned int refclk_mhz,
500 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
501 uint32_t prog_wm_value;
503 bool wm_pending = false;
/* clock state A */
506 if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
507 > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
508 hubbub1->watermarks.a.cstate_pstate.pstate_change_ns =
509 watermarks->a.cstate_pstate.pstate_change_ns;
510 prog_wm_value = convert_and_clamp(
511 watermarks->a.cstate_pstate.pstate_change_ns,
512 refclk_mhz, 0x1fffff);
513 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
514 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value,
515 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
516 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
517 "HW register value = 0x%x\n\n",
518 watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
519 } else if (watermarks->a.cstate_pstate.pstate_change_ns
520 < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
/* clock state B */
524 if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
525 > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) {
526 hubbub1->watermarks.b.cstate_pstate.pstate_change_ns =
527 watermarks->b.cstate_pstate.pstate_change_ns;
528 prog_wm_value = convert_and_clamp(
529 watermarks->b.cstate_pstate.pstate_change_ns,
530 refclk_mhz, 0x1fffff);
531 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
532 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value,
533 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
534 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
535 "HW register value = 0x%x\n\n",
536 watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
537 } else if (watermarks->b.cstate_pstate.pstate_change_ns
538 < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
/* clock state C */
542 if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
543 > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) {
544 hubbub1->watermarks.c.cstate_pstate.pstate_change_ns =
545 watermarks->c.cstate_pstate.pstate_change_ns;
546 prog_wm_value = convert_and_clamp(
547 watermarks->c.cstate_pstate.pstate_change_ns,
548 refclk_mhz, 0x1fffff);
549 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
550 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value,
551 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
552 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
553 "HW register value = 0x%x\n\n",
554 watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
555 } else if (watermarks->c.cstate_pstate.pstate_change_ns
556 < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
/* clock state D */
560 if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
561 > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) {
562 hubbub1->watermarks.d.cstate_pstate.pstate_change_ns =
563 watermarks->d.cstate_pstate.pstate_change_ns;
564 prog_wm_value = convert_and_clamp(
565 watermarks->d.cstate_pstate.pstate_change_ns,
566 refclk_mhz, 0x1fffff);
567 REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
568 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value,
569 DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
570 DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
571 "HW register value = 0x%x\n\n",
572 watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
573 } else if (watermarks->d.cstate_pstate.pstate_change_ns
574 < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
/*
 * hubbub21_program_watermarks() - top-level watermark programming entry:
 * runs the urgent, stutter and p-state watermark programmers, then
 * configures the DCHub arbiter rate-limiting and self-refresh control.
 * Returns whether any watermark lowering is still pending.
 * NOTE(review): this extract dropped the "bool safe_to_lower)" parameter
 * line, the "wm_pending = true;" bodies of the three if-statements, the
 * final "return wm_pending;" and the braces — confirm against the full file.
 */
580 bool hubbub21_program_watermarks(
581 struct hubbub *hubbub,
582 struct dcn_watermark_set *watermarks,
583 unsigned int refclk_mhz,
586 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
587 bool wm_pending = false;
589 if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
592 if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
595 if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
599 * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
600 * If the memory controller is fully utilized and the DCHub requestors are
601 * well ahead of their amortized schedule, then it is safe to prevent the next winner
602 * from being committed and sent to the fabric.
603 * The utilization of the memory controller is approximated by ensuring that
604 * the number of outstanding requests is greater than a threshold specified
605 * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule,
606 * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles.
608 * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF)
609 * to turn off it for now.
611 REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
612 DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
613 REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
614 DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF,
615 DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);
616 REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL,
617 DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
/* Allow self-refresh unless stutter is disabled via debug option. */
619 hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
/*
 * hubbub21_wm_read_state() - read back the currently programmed watermark
 * registers (urgent, SR enter/exit, DRAM clock change) for sets A-D into
 * the caller-provided dcn_hubbub_wm snapshot.
 * NOTE(review): the "s = &wm->sets[n];" and "s->wm_set = n;" assignments
 * between register groups were dropped from this extract — each group of
 * four REG_GETs fills one wm set.  "dram_clk_chanage" is the field's
 * declared spelling in dcn_hubbub_wm_set; do not "fix" it only here.
 */
624 void hubbub21_wm_read_state(struct hubbub *hubbub,
625 struct dcn_hubbub_wm *wm)
627 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
628 struct dcn_hubbub_wm_set *s;
630 memset(wm, 0, sizeof(struct dcn_hubbub_wm));
634 REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A,
635 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent);
637 REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A,
638 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter);
640 REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A,
641 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit);
643 REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A,
644 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_chanage);
648 REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B,
649 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent);
651 REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B,
652 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter);
654 REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B,
655 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit);
657 REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B,
658 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_chanage);
662 REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C,
663 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent);
665 REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C,
666 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter);
668 REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C,
669 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit);
671 REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C,
672 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_chanage);
676 REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D,
677 DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent);
679 REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D,
680 DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter);
682 REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D,
683 DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit);
685 REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D,
686 DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);
/*
 * hubbub21_apply_DEDCN21_147_wa() - workaround for hardware erratum
 * DEDCN21-147: read the URGENCY_WATERMARK_A register and write the same
 * value back.  Presumably the redundant write re-latches the watermark in
 * the arbiter — confirm against the erratum description.
 */
689 static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)
691 struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
692 uint32_t prog_wm_value;
694 prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
695 REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
/*
 * DCN 2.1 hubbub vtable: reuses DCN 2.0 implementations where unchanged
 * and overrides init, watermark programming, wm readback and the
 * DEDCN21-147 workaround with the dcn21_/hubbub21_ variants above.
 * NOTE(review): the closing "};" was dropped from this extract.
 */
698 static const struct hubbub_funcs hubbub21_funcs = {
699 .update_dchub = hubbub2_update_dchub,
700 .init_dchub_sys_ctx = hubbub21_init_dchub,
701 .init_vm_ctx = hubbub2_init_vm_ctx,
702 .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
703 .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
704 .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
705 .wm_read_state = hubbub21_wm_read_state,
706 .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
707 .program_watermarks = hubbub21_program_watermarks,
708 .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
709 .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
710 .hubbub_read_state = hubbub2_read_state,
/*
 * hubbub21_construct() - wire up a dcn20_hubbub instance for DCN 2.1:
 * stores context, register/shift/mask tables, installs the hubbub21
 * vtable and sets chip constants (pstate debug test index, detile
 * buffer size).  NOTE(review): the closing brace was dropped from this
 * extract; the "164KB for DCN2.0" comment looks stale for a DCN 2.1
 * constructor — confirm the intended size.
 */
713 void hubbub21_construct(struct dcn20_hubbub *hubbub,
714 struct dc_context *ctx,
715 const struct dcn_hubbub_registers *hubbub_regs,
716 const struct dcn_hubbub_shift *hubbub_shift,
717 const struct dcn_hubbub_mask *hubbub_mask)
719 hubbub->base.ctx = ctx;
721 hubbub->base.funcs = &hubbub21_funcs;
723 hubbub->regs = hubbub_regs;
724 hubbub->shifts = hubbub_shift;
725 hubbub->masks = hubbub_mask;
727 hubbub->debug_test_index_pstate = 0xB;
728 hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */