// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}
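/*
 * Note (descriptive, not new behavior): gpu->recover_work is serviced by the
 * driver's kthread worker, and the common msm_gpu recovery handler takes care
 * of resetting the GPU and resubmitting unprocessed work - which is why a GMU
 * fault is simply funneled into the normal recovery path here.
 */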
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status &  A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}
static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return true;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return true;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}
void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index;
	unsigned long gpu_freq;
	int ret = 0;

	gpu_freq = dev_pm_opp_get_freq(opp);

	if (gpu_freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (gpu_freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;
	gmu->freq = gmu->gpu_freqs[perf_index];

	trace_msm_gmu_freq_change(gmu->freq, perf_index);

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active
	 */
	if (pm_runtime_get_if_in_use(gmu->dev) == 0)
		return;

	if (!gmu->legacy) {
		a6xx_hfi_set_freq(gmu, perf_index);
		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
		pm_runtime_put(gmu->dev);
		return;
	}

	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			((3 & 0xf) << 28) | perf_index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
	pm_runtime_put(gmu->dev);
}
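/*
 * Usage sketch (illustrative only - names such as dev/freq/flags are the
 * caller's): devfreq ends up calling in here with an OPP picked from the
 * GPU's table, roughly:
 *
 *	opp = devfreq_recommended_opp(dev, &freq, flags);
 *	a6xx_gmu_set_freq(gpu, opp);
 *	dev_pm_opp_put(opp);
 *
 * On current firmware the vote travels over HFI; the register-based DCVS
 * handshake is only used on legacy (HFI v1) devices.
 */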
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}
/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}
static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;
	u32 mask, reset_val;

	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	/* Set the log wptr index
	 * note: downstream saves the value in poweroff and restores it here
	 */
	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}
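/*
 * The DTCM word read above appears to act as a firmware version marker:
 * older firmware posts the full 0xbabeface pattern in FW_INIT_RESULT once
 * the CM3 is up, while newer firmware only guarantees the low bits (0x100),
 * hence the two mask/reset_val pairs.
 */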
static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}
struct a6xx_gmu_oob_bits {
	int set, ack, set_new, ack_new, clear, clear_new;
	const char *name;
};

/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
	[GMU_OOB_GPU_SET] = { .name = "GPU_SET", .set = 16, .ack = 24,
		.set_new = 30, .ack_new = 31, .clear = 24, .clear_new = 31, },
	[GMU_OOB_PERFCOUNTER_SET] = { .name = "PERFCOUNTER", .set = 17,
		.ack = 25, .set_new = 28, .ack_new = 30, .clear = 25,
		.clear_new = 29, },
	[GMU_OOB_BOOT_SLUMBER] = { .name = "BOOT_SLUMBER", .set = 22,
		.ack = 30, .clear = 30, },
	[GMU_OOB_DCVS_SET] = { .name = "GPU_DCVS", .set = 23, .ack = 31,
		.clear = 31, },
};
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return -EINVAL;

	if (gmu->legacy) {
		request = a6xx_gmu_oob_bits[state].set;
		ack = a6xx_gmu_oob_bits[state].ack;
	} else {
		request = a6xx_gmu_oob_bits[state].set_new;
		ack = a6xx_gmu_oob_bits[state].ack_new;
		if (!request || !ack) {
			DRM_DEV_ERROR(gmu->dev,
				      "Invalid non-legacy GMU request %s\n",
				      a6xx_gmu_oob_bits[state].name);
			return -EINVAL;
		}
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
				a6xx_gmu_oob_bits[state].name,
				gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}
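/*
 * Typical usage (sketch): OOBs bracket CPU accesses to hardware the GMU
 * normally owns, e.g. keeping the GPU awake while poking GX registers:
 *
 *	if (!a6xx_gmu_set_oob(gmu, GMU_OOB_GPU_SET)) {
 *		... access GPU registers ...
 *		a6xx_gmu_clear_oob(gmu, GMU_OOB_GPU_SET);
 *	}
 */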
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int bit;

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return;

	if (gmu->legacy)
		bit = a6xx_gmu_oob_bits[state].clear;
	else
		bit = a6xx_gmu_oob_bits[state].clear_new;

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power collapse */
static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	if (!gmu->legacy)
		return 0;

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}
/* Disable CPU control of SPTP power collapse */
static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	if (!gmu->legacy)
		return;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	if (!gmu->legacy) {
		ret = a6xx_hfi_send_prep_slumber(gmu);
		goto out;
	}

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

out:
	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}
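/*
 * 0x0f in RPMH_POWER_STATE is the value the firmware reports once it has
 * actually reached slumber; a6xx_gmu_shutdown() checks the same register
 * before deciding whether a slumber request is needed at all.
 */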
static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	/* Set up CX GMU counter 0 to count busy ticks */
	gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
	gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);

	/* Enable the power counter */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);

	return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	void __iomem *seqptr = NULL;
	uint32_t pdc_address_offset;
	bool pdc_in_aop = false;

	if (IS_ERR(pdcptr))
		goto err;

	if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
		pdc_in_aop = true;
	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
		pdc_address_offset = 0x30090;
	else
		pdc_address_offset = 0x30080;

	if (!pdc_in_aop) {
		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
		if (IS_ERR(seqptr))
			goto err;
	}

	/* Disable SDE clock gating */
	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
	} else {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
	}

	if (pdc_in_aop)
		goto setup_pdc;

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu) || adreno_is_a650_family(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
setup_pdc:
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}
/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */

#define GMU_PWR_COL_HYST 0x000a1680
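/*
 * Worked out: with the always-on XO running at 19.2 MHz, 0x1680 (5760)
 * cycles is 5760 / 19200000 ~= 300 us, and 0xa (10) cycles is ~0.5 us.
 */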
/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		fallthrough;
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
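/*
 * The idle levels are ordered (ACTIVE < SPTP < IFPC), which is what the
 * "idle_level < GMU_IDLE_STATE_SPTP" comparisons elsewhere rely on; the
 * fallthrough above likewise makes IFPC apply the SPTP hysteresis too.
 */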
struct block_header {
	u32 addr;
	u32 size;
	u32 type;
	u32 value;
	u32 data[];
};

/* this should be a general kernel helper */
static int in_range(u32 addr, u32 start, u32 size)
{
	return addr >= start && addr < start + size;
}

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
		return false;

	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
	return true;
}
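/*
 * Layout sketch of the non-legacy GMU firmware image: a stream of
 * block_header records, each immediately followed by its payload, so the
 * next header lives at &blk->data[blk->size >> 2]. Blocks aimed at the
 * TCMs are written through registers; anything else must land in one of
 * the preallocated icache/dcache/dummy BOs via fw_block_mem().
 */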
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
	const struct block_header *blk;
	u32 reg_offset;

	u32 itcm_base = 0x00000000;
	u32 dtcm_base = 0x00040000;

	if (adreno_is_a650_family(adreno_gpu))
		dtcm_base = 0x10004000;

	if (gmu->legacy) {
		/* Sanity check the size of the firmware that was loaded */
		if (fw_image->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
			       (u32*) fw_image->data, fw_image->size);
		return 0;
	}

	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;

		if (in_range(blk->addr, itcm_base, SZ_16K)) {
			reg_offset = (blk->addr - itcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
				blk->data, blk->size);
		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
			reg_offset = (blk->addr - dtcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
				blk->data, blk->size);
		} else if (!fw_block_mem(&gmu->icache, blk) &&
			   !fw_block_mem(&gmu->dcache, blk) &&
			   !fw_block_mem(&gmu->dummy, blk)) {
			DRM_DEV_ERROR(gmu->dev,
				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
				blk->addr, blk->size, blk->data[0]);
		}
	}

	return 0;
}
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	static bool rpmh_init;
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	int ret;
	u32 chipid;

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
	}

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		/* Turn on register retention */
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

		/* We only need to load the RPMh microcode once */
		if (!rpmh_init) {
			a6xx_gmu_rpmh_init(gmu);
			rpmh_init = true;
		} else {
			ret = a6xx_rpmh_start(gmu);
			if (ret)
				return ret;
		}

		ret = a6xx_gmu_fw_load(gmu);
		if (ret)
			return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		(1 << 31) | (0xa << 18) | (0xa0));

	chipid = adreno_gpu->rev.core << 24;
	chipid |= adreno_gpu->rev.major << 16;
	chipid |= adreno_gpu->rev.minor << 12;
	chipid |= adreno_gpu->rev.patchid << 8;

	gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

	gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
		  gmu->log.iova | (gmu->log.size / SZ_4K - 1));

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	if (gmu->legacy) {
		ret = a6xx_gmu_gfx_rail_on(gmu);
		if (ret)
			return ret;
	}

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}
#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	u32 val;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
		(val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
		(val & 1), 100, 10000);
}
/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR_OR_NULL(gpu_opp))
		return;

	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
	a6xx_gmu_set_freq(gpu, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}
static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR_OR_NULL(gpu_opp))
		return;

	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return 0;

	gmu->hung = false;

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get_sync(gmu->gxpd);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	clk_set_rate(gmu->hub_clk, 150000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	a6xx_gmu_set_initial_bw(gpu, gmu);

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
		GMU_WARM_BOOT : GMU_COLD_BOOT;

	/*
	 * Warm boot path does not work on newer GPUs
	 * Presumably this is because icache/dcache regions must be restored
	 */
	if (!gmu->legacy)
		status = GMU_COLD_BOOT;

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	a6xx_gmu_set_initial_freq(gpu, gmu);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}
#define GBIF_CLIENT_HALT_MASK	BIT(0)
#define GBIF_ARB_HALT_MASK	BIT(1)

static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
{
	struct msm_gpu *gpu = &adreno_gpu->base;

	if (!a6xx_has_gbif(adreno_gpu)) {
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
								0xf) == 0xf);
		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);

		return;
	}

	/* Halt new client requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
			(GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);

	/* Halt all AXI requests on GBIF */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
	spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
			(GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);

	/* The GBIF halt needs to be explicitly cleared */
	gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
}
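/*
 * On GBIF parts the halt is a two-step handshake: new client traffic is
 * halted first, then all AXI traffic at the arbiter, with each step spinning
 * on its ack bit before the halt is finally released again.
 */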
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu);

		/* tell the GMU we want to slumber */
		a6xx_gmu_notify_slumber(gmu);

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}
int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
		/* use IOMMU_PRIV for icache/dcache */
		flags |= MSM_BO_MAP_PRIV;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
		range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	return 0;
}
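/*
 * GMU address map as used above (best read together with the callers in
 * a6xx_gmu_init()): blobs with a fixed iova - icache/dcache/dummy and the
 * a660 debug carveout - are pinned exactly where the firmware expects them,
 * while everything else is carved out of the 0x60000000-0x80000000 uncached
 * window, skipping the dummy page kept at the base of that range.
 */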
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct iommu_domain *domain;
	struct msm_mmu *mmu;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENODEV;

	mmu = msm_iommu_new(gmu->dev, domain);
	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace)) {
		iommu_domain_free(domain);
		return PTR_ERR(gmu->aspace);
	}

	return 0;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				      "Level %u not found in the RPMh list\n",
				      level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non-zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/* Construct the vote */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}
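/*
 * Example with made-up numbers: if freqs[i] maps to arc level 128 and
 * cmd_db returns pri[] = { 16, 128, 256 } and sec[] = { 64, 128 }, then
 * pindex = 1, sindex = 1 and the vote is
 * (128 << 16) | (1 << 8) | 1 = 0x00800101.
 */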
/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}
static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}
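/*
 * dev_pm_opp_find_freq_ceil() rounds the cursor up to the next OPP at or
 * above it, so starting freq at 1 and bumping it past each hit walks the
 * OPP table in ascending order; index ends up as the table length,
 * including the extra "off" entry in slot 0.
 */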
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = devm_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}
static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "hub");

	return 0;
}
static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}
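/*
 * Each IRQ is requested and then immediately disabled: a6xx_gmu_resume()
 * re-enables a line only once the corresponding block (GMU AO first, then
 * HFI) has been brought up far enough to service it.
 */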
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = to_platform_device(gmu->dev);

	if (!gmu->initialized)
		return;

	pm_runtime_force_suspend(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	gmu->mmio = NULL;
	gmu->rscc = NULL;

	a6xx_gmu_memory_free(gmu);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* A660 now requires handling "prealloc requests" in GMU firmware
	 * For now just hardcode allocations based on the known firmware.
	 * note: there is no indication that these correspond to "dummy" or
	 * "debug" regions, but this "guess" allows reusing these BOs which
	 * are otherwise unused by a660.
	 */
	gmu->dummy.size = SZ_4K;
	if (adreno_is_a660_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
					    0x60400000);
		if (ret)
			goto err_memory;

		gmu->dummy.size = SZ_8K;
	}

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
				    0x60000000);
	if (ret)
		goto err_memory;

	if (adreno_is_a650_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_16M - SZ_16K, 0x04000);
		if (ret)
			goto err_memory;
	} else if (adreno_is_a640_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
			SZ_256K - SZ_16K, 0x04000);
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
			SZ_256K - SZ_16K, 0x44000);
		if (ret)
			goto err_memory;
	} else {
		/* HFI v1, has sptprac */
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
	if (ret)
		goto err_memory;

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	if (adreno_is_a650_family(adreno_gpu)) {
		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
		if (IS_ERR(gmu->rscc))
			goto err_mmio;
	} else {
		gmu->rscc = gmu->mmio + 0x23000;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}