Merge tag 'drm-msm-fixes-2023-03-09' of https://gitlab.freedesktop.org/drm/msm into...
[platform/kernel/linux-rpi.git] drivers/gpu/drm/msm/adreno/a5xx_gpu.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
3  */
4
5 #include <linux/kernel.h>
6 #include <linux/types.h>
7 #include <linux/cpumask.h>
8 #include <linux/firmware/qcom/qcom_scm.h>
9 #include <linux/pm_opp.h>
10 #include <linux/nvmem-consumer.h>
11 #include <linux/slab.h>
12 #include "msm_gem.h"
13 #include "msm_mmu.h"
14 #include "a5xx_gpu.h"
15
16 extern bool hang_debug;
17 static void a5xx_dump(struct msm_gpu *gpu);
18
19 #define GPU_PAS_ID 13
20
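/*
 * Ask the CP to publish its current read pointer into this ring's rptr
 * shadow buffer. This is only possible when the loaded microcode supports
 * the CP_WHERE_AM_I opcode (a5xx_gpu->has_whereami); otherwise the packet
 * is simply not emitted.
 */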
21 static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
22 {
23         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
24         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
25
26         if (a5xx_gpu->has_whereami) {
27                 OUT_PKT7(ring, CP_WHERE_AM_I, 2);
28                 OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
29                 OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
30         }
31 }
32
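/*
 * Commit everything written to the ring since the last flush: publish
 * ring->next as the new write pointer and, if this is the active ring and
 * no preemption is in flight, write it to CP_RB_WPTR so the CP starts
 * fetching. When @sync is true a WHERE_AM_I is emitted first so the rptr
 * shadow stays coherent. A typical call site looks like the STAT_EVENT
 * sequence in a5xx_hw_init():
 *
 *	OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
 *	OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
 *	a5xx_flush(gpu, gpu->rb[0], true);
 */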
33 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
34                 bool sync)
35 {
36         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
37         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
38         uint32_t wptr;
39         unsigned long flags;
40
41         /*
42          * Most flush operations need to issue a WHERE_AM_I opcode to sync up
43          * the rptr shadow
44          */
45         if (sync)
46                 update_shadow_rptr(gpu, ring);
47
48         spin_lock_irqsave(&ring->preempt_lock, flags);
49
50         /* Copy the shadow write pointer (next) to the actual one (cur) */
51         ring->cur = ring->next;
52
53         /* Make sure to wrap wptr if we need to */
54         wptr = get_wptr(ring);
55
56         spin_unlock_irqrestore(&ring->preempt_lock, flags);
57
58         /* Make sure everything is posted before making a decision */
59         mb();
60
61         /* Update HW if this is the current ring and we are not in preempt */
62         if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
63                 gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
64 }
65
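/*
 * Debug path used for CONFIG_DRM_MSM_GPU_SUDO submits: instead of chaining
 * the userspace buffers as indirect buffers, copy their contents straight
 * into the ringbuffer, then wait for the GPU to go idle and retire the
 * fence manually.
 */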
66 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
67 {
68         struct msm_ringbuffer *ring = submit->ring;
69         struct msm_gem_object *obj;
70         uint32_t *ptr, dwords;
71         unsigned int i, j;
72
73         for (i = 0; i < submit->nr_cmds; i++) {
74                 switch (submit->cmd[i].type) {
75                 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
76                         break;
77                 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
78                         if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
79                                 break;
80                         fallthrough;
81                 case MSM_SUBMIT_CMD_BUF:
82                         /* copy commands into RB: */
83                         obj = submit->bos[submit->cmd[i].idx].obj;
84                         dwords = submit->cmd[i].size;
85
86                         ptr = msm_gem_get_vaddr(&obj->base);
87
88                         /* _get_vaddr() shouldn't fail at this point,
89                          * since we've already mapped it once in
90                          * submit_reloc()
91                          */
92                         if (WARN_ON(!ptr))
93                                 return;
94
95                         for (j = 0; j < dwords; j++) {
96                                 /* normally the OUT_PKTn() would wait
97                                  * for space for the packet.  But since
98                                  * we just OUT_RING() the whole thing,
99                                  * we need to call adreno_wait_ring()
100                                  * ourselves:
101                                  */
102                                 adreno_wait_ring(ring, 1);
103                                 OUT_RING(ring, ptr[j]);
104                         }
105
106                         msm_gem_put_vaddr(&obj->base);
107
108                         break;
109                 }
110         }
111
112         a5xx_flush(gpu, ring, true);
113         a5xx_preempt_trigger(gpu);
114
115         /* we might not necessarily have a cmd from userspace to
116          * trigger an event to know that submit has completed, so
117          * do this manually:
118          */
119         a5xx_idle(gpu, ring);
120         ring->memptrs->fence = submit->seqno;
121         msm_gpu_retire(gpu);
122 }
123
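/*
 * Normal submission path: program the per-ring preemption save record,
 * emit the indirect buffers, write the fence both to a scratch register
 * and (via CACHE_FLUSH_TS) to memory with an IRQ, and finish with a
 * CP_CONTEXT_SWITCH_YIELD so a pending preemption can take the floor.
 */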
124 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
125 {
126         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
127         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
128         struct msm_ringbuffer *ring = submit->ring;
129         unsigned int i, ibs = 0;
130
131         if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
132                 gpu->cur_ctx_seqno = 0;
133                 a5xx_submit_in_rb(gpu, submit);
134                 return;
135         }
136
137         OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
138         OUT_RING(ring, 0x02);
139
140         /* Turn off protected mode to write to special registers */
141         OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
142         OUT_RING(ring, 0);
143
144         /* Set the save preemption record for the ring/command */
145         OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
146         OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
147         OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id]));
148
149         /* Turn back on protected mode */
150         OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
151         OUT_RING(ring, 1);
152
153         /* Enable local preemption for finegrain preemption */
154         OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
155         OUT_RING(ring, 0x1);
156
157         /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
158         OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
159         OUT_RING(ring, 0x02);
160
161         /* Submit the commands */
162         for (i = 0; i < submit->nr_cmds; i++) {
163                 switch (submit->cmd[i].type) {
164                 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
165                         break;
166                 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
167                         if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
168                                 break;
169                         fallthrough;
170                 case MSM_SUBMIT_CMD_BUF:
171                         OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
172                         OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
173                         OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
174                         OUT_RING(ring, submit->cmd[i].size);
175                         ibs++;
176                         break;
177                 }
178
179                 /*
180                  * Periodically update the rptr shadow if needed, so that we
181                  * can see partial progress of submits with large # of
182                  * cmds.. otherwise we could needlessly stall waiting for
183                  * ringbuffer state, simply due to looking at a shadow
184                  * rptr value that has not been updated
185                  */
186                 if ((ibs % 32) == 0)
187                         update_shadow_rptr(gpu, ring);
188         }
189
190         /*
191          * Write the render mode to NULL (0) to indicate to the CP that the IBs
192          * are done rendering - otherwise a lucky preemption would start
193          * replaying from the last checkpoint
194          */
195         OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
196         OUT_RING(ring, 0);
197         OUT_RING(ring, 0);
198         OUT_RING(ring, 0);
199         OUT_RING(ring, 0);
200         OUT_RING(ring, 0);
201
202         /* Turn off IB level preemptions */
203         OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
204         OUT_RING(ring, 0x01);
205
206         /* Write the fence to the scratch register */
207         OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
208         OUT_RING(ring, submit->seqno);
209
210         /*
211          * Execute a CACHE_FLUSH_TS event. This will ensure that the
212          * timestamp is written to the memory and then triggers the interrupt
213          */
214         OUT_PKT7(ring, CP_EVENT_WRITE, 4);
215         OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS) |
216                 CP_EVENT_WRITE_0_IRQ);
217         OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
218         OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
219         OUT_RING(ring, submit->seqno);
220
221         /* Yield the floor on command completion */
222         OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
223         /*
224          * If dword[2:1] are non zero, they specify an address for the CP to
225          * write the value of dword[3] to on preemption complete. Write 0 to
226          * skip the write
227          */
228         OUT_RING(ring, 0x00);
229         OUT_RING(ring, 0x00);
230         /* Data value - not used if the address above is 0 */
231         OUT_RING(ring, 0x01);
232         /* Set bit 0 to trigger an interrupt on preempt complete */
233         OUT_RING(ring, 0x01);
234
235         /* A WHERE_AM_I packet is not needed after a YIELD */
236         a5xx_flush(gpu, ring, false);
237
238         /* Check to see if we need to start preemption */
239         a5xx_preempt_trigger(gpu);
240 }
241
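/*
 * Per-variant hardware clock gating (HWCG) register/value tables, applied
 * (or cleared) by a5xx_set_hwcg() below: a50x_hwcg for a506/a508,
 * a512_hwcg for a509/a512, and a5xx_hwcg for everything else.
 */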
242 static const struct adreno_five_hwcg_regs {
243         u32 offset;
244         u32 value;
245 } a5xx_hwcg[] = {
246         {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
247         {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
248         {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
249         {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
250         {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
251         {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
252         {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
253         {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
254         {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
255         {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
256         {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
257         {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
258         {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
259         {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
260         {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
261         {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
262         {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
263         {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
264         {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
265         {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
266         {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
267         {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
268         {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
269         {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
270         {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
271         {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
272         {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
273         {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
274         {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
275         {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
276         {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
277         {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
278         {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
279         {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
280         {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
281         {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
282         {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
283         {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
284         {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
285         {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
286         {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
287         {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
288         {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
289         {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
290         {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
291         {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
292         {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
293         {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
294         {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
295         {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
296         {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
297         {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
298         {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
299         {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
300         {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
301         {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
302         {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
303         {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
304         {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
305         {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
306         {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
307         {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
308         {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
309         {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
310         {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
311         {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
312         {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
313         {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
314         {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
315         {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
316         {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
317         {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
318         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
319         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
320         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
321         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
322         {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
323         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
324         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
325         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
326         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
327         {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
328         {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
329         {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
330         {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
331         {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
332         {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
333         {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
334         {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
335         {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
336         {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
337         {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
338 }, a50x_hwcg[] = {
339         {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
340         {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
341         {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
342         {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
343         {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
344         {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
345         {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
346         {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
347         {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
348         {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
349         {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
350         {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
351         {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
352         {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
353         {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
354         {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
355         {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
356         {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00FFFFF4},
357         {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
358         {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
359         {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
360         {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
361         {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
362         {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
363         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
364         {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
365         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
366         {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
367         {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
368         {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
369         {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
370         {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
371         {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
372         {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
373         {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
374         {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
375         {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
376         {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
377 }, a512_hwcg[] = {
378         {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
379         {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
380         {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
381         {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
382         {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
383         {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
384         {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
385         {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
386         {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
387         {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
388         {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
389         {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
390         {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
391         {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
392         {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
393         {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
394         {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
395         {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
396         {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
397         {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
398         {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
399         {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
400         {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
401         {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
402         {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
403         {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
404         {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
405         {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
406         {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
407         {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
408         {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
409         {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
410         {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
411         {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
412         {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
413         {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
414         {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
415         {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
416         {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
417         {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
418         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
419         {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
420         {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
421         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
422         {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
423         {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
424         {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
425         {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
426         {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
427         {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
428         {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
429         {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
430         {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
431         {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
432         {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
433         {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
434 };
435
436 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
437 {
438         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
439         const struct adreno_five_hwcg_regs *regs;
440         unsigned int i, sz;
441
442         if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
443                 regs = a50x_hwcg;
444                 sz = ARRAY_SIZE(a50x_hwcg);
445         } else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
446                 regs = a512_hwcg;
447                 sz = ARRAY_SIZE(a512_hwcg);
448         } else {
449                 regs = a5xx_hwcg;
450                 sz = ARRAY_SIZE(a5xx_hwcg);
451         }
452
453         for (i = 0; i < sz; i++)
454                 gpu_write(gpu, regs[i].offset,
455                           state ? regs[i].value : 0);
456
457         if (adreno_is_a540(adreno_gpu)) {
458                 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU, state ? 0x00000770 : 0);
459                 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU, state ? 0x00000004 : 0);
460         }
461
462         gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
463         gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
464 }
465
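/*
 * Send the CP_ME_INIT packet that configures the micro engine (multiple
 * hardware contexts, error detection, per-variant microcode workarounds)
 * and wait for the GPU to go idle before continuing.
 */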
466 static int a5xx_me_init(struct msm_gpu *gpu)
467 {
468         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
469         struct msm_ringbuffer *ring = gpu->rb[0];
470
471         OUT_PKT7(ring, CP_ME_INIT, 8);
472
473         OUT_RING(ring, 0x0000002F);
474
475         /* Enable multiple hardware contexts */
476         OUT_RING(ring, 0x00000003);
477
478         /* Enable error detection */
479         OUT_RING(ring, 0x20000000);
480
481         /* Don't enable header dump */
482         OUT_RING(ring, 0x00000000);
483         OUT_RING(ring, 0x00000000);
484
485         /* Specify workarounds for various microcode issues */
486         if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
487                 /* Workaround for token end syncs
488                  * Force a WFI after every direct-render 3D mode draw and every
489                  * 2D mode 3 draw
490                  */
491                 OUT_RING(ring, 0x0000000B);
492         } else if (adreno_is_a510(adreno_gpu)) {
493                 /* Workaround for token and syncs */
494                 OUT_RING(ring, 0x00000001);
495         } else {
496                 /* No workarounds enabled */
497                 OUT_RING(ring, 0x00000000);
498         }
499
500         OUT_RING(ring, 0x00000000);
501         OUT_RING(ring, 0x00000000);
502
503         a5xx_flush(gpu, ring, true);
504         return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
505 }
506
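/*
 * Prime ring 0 for preemption: point the CP at its save record, enable
 * local preemption and yields, and issue an initial CONTEXT_SWITCH_YIELD.
 * Skipped entirely when only one ring is configured.
 */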
507 static int a5xx_preempt_start(struct msm_gpu *gpu)
508 {
509         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
510         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
511         struct msm_ringbuffer *ring = gpu->rb[0];
512
513         if (gpu->nr_rings == 1)
514                 return 0;
515
516         /* Turn off protected mode to write to special registers */
517         OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
518         OUT_RING(ring, 0);
519
520         /* Set the save preemption record for the ring/command */
521         OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
522         OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
523         OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
524
525         /* Turn back on protected mode */
526         OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
527         OUT_RING(ring, 1);
528
529         OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
530         OUT_RING(ring, 0x00);
531
532         OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
533         OUT_RING(ring, 0x01);
534
535         OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
536         OUT_RING(ring, 0x01);
537
538         /* Yield the floor on command completion */
539         OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
540         OUT_RING(ring, 0x00);
541         OUT_RING(ring, 0x00);
542         OUT_RING(ring, 0x01);
543         OUT_RING(ring, 0x01);
544
545         /* The WHERE_AM_I packet is not needed after a YIELD is issued */
546         a5xx_flush(gpu, ring, false);
547
548         return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
549 }
550
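/*
 * Inspect the loaded PFP firmware to decide whether it supports the
 * CP_WHERE_AM_I opcode: a lowest nibble of 0xa in the first dword marks a
 * patched microcode, and the patchlevel then has to be at least 1.
 */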
551 static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
552                 struct drm_gem_object *obj)
553 {
554         u32 *buf = msm_gem_get_vaddr(obj);
555
556         if (IS_ERR(buf))
557                 return;
558
559         /*
560          * If the lowest nibble is 0xa that is an indication that this microcode
561          * has been patched. The actual version is in dword [3] but we only care
562          * about the patchlevel which is the lowest nibble of dword [3]
563          */
564         if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
565                 a5xx_gpu->has_whereami = true;
566
567         msm_gem_put_vaddr(obj);
568 }
569
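/*
 * Allocate (once) the PM4 and PFP firmware buffers, check the PFP image
 * for WHERE_AM_I support, and point the CP instruction base registers at
 * them.
 */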
570 static int a5xx_ucode_init(struct msm_gpu *gpu)
571 {
572         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
573         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
574         int ret;
575
576         if (!a5xx_gpu->pm4_bo) {
577                 a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
578                         adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
579
580
581                 if (IS_ERR(a5xx_gpu->pm4_bo)) {
582                         ret = PTR_ERR(a5xx_gpu->pm4_bo);
583                         a5xx_gpu->pm4_bo = NULL;
584                         DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
585                                 ret);
586                         return ret;
587                 }
588
589                 msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
590         }
591
592         if (!a5xx_gpu->pfp_bo) {
593                 a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
594                         adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
595
596                 if (IS_ERR(a5xx_gpu->pfp_bo)) {
597                         ret = PTR_ERR(a5xx_gpu->pfp_bo);
598                         a5xx_gpu->pfp_bo = NULL;
599                         DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
600                                 ret);
601                         return ret;
602                 }
603
604                 msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
605                 a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
606         }
607
608         gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
609
610         gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
611
612         return 0;
613 }
614
615 #define SCM_GPU_ZAP_SHADER_RESUME 0
616
617 static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
618 {
619         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
620         int ret;
621
622         /*
623          * Adreno 506 has the CPZ retention feature and doesn't require
624          * the zap shader to be resumed
625          */
626         if (adreno_is_a506(adreno_gpu))
627                 return 0;
628
629         ret = qcom_scm_set_remote_state(SCM_GPU_ZAP_SHADER_RESUME, GPU_PAS_ID);
630         if (ret)
631                 DRM_ERROR("%s: zap-shader resume failed: %d\n",
632                         gpu->name, ret);
633
634         return ret;
635 }
636
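/*
 * Load the zap shader into the secure world the first time around; on
 * subsequent initializations just ask the remote processor to
 * reinitialize the already-loaded image.
 */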
637 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
638 {
639         static bool loaded;
640         int ret;
641
642         /*
643          * If the zap shader is already loaded into memory we just need to kick
644          * the remote processor to reinitialize it
645          */
646         if (loaded)
647                 return a5xx_zap_shader_resume(gpu);
648
649         ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
650
651         loaded = !ret;
652         return ret;
653 }
654
655 #define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
656           A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
657           A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
658           A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
659           A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
660           A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
661           A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
662           A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
663           A5XX_RBBM_INT_0_MASK_CP_SW | \
664           A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
665           A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
666           A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
667
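/*
 * Bring the GPU up from scratch: program VBIF/RBBM/UCHE/CP configuration
 * and the CP protect ranges, load the GPMU and CP microcode, set up the
 * ringbuffer and (if supported) the rptr shadow, start the micro engine,
 * initialize power, drop out of secure mode and finally kick off
 * preemption.
 */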
668 static int a5xx_hw_init(struct msm_gpu *gpu)
669 {
670         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
671         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
672         u32 regbit;
673         int ret;
674
675         gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
676
677         if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
678             adreno_is_a540(adreno_gpu))
679                 gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
680
681         /* Make all blocks contribute to the GPU BUSY perf counter */
682         gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
683
684         /* Enable RBBM error reporting bits */
685         gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
686
687         if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
688                 /*
689                  * Mask out the activity signals from RB1-3 to avoid false
690                  * positives
691                  */
692
693                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
694                         0xF0000000);
695                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
696                         0xFFFFFFFF);
697                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
698                         0xFFFFFFFF);
699                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
700                         0xFFFFFFFF);
701                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
702                         0xFFFFFFFF);
703                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
704                         0xFFFFFFFF);
705                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
706                         0xFFFFFFFF);
707                 gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
708                         0xFFFFFFFF);
709         }
710
711         /* Enable fault detection */
712         gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
713                 (1 << 30) | 0xFFFF);
714
715         /* Turn on performance counters */
716         gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
717
718         /* Select CP0 to always count cycles */
719         gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
720
721         /* Select countable 6 for RBBM perf counter 0 to get the busy status for devfreq */
722         gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
723
724         /* Increase VFD cache access so LRZ and other data gets evicted less */
725         gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
726
727         /* Disable L2 bypass in the UCHE */
728         gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
729         gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
730         gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
731         gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
732
733         /* Set the GMEM VA range (0 to gpu->gmem) */
734         gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
735         gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00000000);
736         gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
737                 0x00100000 + adreno_gpu->gmem - 1);
738         gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
739
740         if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
741             adreno_is_a510(adreno_gpu)) {
742                 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
743                 if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
744                         gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
745                 else
746                         gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
747                 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x40000030);
748                 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x20100D0A);
749         } else {
750                 gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
751                 if (adreno_is_a530(adreno_gpu))
752                         gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
753                 else
754                         gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
755                 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
756                 gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
757         }
758
759         if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
760                 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
761                           (0x100 << 11 | 0x100 << 22));
762         else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
763                  adreno_is_a512(adreno_gpu))
764                 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
765                           (0x200 << 11 | 0x200 << 22));
766         else
767                 gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
768                           (0x400 << 11 | 0x300 << 22));
769
770         if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
771                 gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
772
773         /*
774          * Disable the RB sampler datapath DP2 clock gating optimization
775          * for 1-SP GPUs, as it is enabled by default.
776          */
777         if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
778             adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
779                 gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
780
781         /* Disable UCHE global filter as SP can invalidate/flush independently */
782         gpu_write(gpu, REG_A5XX_UCHE_MODE_CNTL, BIT(29));
783
784         /* Enable USE_RETENTION_FLOPS */
785         gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
786
787         /* Enable ME/PFP split notification */
788         gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
789
790         /*
791          *  In A5x, the CCU can send the context_done event of a particular
792          *  context to the UCHE, which ultimately reaches the CP, even while a
793          *  valid transaction of that context is still inside the CCU. This can
794          *  let the CP program config registers, which will make the "valid
795          *  transaction" inside the CCU be interpreted differently and can cause
796          *  a GPU fault. This bug is fixed in the latest A510 revision. To enable
797          *  the bug fix, bit[11] of RB_DBG_ECO_CNTL needs to be set to 0; the
798          *  default is 1 (disabled). For older A510 revisions this bit is unused.
799          */
800         if (adreno_is_a510(adreno_gpu))
801                 gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, (1 << 11), 0);
802
803         /* Enable HWCG */
804         a5xx_set_hwcg(gpu, true);
805
806         gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
807
808         /* Set the highest bank bit */
809         if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu))
810                 regbit = 2;
811         else
812                 regbit = 1;
813
814         gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, regbit << 7);
815         gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, regbit << 1);
816
817         if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu) ||
818             adreno_is_a540(adreno_gpu))
819                 gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2, regbit);
820
821         /* Disable All flat shading optimization (ALLFLATOPTDIS) */
822         gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, (1 << 10));
823
824         /* Protect registers from the CP */
825         gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
826
827         /* RBBM */
828         gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
829         gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
830         gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
831         gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
832         gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
833         gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
834
835         /* Content protect */
836         gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
837                 ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
838                         16));
839         gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
840                 ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
841
842         /* CP */
843         gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
844         gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
845         gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
846         gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
847
848         /* RB */
849         gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
850         gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
851
852         /* VPC */
853         gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
854         gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 16));
855
856         /* UCHE */
857         gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
858
859         /* SMMU */
860         gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
861                         ADRENO_PROTECT_RW(0x10000, 0x8000));
862
863         gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
864         /*
865          * Disable the trusted memory range - we don't actually support secure
866          * memory rendering at this point in time and we don't want to block off
867          * part of the virtual memory space.
868          */
869         gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 0x00000000);
870         gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
871
872         /* Put the GPU into 64 bit by default */
873         gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
874         gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
875         gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
876         gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
877         gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
878         gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
879         gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
880         gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
881         gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
882         gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
883         gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
884         gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
885
886         /*
887          * A VPC corner case with local memory load-kill leads to corrupt
888          * internal state. The normal disable does not work for all a5x chips,
889          * so use the following settings to disable it.
890          */
891         if (adreno_gpu->info->quirks & ADRENO_QUIRK_LMLOADKILL_DISABLE) {
892                 gpu_rmw(gpu, REG_A5XX_VPC_DBG_ECO_CNTL, 0, BIT(23));
893                 gpu_rmw(gpu, REG_A5XX_HLSQ_DBG_ECO_CNTL, BIT(18), 0);
894         }
895
896         ret = adreno_hw_init(gpu);
897         if (ret)
898                 return ret;
899
900         if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
901                 a5xx_gpmu_ucode_init(gpu);
902
903         ret = a5xx_ucode_init(gpu);
904         if (ret)
905                 return ret;
906
907         /* Set the ringbuffer address */
908         gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
909
910         /*
911          * If the microcode supports the WHERE_AM_I opcode then we can use that
912          * in lieu of the RPTR shadow and enable preemption. Otherwise, we
913          * can't safely use the RPTR shadow or preemption. In either case, the
914          * RPTR shadow should be disabled in hardware.
915          */
916         gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
917                 MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
918
919         /* Create a privileged buffer for the RPTR shadow */
920         if (a5xx_gpu->has_whereami) {
921                 if (!a5xx_gpu->shadow_bo) {
922                         a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
923                                 sizeof(u32) * gpu->nr_rings,
924                                 MSM_BO_WC | MSM_BO_MAP_PRIV,
925                                 gpu->aspace, &a5xx_gpu->shadow_bo,
926                                 &a5xx_gpu->shadow_iova);
927
928                         if (IS_ERR(a5xx_gpu->shadow))
929                                 return PTR_ERR(a5xx_gpu->shadow);
930
931                         msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
932                 }
933
934                 gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
935                             shadowptr(a5xx_gpu, gpu->rb[0]));
936         } else if (gpu->nr_rings > 1) {
937                 /* Disable preemption if WHERE_AM_I isn't available */
938                 a5xx_preempt_fini(gpu);
939                 gpu->nr_rings = 1;
940         }
941
942         a5xx_preempt_hw_init(gpu);
943
944         /* Enable the RBBM interrupts selected by A5XX_INT_MASK */
945         gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
946
947         /* Clear ME_HALT to start the micro engine */
948         gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
949         ret = a5xx_me_init(gpu);
950         if (ret)
951                 return ret;
952
953         ret = a5xx_power_init(gpu);
954         if (ret)
955                 return ret;
956
957         /*
958          * Send a pipeline event stat to get misbehaving counters to start
959          * ticking correctly
960          */
961         if (adreno_is_a530(adreno_gpu)) {
962                 OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
963                 OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
964
965                 a5xx_flush(gpu, gpu->rb[0], true);
966                 if (!a5xx_idle(gpu, gpu->rb[0]))
967                         return -EINVAL;
968         }
969
970         /*
971          * If the chip that we are using supports loading one, try to load a
972          * zap shader into the secure world. If successful we can use the CP
973          * to switch out of secure mode. If not then we have no recourse but
974          * to try to switch ourselves out manually. If we
975          * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
976          * be blocked and a permissions violation will soon follow.
977          */
978         ret = a5xx_zap_shader_init(gpu);
979         if (!ret) {
980                 OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
981                 OUT_RING(gpu->rb[0], 0x00000000);
982
983                 a5xx_flush(gpu, gpu->rb[0], true);
984                 if (!a5xx_idle(gpu, gpu->rb[0]))
985                         return -EINVAL;
986         } else if (ret == -ENODEV) {
987                 /*
988                  * This device does not use zap shader (but print a warning
989                  * just in case someone got their dt wrong.. hopefully they
990                  * have a debug UART to realize the error of their ways...
991                  * if you mess this up you are about to crash horribly)
992                  */
993                 dev_warn_once(gpu->dev->dev,
994                         "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
995                 gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
996         } else {
997                 return ret;
998         }
999
1000         /* Last step - yield the ringbuffer */
1001         a5xx_preempt_start(gpu);
1002
1003         return 0;
1004 }
1005
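/*
 * Hang recovery: dump the CP scratch registers (and the full GPU state if
 * hang_debug is set), pulse RBBM_SW_RESET_CMD and then let the common
 * adreno_recover() path reinitialize the GPU.
 */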
1006 static void a5xx_recover(struct msm_gpu *gpu)
1007 {
1008         int i;
1009
1010         adreno_dump_info(gpu);
1011
1012         for (i = 0; i < 8; i++) {
1013                 printk("CP_SCRATCH_REG%d: %u\n", i,
1014                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(i)));
1015         }
1016
1017         if (hang_debug)
1018                 a5xx_dump(gpu);
1019
1020         gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
1021         gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
1022         gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
1023         adreno_recover(gpu);
1024 }
1025
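/*
 * Tear down the a5xx-specific state: preemption records, the PM4/PFP/GPMU
 * firmware buffers and the rptr shadow, then hand off to the common adreno
 * cleanup.
 */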
1026 static void a5xx_destroy(struct msm_gpu *gpu)
1027 {
1028         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1029         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1030
1031         DBG("%s", gpu->name);
1032
1033         a5xx_preempt_fini(gpu);
1034
1035         if (a5xx_gpu->pm4_bo) {
1036                 msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
1037                 drm_gem_object_put(a5xx_gpu->pm4_bo);
1038         }
1039
1040         if (a5xx_gpu->pfp_bo) {
1041                 msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
1042                 drm_gem_object_put(a5xx_gpu->pfp_bo);
1043         }
1044
1045         if (a5xx_gpu->gpmu_bo) {
1046                 msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
1047                 drm_gem_object_put(a5xx_gpu->gpmu_bo);
1048         }
1049
1050         if (a5xx_gpu->shadow_bo) {
1051                 msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
1052                 drm_gem_object_put(a5xx_gpu->shadow_bo);
1053         }
1054
1055         adreno_gpu_cleanup(adreno_gpu);
1056         kfree(a5xx_gpu);
1057 }
1058
1059 static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
1060 {
1061         if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
1062                 return false;
1063
1064         /*
1065          * Nearly every abnormality ends up pausing the GPU and triggering a
1066          * fault so we can safely just watch for this one interrupt to fire
1067          */
1068         return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
1069                 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
1070 }
1071
1072 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
1073 {
1074         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1075         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1076
1077         if (ring != a5xx_gpu->cur_ring) {
1078                 WARN(1, "Tried to idle a non-current ringbuffer\n");
1079                 return false;
1080         }
1081
1082         /* wait for CP to drain ringbuffer: */
1083         if (!adreno_idle(gpu, ring))
1084                 return false;
1085
1086         if (spin_until(_a5xx_check_idle(gpu))) {
1087                 DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
1088                         gpu->name, __builtin_return_address(0),
1089                         gpu_read(gpu, REG_A5XX_RBBM_STATUS),
1090                         gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS),
1091                         gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
1092                         gpu_read(gpu, REG_A5XX_CP_RB_WPTR));
1093                 return false;
1094         }
1095
1096         return true;
1097 }
1098
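/*
 * IOMMU fault callback: log the faulting IOVA and flags along with CP
 * scratch registers 4-7 to give a hint of what the GPU was doing at the
 * time of the fault.
 */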
1099 static int a5xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
1100 {
1101         struct msm_gpu *gpu = arg;
1102         pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
1103                         iova, flags,
1104                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)),
1105                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)),
1106                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)),
1107                         gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7)));
1108
1109         return 0;
1110 }
1111
1112 static void a5xx_cp_err_irq(struct msm_gpu *gpu)
1113 {
1114         u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
1115
1116         if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
1117                 u32 val;
1118
1119                 gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
1120
1121                 /*
1122                  * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
1123                  * read it twice
1124                  */
1125
1126                 gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
1127                 val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
1128
1129                 dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
1130                         val);
1131         }
1132
1133         if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
1134                 dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
1135                         gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
1136
1137         if (status & A5XX_CP_INT_CP_DMA_ERROR)
1138                 dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
1139
1140         if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
1141                 u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
1142
1143                 dev_err_ratelimited(gpu->dev->dev,
1144                         "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
1145                         val & (1 << 24) ? "WRITE" : "READ",
1146                         (val & 0xFFFFF) >> 2, val);
1147         }
1148
1149         if (status & A5XX_CP_INT_CP_AHB_ERROR) {
1150                 u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
1151                 const char *access[16] = { "reserved", "reserved",
1152                         "timestamp lo", "timestamp hi", "pfp read", "pfp write",
1153                         "", "", "me read", "me write", "", "", "crashdump read",
1154                         "crashdump write" };
1155
1156                 dev_err_ratelimited(gpu->dev->dev,
1157                         "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
1158                         status & 0xFFFFF, access[(status >> 24) & 0xF],
1159                         (status & (1 << 31)), status);
1160         }
1161 }
1162
1163 static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
1164 {
1165         if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
1166                 u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
1167
1168                 dev_err_ratelimited(gpu->dev->dev,
1169                         "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
1170                         val & (1 << 28) ? "WRITE" : "READ",
1171                         (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
1172                         (val >> 24) & 0xF);
1173
1174                 /* Clear the error */
1175                 gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
1176
1177                 /* Clear the interrupt */
1178                 gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
1179                         A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
1180         }
1181
1182         if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
1183                 dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
1184
1185         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
1186                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
1187                         gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
1188
1189         if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
1190                 dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
1191                         gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
1192
1193         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
1194                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
1195                         gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
1196
1197         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
1198                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
1199
1200         if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
1201                 dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
1202 }
1203
1204 static void a5xx_uche_err_irq(struct msm_gpu *gpu)
1205 {
1206         uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
1207
1208         addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
1209
1210         dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
1211                 addr);
1212 }
1213
1214 static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
1215 {
1216         dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
1217 }
1218
1219 static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
1220 {
1221         struct drm_device *dev = gpu->dev;
1222         struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
1223
1224         /*
1225          * If stalled on SMMU fault, we could trip the GPU's hang detection,
1226          * but the fault handler will trigger the devcore dump, and we want
1227          * to otherwise resume normally rather than killing the submit, so
1228          * just bail.
1229          */
1230         if (gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24))
1231                 return;
1232
1233         DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
1234                 ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
1235                 gpu_read(gpu, REG_A5XX_RBBM_STATUS),
1236                 gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
1237                 gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
1238                 gpu_read64(gpu, REG_A5XX_CP_IB1_BASE),
1239                 gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
1240                 gpu_read64(gpu, REG_A5XX_CP_IB2_BASE),
1241                 gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
1242
1243         /* Turn off the hangcheck timer to keep it from bothering us */
1244         del_timer(&gpu->hangcheck_timer);
1245
1246         kthread_queue_work(gpu->worker, &gpu->recover_work);
1247 }
1248
1249 #define RBBM_ERROR_MASK \
1250         (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
1251         A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
1252         A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
1253         A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
1254         A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
1255         A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
1256
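/*
 * Top-level interrupt handler: ack everything except RBBM_AHB_ERROR (which
 * must only be cleared after its source), optionally drop error interrupts
 * when disable_err_irq is set, then fan out to the per-block error
 * handlers, retire processing and preemption handling.
 */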
1257 static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
1258 {
1259         struct msm_drm_private *priv = gpu->dev->dev_private;
1260         u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
1261
1262         /*
1263          * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
1264          * before the source is cleared the interrupt will storm.
1265          */
1266         gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
1267                 status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
1268
1269         if (priv->disable_err_irq) {
1270                 status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
1271                           A5XX_RBBM_INT_0_MASK_CP_SW;
1272         }
1273
1274         /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
1275         if (status & RBBM_ERROR_MASK)
1276                 a5xx_rbbm_err_irq(gpu, status);
1277
1278         if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
1279                 a5xx_cp_err_irq(gpu);
1280
1281         if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
1282                 a5xx_fault_detect_irq(gpu);
1283
1284         if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
1285                 a5xx_uche_err_irq(gpu);
1286
1287         if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
1288                 a5xx_gpmu_err_irq(gpu);
1289
1290         if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
1291                 a5xx_preempt_trigger(gpu);
1292                 msm_gpu_retire(gpu);
1293         }
1294
1295         if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
1296                 a5xx_preempt_irq(gpu);
1297
1298         return IRQ_HANDLED;
1299 }
1300
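/*
 * Register ranges captured for register dumps and crash state: inclusive
 * { start, end } pairs of register offsets, terminated by ~0.  The list is
 * hooked up via adreno_gpu->registers in a5xx_gpu_init() below.
 */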
1301 static const u32 a5xx_registers[] = {
1302         0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
1303         0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
1304         0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
1305         0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
1306         0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
1307         0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
1308         0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
1309         0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
1310         0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
1311         0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
1312         0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
1313         0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
1314         0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
1315         0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
1316         0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
1317         0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
1318         0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
1319         0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
1320         0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
1321         0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
1322         0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
1323         0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
1324         0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
1325         0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
1326         0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
1327         0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
1328         0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
1329         0xAC60, 0xAC60, ~0,
1330 };
1331
1332 static void a5xx_dump(struct msm_gpu *gpu)
1333 {
1334         DRM_DEV_INFO(gpu->dev->dev, "status:   %08x\n",
1335                 gpu_read(gpu, REG_A5XX_RBBM_STATUS));
1336         adreno_dump(gpu);
1337 }
1338
1339 static int a5xx_pm_resume(struct msm_gpu *gpu)
1340 {
1341         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1342         int ret;
1343
1344         /* Turn on the core power */
1345         ret = msm_gpu_pm_resume(gpu);
1346         if (ret)
1347                 return ret;
1348
1349         /* Adreno 506, 508, 509, 510 and 512 need manual RBBM suspend/resume control */
1350         if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
1351                 /* Halt the sp_input_clk at HM level */
1352                 gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
1353                 a5xx_set_hwcg(gpu, true);
1354                 /* Turn on sp_input_clk at HM level */
1355                 gpu_rmw(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xff, 0);
1356                 return 0;
1357         }
1358
1359         /* Turn on the RBCCU power domain first to limit the chances of voltage droop */
1360         gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
1361
1362         /* Wait 3 usecs before polling */
1363         udelay(3);
1364
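        /* Poll bit 20 of the RBCCU power/clock status for the GDSC to come up */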
1365         ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
1366                 (1 << 20), (1 << 20));
1367         if (ret) {
1368                 DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
1369                         gpu->name,
1370                         gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
1371                 return ret;
1372         }
1373
1374         /* Turn on the SP domain */
1375         gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
1376         ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
1377                 (1 << 20), (1 << 20));
1378         if (ret)
1379                 DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
1380                         gpu->name);
1381
1382         return ret;
1383 }
1384
1385 static int a5xx_pm_suspend(struct msm_gpu *gpu)
1386 {
1387         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1388         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1389         u32 mask = 0xf;
1390         int i, ret;
1391
1392         /* A506, A508, A510 have 3 XIN ports in VBIF */
1393         if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
1394             adreno_is_a510(adreno_gpu))
1395                 mask = 0x7;
1396
1397         /* Clear the VBIF pipe before shutting down */
1398         gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, mask);
1399         spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) &
1400                                 mask) == mask);
1401
1402         gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
1403
1404         /*
1405          * Reset the VBIF before power collapse to avoid issues with FIFO
1406          * entries on A510 and A530 (the reset tends to lock up the other parts)
1407          */
1408         if (adreno_is_a510(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
1409                 gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
1410                 gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
1411         }
1412
1413         ret = msm_gpu_pm_suspend(gpu);
1414         if (ret)
1415                 return ret;
1416
1417         if (a5xx_gpu->has_whereami)
1418                 for (i = 0; i < gpu->nr_rings; i++)
1419                         a5xx_gpu->shadow[i] = 0;
1420
1421         return 0;
1422 }
1423
1424 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
1425 {
1426         *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO);
1427
1428         return 0;
1429 }
1430
1431 struct a5xx_crashdumper {
1432         void *ptr;
1433         struct drm_gem_object *bo;
1434         u64 iova;
1435 };
1436
1437 struct a5xx_gpu_state {
1438         struct msm_gpu_state base;
1439         u32 *hlsqregs;
1440 };
1441
1442 static int a5xx_crashdumper_init(struct msm_gpu *gpu,
1443                 struct a5xx_crashdumper *dumper)
1444 {
1445         dumper->ptr = msm_gem_kernel_new(gpu->dev,
1446                 SZ_1M, MSM_BO_WC, gpu->aspace,
1447                 &dumper->bo, &dumper->iova);
1448
1449         if (!IS_ERR(dumper->ptr))
1450                 msm_gem_object_set_name(dumper->bo, "crashdump");
1451
1452         return PTR_ERR_OR_ZERO(dumper->ptr);
1453 }
1454
1455 static int a5xx_crashdumper_run(struct msm_gpu *gpu,
1456                 struct a5xx_crashdumper *dumper)
1457 {
1458         u32 val;
1459
1460         if (IS_ERR_OR_NULL(dumper->ptr))
1461                 return -EINVAL;
1462
1463         gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO, dumper->iova);
1464
1465         gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
1466
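        /* Wait up to 10ms for bit 2 of the control register to signal that the dump has completed */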
1467         return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
1468                 val & 0x04, 100, 10000);
1469 }
1470
1471 /*
1472  * This is a list of the registers that need to be read through the HLSQ
1473  * aperture via the crashdumper.  They are not normally accessible from
1474  * the CPU on a secure platform.
1475  */
1476 static const struct {
1477         u32 type;
1478         u32 regoffset;
1479         u32 count;
1480 } a5xx_hlsq_aperture_regs[] = {
1481         { 0x35, 0xe00, 0x32 },   /* HLSQ non-context */
1482         { 0x31, 0x2080, 0x1 },   /* HLSQ 2D context 0 */
1483         { 0x33, 0x2480, 0x1 },   /* HLSQ 2D context 1 */
1484         { 0x32, 0xe780, 0x62 },  /* HLSQ 3D context 0 */
1485         { 0x34, 0xef80, 0x62 },  /* HLSQ 3D context 1 */
1486         { 0x3f, 0x0ec0, 0x40 },  /* SP non-context */
1487         { 0x3d, 0x2040, 0x1 },   /* SP 2D context 0 */
1488         { 0x3b, 0x2440, 0x1 },   /* SP 2D context 1 */
1489         { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
1490         { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
1491         { 0x3a, 0x0f00, 0x1c },  /* TP non-context */
1492         { 0x38, 0x2000, 0xa },   /* TP 2D context 0 */
1493         { 0x36, 0x2400, 0xa },   /* TP 2D context 1 */
1494         { 0x39, 0xe700, 0x80 },  /* TP 3D context 0 */
1495         { 0x37, 0xef00, 0x80 },  /* TP 3D context 1 */
1496 };
1497
1498 static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
1499                 struct a5xx_gpu_state *a5xx_state)
1500 {
1501         struct a5xx_crashdumper dumper = { 0 };
1502         u32 offset, count = 0;
1503         u64 *ptr;
1504         int i;
1505
1506         if (a5xx_crashdumper_init(gpu, &dumper))
1507                 return;
1508
1509         /* The script will be written at offset 0 */
1510         ptr = dumper.ptr;
1511
1512         /* Start writing the data at offset 256k */
1513         offset = dumper.iova + (256 * SZ_1K);
1514
1515         /* Count how many additional registers to get from the HLSQ aperture */
1516         for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
1517                 count += a5xx_hlsq_aperture_regs[i].count;
1518
1519         a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
1520         if (!a5xx_state->hlsqregs)
1521                 return;
1522
1523         /* Build the crashdump script */
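        /*
         * Each script entry built below is a pair of 64-bit words: the first
         * holds the payload (the value to write, or the destination IOVA for
         * a read-back) and the second packs the register offset into the top
         * bits (offset << 44) together with the dword count (plus bit 21 for
         * the select write).  Per HLSQ block that gives, roughly:
         *
         *   { type << 8,    (REG_A5XX_HLSQ_DBG_READ_SEL << 44) | (1 << 21) | 1 }
         *   { data offset,  (REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE << 44) | count }
         */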
1524         for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
1525                 u32 type = a5xx_hlsq_aperture_regs[i].type;
1526                 u32 c = a5xx_hlsq_aperture_regs[i].count;
1527
1528                 /* Write the register to select the desired bank */
1529                 *ptr++ = ((u64) type << 8);
1530                 *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
1531                         (1 << 21) | 1;
1532
1533                 *ptr++ = offset;
1534                 *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
1535                         | c;
1536
1537                 offset += c * sizeof(u32);
1538         }
1539
1540         /* Write two zeros to close off the script */
1541         *ptr++ = 0;
1542         *ptr++ = 0;
1543
1544         if (a5xx_crashdumper_run(gpu, &dumper)) {
1545                 kfree(a5xx_state->hlsqregs);
1546                 msm_gem_kernel_put(dumper.bo, gpu->aspace);
1547                 return;
1548         }
1549
1550         /* Copy the data from the crashdumper to the state */
1551         memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
1552                 count * sizeof(u32));
1553
1554         msm_gem_kernel_put(dumper.bo, gpu->aspace);
1555 }
1556
1557 static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
1558 {
1559         struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
1560                         GFP_KERNEL);
1561         bool stalled = !!(gpu_read(gpu, REG_A5XX_RBBM_STATUS3) & BIT(24));
1562
1563         if (!a5xx_state)
1564                 return ERR_PTR(-ENOMEM);
1565
1566         /* Temporarily disable hardware clock gating before reading the hw */
1567         a5xx_set_hwcg(gpu, false);
1568
1569         /* First get the generic state from the adreno core */
1570         adreno_gpu_state_get(gpu, &(a5xx_state->base));
1571
1572         a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
1573
1574         /*
1575          * Get the HLSQ regs with the help of the crashdumper, but only if
1576          * we are not stalled in an iommu fault (in which case the crashdumper
1577          * would not have access to memory)
1578          */
1579         if (!stalled)
1580                 a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
1581
1582         a5xx_set_hwcg(gpu, true);
1583
1584         return &a5xx_state->base;
1585 }
1586
1587 static void a5xx_gpu_state_destroy(struct kref *kref)
1588 {
1589         struct msm_gpu_state *state = container_of(kref,
1590                 struct msm_gpu_state, ref);
1591         struct a5xx_gpu_state *a5xx_state = container_of(state,
1592                 struct a5xx_gpu_state, base);
1593
1594         kfree(a5xx_state->hlsqregs);
1595
1596         adreno_gpu_state_destroy(state);
1597         kfree(a5xx_state);
1598 }
1599
1600 static int a5xx_gpu_state_put(struct msm_gpu_state *state)
1601 {
1602         if (IS_ERR_OR_NULL(state))
1603                 return 1;
1604
1605         return kref_put(&state->ref, a5xx_gpu_state_destroy);
1606 }
1607
1608
1609 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
1610 static void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
1611                       struct drm_printer *p)
1612 {
1613         int i, j;
1614         u32 pos = 0;
1615         struct a5xx_gpu_state *a5xx_state = container_of(state,
1616                 struct a5xx_gpu_state, base);
1617
1618         if (IS_ERR_OR_NULL(state))
1619                 return;
1620
1621         adreno_show(gpu, state, p);
1622
1623         /* Dump the additional a5xx HLSQ registers */
1624         if (!a5xx_state->hlsqregs)
1625                 return;
1626
1627         drm_printf(p, "registers-hlsq:\n");
1628
1629         for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
1630                 u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
1631                 u32 c = a5xx_hlsq_aperture_regs[i].count;
1632
1633                 for (j = 0; j < c; j++, pos++, o++) {
1634                         /*
1635                          * To keep the crashdump simple we pull the entire range
1636                          * for each register type, but not all of the registers
1637                          * in the range are valid. Fortunately, invalid registers
1638                          * stick out like a sore thumb with a value of
1639                          * 0xdeadbeef.
1640                          */
1641                         if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
1642                                 continue;
1643
1644                         drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
1645                                 o << 2, a5xx_state->hlsqregs[pos]);
1646                 }
1647         }
1648 }
1649 #endif
1650
1651 static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
1652 {
1653         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1654         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1655
1656         return a5xx_gpu->cur_ring;
1657 }
1658
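/*
 * Report the raw RBBM busy-cycle counter along with the current core clock
 * rate; the devfreq code is expected to turn the cycle delta into a
 * utilization figure using the returned sample rate.
 */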
1659 static u64 a5xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
1660 {
1661         u64 busy_cycles;
1662
1663         busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO);
1664         *out_sample_rate = clk_get_rate(gpu->core_clk);
1665
1666         return busy_cycles;
1667 }
1668
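/*
 * With the WHERE_AM_I shadow available (has_whereami), the CP keeps the read
 * pointer for each ring up to date in the shadow buffer, so a register read
 * can be avoided; otherwise fall back to reading CP_RB_RPTR directly.
 */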
1669 static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
1670 {
1671         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1672         struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
1673
1674         if (a5xx_gpu->has_whereami)
1675                 return a5xx_gpu->shadow[ring->id];
1676
1677         return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
1678 }
1679
1680 static const struct adreno_gpu_funcs funcs = {
1681         .base = {
1682                 .get_param = adreno_get_param,
1683                 .set_param = adreno_set_param,
1684                 .hw_init = a5xx_hw_init,
1685                 .pm_suspend = a5xx_pm_suspend,
1686                 .pm_resume = a5xx_pm_resume,
1687                 .recover = a5xx_recover,
1688                 .submit = a5xx_submit,
1689                 .active_ring = a5xx_active_ring,
1690                 .irq = a5xx_irq,
1691                 .destroy = a5xx_destroy,
1692 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
1693                 .show = a5xx_show,
1694 #endif
1695 #if defined(CONFIG_DEBUG_FS)
1696                 .debugfs_init = a5xx_debugfs_init,
1697 #endif
1698                 .gpu_busy = a5xx_gpu_busy,
1699                 .gpu_state_get = a5xx_gpu_state_get,
1700                 .gpu_state_put = a5xx_gpu_state_put,
1701                 .create_address_space = adreno_create_address_space,
1702                 .get_rptr = a5xx_get_rptr,
1703         },
1704         .get_timestamp = a5xx_get_timestamp,
1705 };
1706
1707 static void check_speed_bin(struct device *dev)
1708 {
1709         struct nvmem_cell *cell;
1710         u32 val;
1711
1712         /*
1713          * If the OPP table specifies an opp-supported-hw property then we
1714          * have to set something with dev_pm_opp_set_supported_hw() or the
1715          * table doesn't get populated.  Pick an arbitrary value that should
1716          * ensure the default frequencies are selected without conflicting
1717          * with any actual bins.
1718          */
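        /*
         * The value is matched bitwise against the opp-supported-hw masks in
         * the table; for example (illustrative), a fused speed bin of 2 gives
         * val = 1 << 2 = 0x4, enabling only the OPPs whose opp-supported-hw
         * mask has bit 2 set.
         */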
1719         val = 0x80;
1720
1721         cell = nvmem_cell_get(dev, "speed_bin");
1722
1723         if (!IS_ERR(cell)) {
1724                 void *buf = nvmem_cell_read(cell, NULL);
1725
1726                 if (!IS_ERR(buf)) {
1727                         u8 bin = *((u8 *) buf);
1728
1729                         val = (1 << bin);
1730                         kfree(buf);
1731                 }
1732
1733                 nvmem_cell_put(cell);
1734         }
1735
1736         devm_pm_opp_set_supported_hw(dev, &val, 1);
1737 }
1738
1739 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
1740 {
1741         struct msm_drm_private *priv = dev->dev_private;
1742         struct platform_device *pdev = priv->gpu_pdev;
1743         struct a5xx_gpu *a5xx_gpu = NULL;
1744         struct adreno_gpu *adreno_gpu;
1745         struct msm_gpu *gpu;
1746         int ret;
1747
1748         if (!pdev) {
1749                 DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
1750                 return ERR_PTR(-ENXIO);
1751         }
1752
1753         a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
1754         if (!a5xx_gpu)
1755                 return ERR_PTR(-ENOMEM);
1756
1757         adreno_gpu = &a5xx_gpu->base;
1758         gpu = &adreno_gpu->base;
1759
1760         adreno_gpu->registers = a5xx_registers;
1761
1762         a5xx_gpu->lm_leakage = 0x4E001A;
1763
1764         check_speed_bin(&pdev->dev);
1765
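        /*
         * Register with the adreno core.  The final argument is the number
         * of ringbuffers to create; four are requested so that the
         * preemption support set up below has multiple rings to switch
         * between.
         */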
1766         ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
1767         if (ret) {
1768                 a5xx_destroy(&(a5xx_gpu->base.base));
1769                 return ERR_PTR(ret);
1770         }
1771
1772         if (gpu->aspace)
1773                 msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
1774
1775         /* Set up the preemption specific bits and pieces for each ringbuffer */
1776         a5xx_preempt_init(gpu);
1777
1778         return gpu;
1779 }