/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
#include "radeon_ucode.h"
#include "clearstate_ci.h"
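
/* Firmware binaries this driver may request from userspace via
 * request_firmware(); see cik_init_microcode() below.
 */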
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
MODULE_FIRMWARE("radeon/BONAIRE_ce.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
MODULE_FIRMWARE("radeon/HAWAII_mec.bin");
MODULE_FIRMWARE("radeon/HAWAII_mc.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern void si_rlc_reset(struct radeon_device *rdev);
extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
extern int cik_sdma_resume(struct radeon_device *rdev);
extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
extern void cik_sdma_fini(struct radeon_device *rdev);
static void cik_rlc_stop(struct radeon_device *rdev);
static void cik_pcie_gen3_enable(struct radeon_device *rdev);
static void cik_program_aspm(struct radeon_device *rdev);
static void cik_init_pg(struct radeon_device *rdev);
static void cik_init_cg(struct radeon_device *rdev);
static void cik_fini_pg(struct radeon_device *rdev);
static void cik_fini_cg(struct radeon_device *rdev);
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable);
/* get temperature in millidegrees */
int ci_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

/* get temperature in millidegrees */
int kv_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}
/*
 * Indirect registers accessor
 */
u32 cik_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	r = RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
	return r;
}

void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pciep_idx_lock, flags);
	WREG32(PCIE_INDEX, reg);
	(void)RREG32(PCIE_INDEX);
	WREG32(PCIE_DATA, v);
	(void)RREG32(PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pciep_idx_lock, flags);
}
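
/* RLC save/restore register lists, consumed by sumo_rlc_init(): each
 * entry packs an SE/instance selector into the upper 16 bits and a
 * register dword offset into the lower 16 bits; most entries are
 * followed by a data word.
 */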
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc178 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc278 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc27c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc280 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc284 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc288 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc29c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xae00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0001 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc778 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc77c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc780 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc784 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc788 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc78c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a4 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7a8 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7ac >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b0 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc7b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92cc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x92d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x8e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x9e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xae00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0xbe00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc140 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc150 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc15c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc168 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc170 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc204 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8228 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x829c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x869c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc260 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c000 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c00c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xcd20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8900 >> 2),
	0x00000000,
	0x3,
	(0x0e00 << 16) | (0xc130 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc134 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc208 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc264 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc268 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc26c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc270 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc274 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc28c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc290 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc294 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc298 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2a8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc2ac >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x301d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30238 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30250 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30254 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30258 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3025c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc900 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc904 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc908 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc90c >> 2),
	0x00000000,
	(0x4e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0xc910 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc99c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f00 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f04 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f08 >> 2),
	0x00000000,
	(0x0000 << 16) | (0x30f0c >> 2),
	0x00000000,
	(0x0600 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bf0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30a04 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a10 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a14 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a18 >> 2),
	0x00000000,
	(0x0600 << 16) | (0x30a2c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc700 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc704 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc708 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xc768 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc770 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc774 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc798 >> 2),
	0x00000000,
	(0x0400 << 16) | (0xc79c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c010 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c38 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8c3c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9604 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac58 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac68 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac6c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac70 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac74 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac78 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac7c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac80 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac84 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac88 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xac8c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x970c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9714 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x9718 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x971c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x4e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x5e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x6e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x7e00 << 16) | (0x31068 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd10 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0xcd14 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88b8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88bc >> 2),
	0x00000000,
	(0x0400 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x88d8 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8980 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30938 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3093c >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30940 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89a0 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30900 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x30904 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x89b4 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3e1fc >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c210 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c214 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x3c218 >> 2),
	0x00000000,
	(0x0e00 << 16) | (0x8904 >> 2),
	0x00000000,
	0x5,
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
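
/* Golden register tables: {offset, and_mask, or_value} triplets applied
 * by radeon_program_register_sequence() from cik_init_golden_registers().
 */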
static const u32 bonaire_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 bonaire_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};
static const u32 bonaire_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x3350, 0x000c0fc0, 0x00040200,
	0x9a10, 0x00010000, 0x00058208,
	0x3c000, 0xffff1fff, 0x00140000,
	0x3c200, 0xfdfc0fff, 0x00000100,
	0x3c234, 0x40000000, 0x40000200,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x220c, 0x00007fb6, 0x0021a1b1,
	0x2210, 0x00007fb6, 0x002021b1,
	0x2180, 0x00007fb6, 0x00002191,
	0x2218, 0x00007fb6, 0x002121b1,
	0x221c, 0x00007fb6, 0x002021b1,
	0x21dc, 0x00007fb6, 0x00002191,
	0x21e0, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000003f, 0x00000007,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0x9100, 0x03000000, 0x0362c688,
	0x8c00, 0x000000ff, 0x00000001,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac0c, 0xffffffff, 0x00001032
};
static const u32 bonaire_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0xc0000100,
	0x3c2c8, 0xffffffff, 0xc0000100,
	0x3c2c4, 0xffffffff, 0xc0000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};
static const u32 spectre_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 spectre_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};
static const u32 spectre_golden_registers[] =
{
	0x3c000, 0xffff1fff, 0x96940200,
	0x3c00c, 0xffff0001, 0xff000000,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffc, 0x00020200,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x2f48, 0x73773777, 0x12010001,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x28355, 0x0000003f, 0x00000000,
	0x3e78, 0x00000001, 0x00000002,
	0x913c, 0xffff03df, 0x00000004,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000008ff, 0x00000800,
	0x9508, 0x00010000, 0x00010000,
	0xac0c, 0xffffffff, 0x54763210,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x30934, 0xffffffff, 0x00000001
};
static const u32 spectre_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};
static const u32 kalindi_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 kalindi_golden_common_registers[] =
{
	0xc770, 0xffffffff, 0x00000800,
	0xc774, 0xffffffff, 0x00000800,
	0xc798, 0xffffffff, 0x00007fbf,
	0xc79c, 0xffffffff, 0x00007faf
};
static const u32 kalindi_golden_registers[] =
{
	0x3c000, 0xffffdfff, 0x6e944040,
	0x55e4, 0xff607fff, 0xfc000100,
	0x3c220, 0xff000fff, 0x00000100,
	0x3c224, 0xff000fff, 0x00000100,
	0x3c200, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x000fffff, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ffcfff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0x8c00, 0x000000ff, 0x00000003,
	0x214f8, 0x01ff01ff, 0x00000002,
	0x21498, 0x007ff800, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d4, 0x0000001f, 0x00000010,
	0x30934, 0xffffffff, 0x00000000
};
static const u32 kalindi_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffc,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00600100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c000, 0xffffffff, 0x96e00200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xC060000C,
	0x224, 0xc0000fff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};
static const u32 hawaii_golden_spm_registers[] =
{
	0x30800, 0xe0ffffff, 0xe0000000
};

static const u32 hawaii_golden_common_registers[] =
{
	0x30800, 0xffffffff, 0xe0000000,
	0x28350, 0xffffffff, 0x3a00161a,
	0x28354, 0xffffffff, 0x0000002e,
	0x9a10, 0xffffffff, 0x00018208,
	0x98f8, 0xffffffff, 0x12011003
};
static const u32 hawaii_golden_registers[] =
{
	0x3354, 0x00000333, 0x00000333,
	0x9a10, 0x00010000, 0x00058208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0xf0311fff, 0x80300000,
	0x350c, 0x00810000, 0x408af000,
	0x7030, 0x31000111, 0x00000011,
	0x2f48, 0x73773777, 0x12010001,
	0x2120, 0x0000007f, 0x0000001b,
	0x21dc, 0x00007fb6, 0x00002191,
	0x3628, 0x0000003f, 0x0000000a,
	0x362c, 0x0000003f, 0x0000000a,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8bf0, 0x00002001, 0x00000001,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x30a04, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x3e78, 0x00000001, 0x00000002,
	0xc768, 0x00000008, 0x00000008,
	0xc770, 0x00000f00, 0x00000800,
	0xc774, 0x00000f00, 0x00000800,
	0xc798, 0x00ffffff, 0x00ff7fbf,
	0xc79c, 0x00ffffff, 0x00ff7faf,
	0x8c00, 0x000000ff, 0x00000800,
	0xe40, 0x00001fff, 0x00001fff,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xae00, 0x00100000, 0x000ff07c,
	0xac14, 0x000003ff, 0x0000000f,
	0xac10, 0xffffffff, 0x7564fdec,
	0xac0c, 0xffffffff, 0x3120b9a8,
	0xac08, 0x20000000, 0x0f9c0000
};
static const u32 hawaii_mgcg_cgcg_init[] =
{
	0xc420, 0xffffffff, 0xfffffffd,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c2a0, 0xffffffff, 0x00000100,
	0x3c208, 0xffffffff, 0x00000100,
	0x3c2c0, 0xffffffff, 0x00000100,
	0x3c2c8, 0xffffffff, 0x00000100,
	0x3c2c4, 0xffffffff, 0x00000100,
	0x55e4, 0xffffffff, 0x00200100,
	0x3c280, 0xffffffff, 0x00000100,
	0x3c214, 0xffffffff, 0x06000100,
	0x3c220, 0xffffffff, 0x00000100,
	0x3c218, 0xffffffff, 0x06000100,
	0x3c204, 0xffffffff, 0x00000100,
	0x3c2e0, 0xffffffff, 0x00000100,
	0x3c224, 0xffffffff, 0x00000100,
	0x3c200, 0xffffffff, 0x00000100,
	0x3c230, 0xffffffff, 0x00000100,
	0x3c234, 0xffffffff, 0x00000100,
	0x3c250, 0xffffffff, 0x00000100,
	0x3c254, 0xffffffff, 0x00000100,
	0x3c258, 0xffffffff, 0x00000100,
	0x3c25c, 0xffffffff, 0x00000100,
	0x3c260, 0xffffffff, 0x00000100,
	0x3c27c, 0xffffffff, 0x00000100,
	0x3c278, 0xffffffff, 0x00000100,
	0x3c210, 0xffffffff, 0x06000100,
	0x3c290, 0xffffffff, 0x00000100,
	0x3c274, 0xffffffff, 0x00000100,
	0x3c2b4, 0xffffffff, 0x00000100,
	0x3c2b0, 0xffffffff, 0x00000100,
	0x3c270, 0xffffffff, 0x00000100,
	0x30800, 0xffffffff, 0xe0000000,
	0x3c020, 0xffffffff, 0x00010000,
	0x3c024, 0xffffffff, 0x00030002,
	0x3c028, 0xffffffff, 0x00040007,
	0x3c02c, 0xffffffff, 0x00060005,
	0x3c030, 0xffffffff, 0x00090008,
	0x3c034, 0xffffffff, 0x00010000,
	0x3c038, 0xffffffff, 0x00030002,
	0x3c03c, 0xffffffff, 0x00040007,
	0x3c040, 0xffffffff, 0x00060005,
	0x3c044, 0xffffffff, 0x00090008,
	0x3c048, 0xffffffff, 0x00010000,
	0x3c04c, 0xffffffff, 0x00030002,
	0x3c050, 0xffffffff, 0x00040007,
	0x3c054, 0xffffffff, 0x00060005,
	0x3c058, 0xffffffff, 0x00090008,
	0x3c05c, 0xffffffff, 0x00010000,
	0x3c060, 0xffffffff, 0x00030002,
	0x3c064, 0xffffffff, 0x00040007,
	0x3c068, 0xffffffff, 0x00060005,
	0x3c06c, 0xffffffff, 0x00090008,
	0x3c070, 0xffffffff, 0x00010000,
	0x3c074, 0xffffffff, 0x00030002,
	0x3c078, 0xffffffff, 0x00040007,
	0x3c07c, 0xffffffff, 0x00060005,
	0x3c080, 0xffffffff, 0x00090008,
	0x3c084, 0xffffffff, 0x00010000,
	0x3c088, 0xffffffff, 0x00030002,
	0x3c08c, 0xffffffff, 0x00040007,
	0x3c090, 0xffffffff, 0x00060005,
	0x3c094, 0xffffffff, 0x00090008,
	0x3c098, 0xffffffff, 0x00010000,
	0x3c09c, 0xffffffff, 0x00030002,
	0x3c0a0, 0xffffffff, 0x00040007,
	0x3c0a4, 0xffffffff, 0x00060005,
	0x3c0a8, 0xffffffff, 0x00090008,
	0x3c0ac, 0xffffffff, 0x00010000,
	0x3c0b0, 0xffffffff, 0x00030002,
	0x3c0b4, 0xffffffff, 0x00040007,
	0x3c0b8, 0xffffffff, 0x00060005,
	0x3c0bc, 0xffffffff, 0x00090008,
	0x3c0c0, 0xffffffff, 0x00010000,
	0x3c0c4, 0xffffffff, 0x00030002,
	0x3c0c8, 0xffffffff, 0x00040007,
	0x3c0cc, 0xffffffff, 0x00060005,
	0x3c0d0, 0xffffffff, 0x00090008,
	0x3c0d4, 0xffffffff, 0x00010000,
	0x3c0d8, 0xffffffff, 0x00030002,
	0x3c0dc, 0xffffffff, 0x00040007,
	0x3c0e0, 0xffffffff, 0x00060005,
	0x3c0e4, 0xffffffff, 0x00090008,
	0x3c0e8, 0xffffffff, 0x00010000,
	0x3c0ec, 0xffffffff, 0x00030002,
	0x3c0f0, 0xffffffff, 0x00040007,
	0x3c0f4, 0xffffffff, 0x00060005,
	0x3c0f8, 0xffffffff, 0x00090008,
	0xc318, 0xffffffff, 0x00020200,
	0x3350, 0xffffffff, 0x00000200,
	0x15c0, 0xffffffff, 0x00000400,
	0x55e8, 0xffffffff, 0x00000000,
	0x2f50, 0xffffffff, 0x00000902,
	0x3c000, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc424, 0xffffffff, 0x0020003f,
	0x38, 0xffffffff, 0x0140001c,
	0x3c, 0x000f0000, 0x000f0000,
	0x220, 0xffffffff, 0xc060000c,
	0x224, 0xc0000fff, 0x00000100,
	0xf90, 0xffffffff, 0x00000100,
	0xf98, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd00c, 0xff000ff0, 0x00000100,
	0xd80c, 0xff000ff0, 0x00000100
};
static void cik_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_BONAIRE:
		radeon_program_register_sequence(rdev,
						 bonaire_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_common_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 bonaire_golden_spm_registers,
						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
		break;
	case CHIP_KABINI:
		radeon_program_register_sequence(rdev,
						 kalindi_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_common_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 kalindi_golden_spm_registers,
						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
		break;
	case CHIP_KAVERI:
		radeon_program_register_sequence(rdev,
						 spectre_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 spectre_golden_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_common_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 spectre_golden_spm_registers,
						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
		break;
	case CHIP_HAWAII:
		radeon_program_register_sequence(rdev,
						 hawaii_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_common_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
		radeon_program_register_sequence(rdev,
						 hawaii_golden_spm_registers,
						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
		break;
	default:
		break;
	}
}
/**
 * cik_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (CIK).
 */
u32 cik_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;

	if (rdev->flags & RADEON_IS_IGP) {
		if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK)
			return reference_clock / 2;
	} else {
		if (RREG32_SMC(CG_CLKPIN_CNTL) & XTALIN_DIVIDE)
			return reference_clock / 4;
	}
	return reference_clock;
}
/**
 * cik_mm_rdoorbell - read a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 *
 * Returns the value in the doorbell aperture at the
 * requested offset (CIK).
 */
u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
{
	if (offset < rdev->doorbell.size) {
		return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
		return 0;
	}
}

/**
 * cik_mm_wdoorbell - write a doorbell dword
 *
 * @rdev: radeon_device pointer
 * @offset: byte offset into the aperture
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested offset (CIK).
 */
void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
{
	if (offset < rdev->doorbell.size) {
		writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
	}
}
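
/* MC io register init tables: {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA}
 * pairs programmed by ci_mc_load_microcode() before the MC ucode is loaded.
 */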
#define BONAIRE_IO_MC_REGS_SIZE 36

static const u32 bonaire_io_mc_regs[BONAIRE_IO_MC_REGS_SIZE][2] =
{
	{0x00000070, 0x04400000},
	{0x00000071, 0x80c01803},
	{0x00000072, 0x00004004},
	{0x00000073, 0x00000100},
	{0x00000074, 0x00ff0000},
	{0x00000075, 0x34000000},
	{0x00000076, 0x08000014},
	{0x00000077, 0x00cc08ec},
	{0x00000078, 0x00000400},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x04090000},
	{0x0000007c, 0x00000000},
	{0x0000007e, 0x4408a8e8},
	{0x0000007f, 0x00000304},
	{0x00000080, 0x00000000},
	{0x00000082, 0x00000001},
	{0x00000083, 0x00000002},
	{0x00000084, 0xf3e4f400},
	{0x00000085, 0x052024e3},
	{0x00000087, 0x00000000},
	{0x00000088, 0x01000000},
	{0x0000008a, 0x1c0a0000},
	{0x0000008b, 0xff010000},
	{0x0000008d, 0xffffefff},
	{0x0000008e, 0xfff3efff},
	{0x0000008f, 0xfff3efbf},
	{0x00000092, 0xf7ffffff},
	{0x00000093, 0xffffff7f},
	{0x00000095, 0x00101101},
	{0x00000096, 0x00000fff},
	{0x00000097, 0x00116fff},
	{0x00000098, 0x60010000},
	{0x00000099, 0x10010000},
	{0x0000009a, 0x00006000},
	{0x0000009b, 0x00001000},
	{0x0000009f, 0x00b48000}
};
#define HAWAII_IO_MC_REGS_SIZE 22

static const u32 hawaii_io_mc_regs[HAWAII_IO_MC_REGS_SIZE][2] =
{
	{0x0000007d, 0x40000000},
	{0x0000007e, 0x40180304},
	{0x0000007f, 0x0000ff00},
	{0x00000081, 0x00000000},
	{0x00000083, 0x00000800},
	{0x00000086, 0x00000000},
	{0x00000087, 0x00000100},
	{0x00000088, 0x00020100},
	{0x00000089, 0x00000000},
	{0x0000008b, 0x00040000},
	{0x0000008c, 0x00000100},
	{0x0000008e, 0xff010000},
	{0x00000090, 0xffffefff},
	{0x00000091, 0xfff3efff},
	{0x00000092, 0xfff3efbf},
	{0x00000093, 0xf7ffffff},
	{0x00000094, 0xffffff7f},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x0000009f, 0x00c79000}
};
/**
 * cik_srbm_select - select specific register instances
 *
 * @rdev: radeon_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active registers instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
static void cik_srbm_select(struct radeon_device *rdev,
			    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = (PIPEID(pipe & 0x3) |
			     MEID(me & 0x3) |
			     VMID(vmid & 0xf) |
			     QUEUEID(queue & 0x7));
	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl);
}
/**
 * ci_mc_load_microcode - load MC ucode into the hw
 *
 * @rdev: radeon_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int ci_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BONAIRE:
		io_mc_regs = (u32 *)&bonaire_io_mc_regs;
		ucode_size = CIK_MC_UCODE_SIZE;
		regs_size = BONAIRE_IO_MC_REGS_SIZE;
		break;
	case CHIP_HAWAII:
		io_mc_regs = (u32 *)&hawaii_io_mc_regs;
		ucode_size = HAWAII_MC_UCODE_SIZE;
		regs_size = HAWAII_IO_MC_REGS_SIZE;
		break;
	default:
		return -EINVAL;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
/**
 * cik_init_microcode - load ucode images from disk
 *
 * @rdev: radeon_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int cik_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	size_t pfp_req_size, me_req_size, ce_req_size,
		mec_req_size, rlc_req_size, mc_req_size = 0,
		sdma_req_size, smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BONAIRE:
		chip_name = "BONAIRE";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
		mc_req_size = CIK_MC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_HAWAII:
		chip_name = "HAWAII";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
		mc_req_size = HAWAII_MC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_KAVERI:
		chip_name = "KAVERI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KV_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	case CHIP_KABINI:
		chip_name = "KABINI";
		pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
		me_req_size = CIK_ME_UCODE_SIZE * 4;
		ce_req_size = CIK_CE_UCODE_SIZE * 4;
		mec_req_size = CIK_MEC_UCODE_SIZE * 4;
		rlc_req_size = KB_RLC_UCODE_SIZE * 4;
		sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->ce_fw->size != ce_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->ce_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
	err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->mec_fw->size != mec_req_size) {
		printk(KERN_ERR
		       "cik_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->mec_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
	err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->sdma_fw->size != sdma_req_size) {
		printk(KERN_ERR
		       "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
		       rdev->sdma_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	/* No SMC, MC ucode on APUs */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			printk(KERN_ERR
			       "cik_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
			goto out;
		}

		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->size != smc_req_size) {
			printk(KERN_ERR
			       "cik_smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "cik_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->ce_fw);
		rdev->ce_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}
/**
 * cik_tiling_mode_table_init - init the hw tiling table
 *
 * @rdev: radeon_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void cik_tiling_mode_table_init(struct radeon_device *rdev)
{
	const u32 num_tile_mode_states = 32;
	const u32 num_secondary_tile_mode_states = 16;
	u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
	u32 num_pipe_configs;
	u32 num_rbs = rdev->config.cik.max_backends_per_se *
		rdev->config.cik.max_shader_engines;

	switch (rdev->config.cik.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	num_pipe_configs = rdev->config.cik.max_tile_pipes;
	if (num_pipe_configs > 8)
		num_pipe_configs = 16;
	if (num_pipe_configs == 16) {
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
				break;
			case 1:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
				break;
			case 2:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 3:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
				break;
			case 4:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 5:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
				break;
			case 6:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
				break;
			case 7:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 TILE_SPLIT(split_equal_to_row_size));
				break;
			case 8:
				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
				break;
			case 9:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
				break;
			case 10:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 11:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 12:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 13:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
				break;
			case 14:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 16:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 17:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 27:
				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
				break;
			case 28:
				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 29:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			case 30:
				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
						 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
						 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
						 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
			WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
			switch (reg_offset) {
			case 0:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 1:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 2:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 3:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 4:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 5:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 6:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			case 8:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 9:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 10:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_16_BANK));
				break;
			case 11:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_8_BANK));
				break;
			case 12:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_4_BANK));
				break;
			case 13:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			case 14:
				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
						 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
						 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
						 NUM_BANKS(ADDR_SURF_2_BANK));
				break;
			default:
				gb_tile_moden = 0;
				break;
			}
			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
		}
2214 } else if (num_pipe_configs == 8) {
2215 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2216 switch (reg_offset) {
2218 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2219 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2220 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2221 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2224 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2225 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2226 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2227 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2230 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2231 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2232 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2233 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2236 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2237 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2238 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2239 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2242 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2243 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2244 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2245 TILE_SPLIT(split_equal_to_row_size));
2248 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2249 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2252 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2253 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2254 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2255 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2258 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2259 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2260 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2261 TILE_SPLIT(split_equal_to_row_size));
2264 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2265 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2268 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2269 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2272 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2273 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2274 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2275 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2278 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2279 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2280 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2281 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2284 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2285 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2286 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2287 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2290 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2291 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2294 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2295 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2296 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2297 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2300 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2301 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2302 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2303 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2306 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2307 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2308 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2309 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2312 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2313 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2316 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2317 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2318 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2319 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2322 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2323 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2324 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2325 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2328 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2329 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2330 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2331 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2337 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2338 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2340 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2341 switch (reg_offset) {
2343 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2344 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2345 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2346 NUM_BANKS(ADDR_SURF_16_BANK));
2349 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2350 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2351 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2352 NUM_BANKS(ADDR_SURF_16_BANK));
2355 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2356 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2357 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2358 NUM_BANKS(ADDR_SURF_16_BANK));
2361 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2362 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2363 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2364 NUM_BANKS(ADDR_SURF_16_BANK));
2367 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2368 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2369 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2370 NUM_BANKS(ADDR_SURF_8_BANK));
2373 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2374 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2375 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2376 NUM_BANKS(ADDR_SURF_4_BANK));
2379 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2380 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2381 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2382 NUM_BANKS(ADDR_SURF_2_BANK));
2385 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2386 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2387 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2388 NUM_BANKS(ADDR_SURF_16_BANK));
2391 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2392 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2393 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2394 NUM_BANKS(ADDR_SURF_16_BANK));
2397 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2398 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2399 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2400 NUM_BANKS(ADDR_SURF_16_BANK));
2403 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2404 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2405 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2406 NUM_BANKS(ADDR_SURF_16_BANK));
2409 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2410 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2411 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2412 NUM_BANKS(ADDR_SURF_8_BANK));
2415 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2416 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2417 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2418 NUM_BANKS(ADDR_SURF_4_BANK));
2421 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2422 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2423 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2424 NUM_BANKS(ADDR_SURF_2_BANK));
2430 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2432 } else if (num_pipe_configs == 4) {
2434 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2435 switch (reg_offset) {
2437 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2438 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2439 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2440 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2443 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2444 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2445 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2446 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2449 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2450 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2451 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2452 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2455 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2456 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2457 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2458 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2461 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2462 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2463 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2464 TILE_SPLIT(split_equal_to_row_size));
2467 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2468 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2471 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2472 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2473 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2474 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2477 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2478 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2479 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2480 TILE_SPLIT(split_equal_to_row_size));
2483 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2484 PIPE_CONFIG(ADDR_SURF_P4_16x16));
2487 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2488 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2491 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2492 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2493 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2494 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2497 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2498 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2499 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2500 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2503 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2504 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2505 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2506 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2509 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2510 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2513 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2514 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2515 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2516 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2519 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2520 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2521 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2522 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2525 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2526 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2527 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2528 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2531 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2532 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2535 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2536 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2537 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2538 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2541 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2542 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2543 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2544 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2547 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2548 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2549 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2550 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2556 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2557 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2559 } else if (num_rbs < 4) {
2560 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2561 switch (reg_offset) {
2563 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2564 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2565 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2566 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2569 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2570 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2571 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2572 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2575 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2576 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2577 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2578 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2581 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2582 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2583 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2584 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2587 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2588 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2589 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2590 TILE_SPLIT(split_equal_to_row_size));
2593 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2594 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2597 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2598 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2599 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2600 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2603 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2604 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2605 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2606 TILE_SPLIT(split_equal_to_row_size));
2609 gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2610 PIPE_CONFIG(ADDR_SURF_P4_8x16));
2613 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2614 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2617 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2618 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2619 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2620 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2623 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2624 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2625 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2626 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2629 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2630 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2631 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2632 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2635 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2636 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2639 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2640 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2641 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2642 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2645 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2646 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2647 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2648 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2651 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2652 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2653 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2654 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2657 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2658 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2661 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2662 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2663 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2664 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2667 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2668 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2669 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2670 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2673 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2674 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2675 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2676 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2682 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2683 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2686 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2687 switch (reg_offset) {
2689 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2690 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2691 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2692 NUM_BANKS(ADDR_SURF_16_BANK));
2695 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2696 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2697 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2698 NUM_BANKS(ADDR_SURF_16_BANK));
2701 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2702 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2703 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2704 NUM_BANKS(ADDR_SURF_16_BANK));
2707 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2708 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2709 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2710 NUM_BANKS(ADDR_SURF_16_BANK));
2713 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2714 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2715 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2716 NUM_BANKS(ADDR_SURF_16_BANK));
2719 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2720 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2721 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2722 NUM_BANKS(ADDR_SURF_8_BANK));
2725 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2726 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2727 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2728 NUM_BANKS(ADDR_SURF_4_BANK));
2731 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2732 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2733 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2734 NUM_BANKS(ADDR_SURF_16_BANK));
2737 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2738 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2739 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2740 NUM_BANKS(ADDR_SURF_16_BANK));
2743 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2744 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2745 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2746 NUM_BANKS(ADDR_SURF_16_BANK));
2749 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2750 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2751 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2752 NUM_BANKS(ADDR_SURF_16_BANK));
2755 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2756 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2757 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2758 NUM_BANKS(ADDR_SURF_16_BANK));
2761 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2762 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2763 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2764 NUM_BANKS(ADDR_SURF_8_BANK));
2767 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2768 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2769 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2770 NUM_BANKS(ADDR_SURF_4_BANK));
2776 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2778 } else if (num_pipe_configs == 2) {
2779 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2780 switch (reg_offset) {
2782 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2783 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2784 PIPE_CONFIG(ADDR_SURF_P2) |
2785 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B));
2788 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2789 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2790 PIPE_CONFIG(ADDR_SURF_P2) |
2791 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B));
2794 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2795 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2796 PIPE_CONFIG(ADDR_SURF_P2) |
2797 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2800 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2801 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2802 PIPE_CONFIG(ADDR_SURF_P2) |
2803 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B));
2806 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2807 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2808 PIPE_CONFIG(ADDR_SURF_P2) |
2809 TILE_SPLIT(split_equal_to_row_size));
2812 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2813 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2816 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2817 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2818 PIPE_CONFIG(ADDR_SURF_P2) |
2819 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B));
2822 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2823 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
2824 PIPE_CONFIG(ADDR_SURF_P2) |
2825 TILE_SPLIT(split_equal_to_row_size));
2828 gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
2831 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2832 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
2835 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2836 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2837 PIPE_CONFIG(ADDR_SURF_P2) |
2838 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2841 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2842 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2843 PIPE_CONFIG(ADDR_SURF_P2) |
2844 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2847 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2848 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2849 PIPE_CONFIG(ADDR_SURF_P2) |
2850 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2853 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2854 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
2857 gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2858 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2859 PIPE_CONFIG(ADDR_SURF_P2) |
2860 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2863 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2864 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2865 PIPE_CONFIG(ADDR_SURF_P2) |
2866 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2869 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2870 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2871 PIPE_CONFIG(ADDR_SURF_P2) |
2872 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2875 gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2876 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
2879 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2880 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2881 PIPE_CONFIG(ADDR_SURF_P2) |
2882 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2885 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2886 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2887 PIPE_CONFIG(ADDR_SURF_P2) |
2888 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2891 gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
2892 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2893 PIPE_CONFIG(ADDR_SURF_P2) |
2894 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2900 rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden;
2901 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2903 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
2904 switch (reg_offset) {
2906 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2907 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2908 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2909 NUM_BANKS(ADDR_SURF_16_BANK));
2912 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2913 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2914 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2915 NUM_BANKS(ADDR_SURF_16_BANK));
2918 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2919 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2920 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2921 NUM_BANKS(ADDR_SURF_16_BANK));
2924 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2925 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2926 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2927 NUM_BANKS(ADDR_SURF_16_BANK));
2930 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2931 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2932 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2933 NUM_BANKS(ADDR_SURF_16_BANK));
2936 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2937 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2938 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2939 NUM_BANKS(ADDR_SURF_16_BANK));
2942 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2943 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2944 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2945 NUM_BANKS(ADDR_SURF_8_BANK));
2948 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2949 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2950 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2951 NUM_BANKS(ADDR_SURF_16_BANK));
2954 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2955 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2956 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2957 NUM_BANKS(ADDR_SURF_16_BANK));
2960 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2961 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2962 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2963 NUM_BANKS(ADDR_SURF_16_BANK));
2966 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2967 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2968 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2969 NUM_BANKS(ADDR_SURF_16_BANK));
2972 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2973 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2974 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2975 NUM_BANKS(ADDR_SURF_16_BANK));
2978 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2979 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2980 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2981 NUM_BANKS(ADDR_SURF_16_BANK));
2984 gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2985 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2986 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2987 NUM_BANKS(ADDR_SURF_8_BANK));
2993 WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2996 DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs);
3000 * cik_select_se_sh - select which SE, SH to address
3002 * @rdev: radeon_device pointer
3003 * @se_num: shader engine to address
3004 * @sh_num: sh block to address
3006 * Select which SE, SH combinations to address. Certain
3007 * registers are instanced per SE or SH. 0xffffffff means
3008 * broadcast to all SEs or SHs (CIK).
3010 static void cik_select_se_sh(struct radeon_device *rdev,
3011 u32 se_num, u32 sh_num)
3013 u32 data = INSTANCE_BROADCAST_WRITES;
3015 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
3016 data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
3017 else if (se_num == 0xffffffff)
3018 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
3019 else if (sh_num == 0xffffffff)
3020 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
3022 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
3023 WREG32(GRBM_GFX_INDEX, data);
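/*
 * Editor's sketch (illustrative, not driver code): callers typically
 * select each SE/SH pair in turn, touch the instanced registers, and
 * then restore broadcast mode so later writes reach every instance:
 *
 *	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
 *		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
 *			cik_select_se_sh(rdev, i, j);
 *			... access per-SE/SH instanced registers ...
 *		}
 *	}
 *	cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 */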
3027 * cik_create_bitmask - create a bitmask
3029 * @bit_width: length of the mask
3031 * Create a variable-length bit mask (CIK).
3032 * Returns the bitmask.
3034 static u32 cik_create_bitmask(u32 bit_width)
3038 for (i = 0; i < bit_width; i++) {
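/*
 * Editor's note: the loop builds a mask of bit_width consecutive set
 * bits, e.g. cik_create_bitmask(4) == 0xf. For bit_width < 32 this is
 * equivalent to the closed form
 *
 *	mask = (1u << bit_width) - 1;
 *
 * but the loop form also stays well defined for bit_width == 32, where
 * a 32-bit shift of a 32-bit value would be undefined behavior in C.
 */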
3046 * cik_get_rb_disabled - get the mask of disabled render backends (RBs)
3048 * @rdev: radeon_device pointer
3049 * @max_rb_num: max RBs (render backends) for the asic
3050 * @se_num: number of SEs (shader engines) for the asic
3051 * @sh_per_se: number of SH blocks per SE for the asic
3053 * Calculates the bitmask of disabled RBs (CIK).
3054 * Returns the disabled RB bitmask.
3056 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
3057 u32 max_rb_num, u32 se_num,
3062 data = RREG32(CC_RB_BACKEND_DISABLE);
3064 data &= BACKEND_DISABLE_MASK;
3067 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
3069 data >>= BACKEND_DISABLE_SHIFT;
3071 mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
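/*
 * Editor's worked example (hypothetical values): with max_rb_num = 8,
 * se_num = 2 and sh_per_se = 1 there are 8 / 2 / 1 = 4 RBs per SH, so
 * mask == cik_create_bitmask(4) == 0xf and only the low four disable
 * bits of the combined fuse/user value are reported for the currently
 * selected SE/SH.
 */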
3077 * cik_setup_rb - setup the RBs on the asic
3079 * @rdev: radeon_device pointer
3080 * @se_num: number of SEs (shader engines) for the asic
3081 * @sh_per_se: number of SH blocks per SE for the asic
3082 * @max_rb_num: max RBs (render backends) for the asic
3084 * Configures per-SE/SH RB registers (CIK).
3086 static void cik_setup_rb(struct radeon_device *rdev,
3087 u32 se_num, u32 sh_per_se,
3092 u32 disabled_rbs = 0;
3093 u32 enabled_rbs = 0;
3095 for (i = 0; i < se_num; i++) {
3096 for (j = 0; j < sh_per_se; j++) {
3097 cik_select_se_sh(rdev, i, j);
3098 data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
3099 if (rdev->family == CHIP_HAWAII)
3100 disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
3102 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
3105 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3108 for (i = 0; i < max_rb_num; i++) {
3109 if (!(disabled_rbs & mask))
3110 enabled_rbs |= mask;
3114 for (i = 0; i < se_num; i++) {
3115 cik_select_se_sh(rdev, i, 0xffffffff);
3117 for (j = 0; j < sh_per_se; j++) {
3118 switch (enabled_rbs & 3) {
3121 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_3);
3123 data |= PKR_MAP(RASTER_CONFIG_RB_MAP_0);
3126 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
3129 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
3133 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
3138 WREG32(PA_SC_RASTER_CONFIG, data);
3140 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
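/*
 * Editor's worked example (hypothetical values): with 2 SEs, 1 SH per
 * SE and a per-SH disable value of 0x1 on SE0 only, the first loop
 * packs disabled_rbs = 0x1 (CIK_RB_BITMAP_WIDTH_PER_SH bits per SH),
 * the second loop inverts it into enabled_rbs, and the final loop
 * programs PA_SC_RASTER_CONFIG on each SE from those enabled bits.
 */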
3144 * cik_gpu_init - setup the 3D engine
3146 * @rdev: radeon_device pointer
3148 * Configures the 3D engine and tiling configuration
3149 * registers so that the 3D engine is usable.
3151 static void cik_gpu_init(struct radeon_device *rdev)
3153 u32 gb_addr_config = RREG32(GB_ADDR_CONFIG);
3154 u32 mc_shared_chmap, mc_arb_ramcfg;
3155 u32 hdp_host_path_cntl;
3159 switch (rdev->family) {
3161 rdev->config.cik.max_shader_engines = 2;
3162 rdev->config.cik.max_tile_pipes = 4;
3163 rdev->config.cik.max_cu_per_sh = 7;
3164 rdev->config.cik.max_sh_per_se = 1;
3165 rdev->config.cik.max_backends_per_se = 2;
3166 rdev->config.cik.max_texture_channel_caches = 4;
3167 rdev->config.cik.max_gprs = 256;
3168 rdev->config.cik.max_gs_threads = 32;
3169 rdev->config.cik.max_hw_contexts = 8;
3171 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3172 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3173 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3174 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3175 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3178 rdev->config.cik.max_shader_engines = 4;
3179 rdev->config.cik.max_tile_pipes = 16;
3180 rdev->config.cik.max_cu_per_sh = 11;
3181 rdev->config.cik.max_sh_per_se = 1;
3182 rdev->config.cik.max_backends_per_se = 4;
3183 rdev->config.cik.max_texture_channel_caches = 16;
3184 rdev->config.cik.max_gprs = 256;
3185 rdev->config.cik.max_gs_threads = 32;
3186 rdev->config.cik.max_hw_contexts = 8;
3188 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3189 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3190 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3191 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3192 gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
3195 rdev->config.cik.max_shader_engines = 1;
3196 rdev->config.cik.max_tile_pipes = 4;
3197 if ((rdev->pdev->device == 0x1304) ||
3198 (rdev->pdev->device == 0x1305) ||
3199 (rdev->pdev->device == 0x130C) ||
3200 (rdev->pdev->device == 0x130F) ||
3201 (rdev->pdev->device == 0x1310) ||
3202 (rdev->pdev->device == 0x1311) ||
3203 (rdev->pdev->device == 0x131C)) {
3204 rdev->config.cik.max_cu_per_sh = 8;
3205 rdev->config.cik.max_backends_per_se = 2;
3206 } else if ((rdev->pdev->device == 0x1309) ||
3207 (rdev->pdev->device == 0x130A) ||
3208 (rdev->pdev->device == 0x130D) ||
3209 (rdev->pdev->device == 0x1313) ||
3210 (rdev->pdev->device == 0x131D)) {
3211 rdev->config.cik.max_cu_per_sh = 6;
3212 rdev->config.cik.max_backends_per_se = 2;
3213 } else if ((rdev->pdev->device == 0x1306) ||
3214 (rdev->pdev->device == 0x1307) ||
3215 (rdev->pdev->device == 0x130B) ||
3216 (rdev->pdev->device == 0x130E) ||
3217 (rdev->pdev->device == 0x1315) ||
3218 (rdev->pdev->device == 0x131B)) {
3219 rdev->config.cik.max_cu_per_sh = 4;
3220 rdev->config.cik.max_backends_per_se = 1;
3222 rdev->config.cik.max_cu_per_sh = 3;
3223 rdev->config.cik.max_backends_per_se = 1;
3225 rdev->config.cik.max_sh_per_se = 1;
3226 rdev->config.cik.max_texture_channel_caches = 4;
3227 rdev->config.cik.max_gprs = 256;
3228 rdev->config.cik.max_gs_threads = 16;
3229 rdev->config.cik.max_hw_contexts = 8;
3231 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3232 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3233 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3234 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3235 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3239 rdev->config.cik.max_shader_engines = 1;
3240 rdev->config.cik.max_tile_pipes = 2;
3241 rdev->config.cik.max_cu_per_sh = 2;
3242 rdev->config.cik.max_sh_per_se = 1;
3243 rdev->config.cik.max_backends_per_se = 1;
3244 rdev->config.cik.max_texture_channel_caches = 2;
3245 rdev->config.cik.max_gprs = 256;
3246 rdev->config.cik.max_gs_threads = 16;
3247 rdev->config.cik.max_hw_contexts = 8;
3249 rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
3250 rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
3251 rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
3252 rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
3253 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
3257 /* Initialize HDP */
3258 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3259 WREG32((0x2c14 + j), 0x00000000);
3260 WREG32((0x2c18 + j), 0x00000000);
3261 WREG32((0x2c1c + j), 0x00000000);
3262 WREG32((0x2c20 + j), 0x00000000);
3263 WREG32((0x2c24 + j), 0x00000000);
3266 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3268 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
3270 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
3271 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
3273 rdev->config.cik.num_tile_pipes = rdev->config.cik.max_tile_pipes;
3274 rdev->config.cik.mem_max_burst_length_bytes = 256;
3275 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
3276 rdev->config.cik.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3277 if (rdev->config.cik.mem_row_size_in_kb > 4)
3278 rdev->config.cik.mem_row_size_in_kb = 4;
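/*
 * Editor's worked example: NOOFCOLS encodes the DRAM column count as
 * 2^(8 + tmp), so tmp = 0 gives 256 columns and a row size of
 * (4 * 256) / 1024 = 1KB, while tmp = 2 gives 4KB and anything larger
 * is clamped to 4KB by the test above.
 */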
3279 /* XXX use MC settings? */
3280 rdev->config.cik.shader_engine_tile_size = 32;
3281 rdev->config.cik.num_gpus = 1;
3282 rdev->config.cik.multi_gpu_tile_size = 64;
3284 /* fix up row size */
3285 gb_addr_config &= ~ROW_SIZE_MASK;
3286 switch (rdev->config.cik.mem_row_size_in_kb) {
3289 gb_addr_config |= ROW_SIZE(0);
3292 gb_addr_config |= ROW_SIZE(1);
3295 gb_addr_config |= ROW_SIZE(2);
3299 /* setup tiling info dword. gb_addr_config is not adequate since it does
3300 * not have bank info, so create a custom tiling dword.
3301 * bits 3:0 num_pipes
3302 * bits 7:4 num_banks
3303 * bits 11:8 group_size
3304 * bits 15:12 row_size
3306 rdev->config.cik.tile_config = 0;
3307 switch (rdev->config.cik.num_tile_pipes) {
3309 rdev->config.cik.tile_config |= (0 << 0);
3312 rdev->config.cik.tile_config |= (1 << 0);
3315 rdev->config.cik.tile_config |= (2 << 0);
3319 /* XXX what about 12? */
3320 rdev->config.cik.tile_config |= (3 << 0);
3323 rdev->config.cik.tile_config |=
3324 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
3325 rdev->config.cik.tile_config |=
3326 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
3327 rdev->config.cik.tile_config |=
3328 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
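/*
 * Editor's worked example (all field values hypothetical): with 4
 * pipes (2 << 0 = 0x2), a raw NOOFBANK field of 2 (2 << 4 = 0x20), a
 * pipe interleave field of 0 (0 << 8) and a ROW_SIZE field of 2
 * (2 << 12 = 0x2000), the dword assembled above is
 * 0x2 | 0x20 | 0x0 | 0x2000 = 0x2022.
 */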
3330 WREG32(GB_ADDR_CONFIG, gb_addr_config);
3331 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
3332 WREG32(DMIF_ADDR_CALC, gb_addr_config);
3333 WREG32(SDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, gb_addr_config & 0x70);
3334 WREG32(SDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, gb_addr_config & 0x70);
3335 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3336 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3337 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
3339 cik_tiling_mode_table_init(rdev);
3341 cik_setup_rb(rdev, rdev->config.cik.max_shader_engines,
3342 rdev->config.cik.max_sh_per_se,
3343 rdev->config.cik.max_backends_per_se);
3345 /* set HW defaults for 3D engine */
3346 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
3348 WREG32(SX_DEBUG_1, 0x20);
3350 WREG32(TA_CNTL_AUX, 0x00010000);
3352 tmp = RREG32(SPI_CONFIG_CNTL);
3354 WREG32(SPI_CONFIG_CNTL, tmp);
3356 WREG32(SQ_CONFIG, 1);
3358 WREG32(DB_DEBUG, 0);
3360 tmp = RREG32(DB_DEBUG2) & ~0xf00fffff;
3362 WREG32(DB_DEBUG2, tmp);
3364 tmp = RREG32(DB_DEBUG3) & ~0x0002021c;
3366 WREG32(DB_DEBUG3, tmp);
3368 tmp = RREG32(CB_HW_CONTROL) & ~0x00010000;
3370 WREG32(CB_HW_CONTROL, tmp);
3372 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
3374 WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_frontend) |
3375 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.cik.sc_prim_fifo_size_backend) |
3376 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cik.sc_hiz_tile_fifo_size) |
3377 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cik.sc_earlyz_tile_fifo_size)));
3379 WREG32(VGT_NUM_INSTANCES, 1);
3381 WREG32(CP_PERFMON_CNTL, 0);
3383 WREG32(SQ_CONFIG, 0);
3385 WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
3386 FORCE_EOV_MAX_REZ_CNT(255)));
3388 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
3389 AUTO_INVLD_EN(ES_AND_GS_AUTO));
3391 WREG32(VGT_GS_VERTEX_REUSE, 16);
3392 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
3394 tmp = RREG32(HDP_MISC_CNTL);
3395 tmp |= HDP_FLUSH_INVALIDATE_CACHE;
3396 WREG32(HDP_MISC_CNTL, tmp);
3398 hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
3399 WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
3401 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3402 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
3408 * GPU scratch registers helpers function.
3411 * cik_scratch_init - setup driver info for CP scratch regs
3413 * @rdev: radeon_device pointer
3415 * Set up the number and offset of the CP scratch registers.
3416 * NOTE: use of CP scratch registers is a legacy interface and
3417 * is not used by default on newer asics (r6xx+). On newer asics,
3418 * memory buffers are used for fences rather than scratch regs.
3420 static void cik_scratch_init(struct radeon_device *rdev)
3424 rdev->scratch.num_reg = 7;
3425 rdev->scratch.reg_base = SCRATCH_REG0;
3426 for (i = 0; i < rdev->scratch.num_reg; i++) {
3427 rdev->scratch.free[i] = true;
3428 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
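/*
 * Editor's sketch: with num_reg = 7 and reg_base = SCRATCH_REG0 this
 * simply exposes seven consecutive 32-bit registers,
 *
 *	reg[i] = SCRATCH_REG0 + 4 * i,	i = 0..6
 *
 * each marked free until radeon_scratch_get() hands it out.
 */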
3433 * cik_ring_test - basic gfx ring test
3435 * @rdev: radeon_device pointer
3436 * @ring: radeon_ring structure holding ring information
3438 * Allocate a scratch register and write to it using the gfx ring (CIK).
3439 * Provides a basic gfx ring test to verify that the ring is working.
3440 * Used by cik_cp_gfx_resume().
3441 * Returns 0 on success, error on failure.
3443 int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
3450 r = radeon_scratch_get(rdev, &scratch);
3452 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
3455 WREG32(scratch, 0xCAFEDEAD);
3456 r = radeon_ring_lock(rdev, ring, 3);
3458 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
3459 radeon_scratch_free(rdev, scratch);
3462 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3463 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));
3464 radeon_ring_write(ring, 0xDEADBEEF);
3465 radeon_ring_unlock_commit(rdev, ring);
3467 for (i = 0; i < rdev->usec_timeout; i++) {
3468 tmp = RREG32(scratch);
3469 if (tmp == 0xDEADBEEF)
3473 if (i < rdev->usec_timeout) {
3474 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
3476 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
3477 ring->idx, scratch, tmp);
3480 radeon_scratch_free(rdev, scratch);
3485 * cik_fence_gfx_ring_emit - emit a fence on the gfx ring
3487 * @rdev: radeon_device pointer
3488 * @fence: radeon fence object
3490 * Emits a fence sequence number on the gfx ring and flushes
3493 void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
3494 struct radeon_fence *fence)
3496 struct radeon_ring *ring = &rdev->ring[fence->ring];
3497 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3499 /* EVENT_WRITE_EOP - flush caches, send int */
3500 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3501 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
3503 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3505 radeon_ring_write(ring, addr & 0xfffffffc);
3506 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
3507 radeon_ring_write(ring, fence->seq);
3508 radeon_ring_write(ring, 0);
3510 /* We should be using the new WAIT_REG_MEM special op packet here
3511 * but it causes the CP to hang
3513 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3514 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3515 WRITE_DATA_DST_SEL(0)));
3516 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
3517 radeon_ring_write(ring, 0);
3518 radeon_ring_write(ring, 0);
3522 * cik_fence_compute_ring_emit - emit a fence on the compute ring
3524 * @rdev: radeon_device pointer
3525 * @fence: radeon fence object
3527 * Emits a fence sequence number on the compute ring and flushes
3530 void cik_fence_compute_ring_emit(struct radeon_device *rdev,
3531 struct radeon_fence *fence)
3533 struct radeon_ring *ring = &rdev->ring[fence->ring];
3534 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3536 /* RELEASE_MEM - flush caches, send int */
3537 radeon_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
3538 radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
3540 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3542 radeon_ring_write(ring, DATA_SEL(1) | INT_SEL(2));
3543 radeon_ring_write(ring, addr & 0xfffffffc);
3544 radeon_ring_write(ring, upper_32_bits(addr));
3545 radeon_ring_write(ring, fence->seq);
3546 radeon_ring_write(ring, 0);
3548 /* We should be using the new WAIT_REG_MEM special op packet here
3549 * but it causes the CP to hang
3551 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3552 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
3553 WRITE_DATA_DST_SEL(0)));
3554 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
3555 radeon_ring_write(ring, 0);
3556 radeon_ring_write(ring, 0);
3559 void cik_semaphore_ring_emit(struct radeon_device *rdev,
3560 struct radeon_ring *ring,
3561 struct radeon_semaphore *semaphore,
3564 uint64_t addr = semaphore->gpu_addr;
3565 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
3567 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
3568 radeon_ring_write(ring, addr & 0xffffffff);
3569 radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
3573 * cik_copy_cpdma - copy pages using the CP DMA engine
3575 * @rdev: radeon_device pointer
3576 * @src_offset: src GPU address
3577 * @dst_offset: dst GPU address
3578 * @num_gpu_pages: number of GPU pages to xfer
3579 * @fence: radeon fence object
3581 * Copy GPU pages using the CP DMA engine (CIK+).
3582 * Used by the radeon ttm implementation to move pages if
3583 * registered as the asic copy callback.
3585 int cik_copy_cpdma(struct radeon_device *rdev,
3586 uint64_t src_offset, uint64_t dst_offset,
3587 unsigned num_gpu_pages,
3588 struct radeon_fence **fence)
3590 struct radeon_semaphore *sem = NULL;
3591 int ring_index = rdev->asic->copy.blit_ring_index;
3592 struct radeon_ring *ring = &rdev->ring[ring_index];
3593 u32 size_in_bytes, cur_size_in_bytes, control;
3597 r = radeon_semaphore_create(rdev, &sem);
3599 DRM_ERROR("radeon: moving bo (%d).\n", r);
3603 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3604 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
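/*
 * Editor's note: each DMA_DATA packet moves at most 0x1fffff bytes
 * (just under 2MB), so copying e.g. 1024 4KB pages (0x400000 bytes)
 * needs DIV_ROUND_UP(0x400000, 0x1fffff) = 3 loops, and the lock below
 * reserves 7 ring dwords per loop plus 18 dwords of semaphore/fence
 * overhead.
 */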
3605 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
3607 DRM_ERROR("radeon: moving bo (%d).\n", r);
3608 radeon_semaphore_free(rdev, &sem, NULL);
3612 if (radeon_fence_need_sync(*fence, ring->idx)) {
3613 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3615 radeon_fence_note_sync(*fence, ring->idx);
3617 radeon_semaphore_free(rdev, &sem, NULL);
3620 for (i = 0; i < num_loops; i++) {
3621 cur_size_in_bytes = size_in_bytes;
3622 if (cur_size_in_bytes > 0x1fffff)
3623 cur_size_in_bytes = 0x1fffff;
3624 size_in_bytes -= cur_size_in_bytes;
3626 if (size_in_bytes == 0)
3627 control |= PACKET3_DMA_DATA_CP_SYNC;
3628 radeon_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
3629 radeon_ring_write(ring, control);
3630 radeon_ring_write(ring, lower_32_bits(src_offset));
3631 radeon_ring_write(ring, upper_32_bits(src_offset));
3632 radeon_ring_write(ring, lower_32_bits(dst_offset));
3633 radeon_ring_write(ring, upper_32_bits(dst_offset));
3634 radeon_ring_write(ring, cur_size_in_bytes);
3635 src_offset += cur_size_in_bytes;
3636 dst_offset += cur_size_in_bytes;
3639 r = radeon_fence_emit(rdev, fence, ring->idx);
3641 radeon_ring_unlock_undo(rdev, ring);
3645 radeon_ring_unlock_commit(rdev, ring);
3646 radeon_semaphore_free(rdev, &sem, *fence);
3655 * cik_ring_ib_execute - emit an IB (Indirect Buffer) on the gfx ring
3657 * @rdev: radeon_device pointer
3658 * @ib: radeon indirect buffer object
3660 * Emits a DE (drawing engine) or CE (constant engine) IB
3661 * on the gfx ring. IBs are usually generated by userspace
3662 * acceleration drivers and submitted to the kernel for
3663 * scheduling on the ring. This function schedules the IB
3664 * on the gfx ring for execution by the GPU.
3666 void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3668 struct radeon_ring *ring = &rdev->ring[ib->ring];
3669 u32 header, control = INDIRECT_BUFFER_VALID;
3671 if (ib->is_const_ib) {
3672 /* set switch buffer packet before const IB */
3673 radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3674 radeon_ring_write(ring, 0);
3676 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3679 if (ring->rptr_save_reg) {
3680 next_rptr = ring->wptr + 3 + 4;
3681 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3682 radeon_ring_write(ring, ((ring->rptr_save_reg -
3683 PACKET3_SET_UCONFIG_REG_START) >> 2));
3684 radeon_ring_write(ring, next_rptr);
3685 } else if (rdev->wb.enabled) {
3686 next_rptr = ring->wptr + 5 + 4;
3687 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3688 radeon_ring_write(ring, WRITE_DATA_DST_SEL(1));
3689 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3690 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3691 radeon_ring_write(ring, next_rptr);
3694 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3697 control |= ib->length_dw |
3698 (ib->vm ? (ib->vm->id << 24) : 0);
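/*
 * Editor's note: the control word packs the IB length in dwords into
 * the low bits and the VM id into the field starting at bit 24, so
 * e.g. a 256-dword IB in VM 1 yields 0x100 | (1 << 24) = 0x01000100.
 */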
3700 radeon_ring_write(ring, header);
3701 radeon_ring_write(ring,
3705 (ib->gpu_addr & 0xFFFFFFFC));
3706 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3707 radeon_ring_write(ring, control);
3711 * cik_ib_test - basic gfx ring IB test
3713 * @rdev: radeon_device pointer
3714 * @ring: radeon_ring structure holding ring information
3716 * Allocate an IB and execute it on the gfx ring (CIK).
3717 * Provides a basic gfx ring test to verify that IBs are working.
3718 * Returns 0 on success, error on failure.
3720 int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3722 struct radeon_ib ib;
3728 r = radeon_scratch_get(rdev, &scratch);
3730 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
3733 WREG32(scratch, 0xCAFEDEAD);
3734 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3736 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3737 radeon_scratch_free(rdev, scratch);
3740 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
3741 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);
3742 ib.ptr[2] = 0xDEADBEEF;
3744 r = radeon_ib_schedule(rdev, &ib, NULL);
3746 radeon_scratch_free(rdev, scratch);
3747 radeon_ib_free(rdev, &ib);
3748 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3751 r = radeon_fence_wait(ib.fence, false);
3753 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3754 radeon_scratch_free(rdev, scratch);
3755 radeon_ib_free(rdev, &ib);
3758 for (i = 0; i < rdev->usec_timeout; i++) {
3759 tmp = RREG32(scratch);
3760 if (tmp == 0xDEADBEEF)
3764 if (i < rdev->usec_timeout) {
3765 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3767 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
3771 radeon_scratch_free(rdev, scratch);
3772 radeon_ib_free(rdev, &ib);
3778 * On CIK, gfx and compute now have independent command processors.
3781 * Gfx consists of a single ring and can process both gfx jobs and
3782 * compute jobs. The gfx CP consists of three microengines (ME):
3783 * PFP - Pre-Fetch Parser
3785 * CE - Constant Engine
3786 * The PFP and ME make up what is considered the Drawing Engine (DE).
3787 * The CE is an asynchronous engine used for updating buffer descriptors
3788 * used by the DE so that they can be loaded into cache in parallel
3789 * while the DE is processing state update packets.
3792 * The compute CP consists of two microengines (ME):
3793 * MEC1 - Compute MicroEngine 1
3794 * MEC2 - Compute MicroEngine 2
3795 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
3796 * The queues are exposed to userspace and are programmed directly
3797 * by the compute runtime.
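/*
 * Editor's sketch: the queue count therefore scales as
 * num_mec * num_pipe * queues_per_pipe, e.g.
 *
 *	KV:    2 MECs * 4 pipes * 8 queues = 64
 *	CI/KB: 1 MEC  * 4 pipes * 8 queues = 32
 *
 * which is exactly how rdev->mec.num_queue is derived in
 * cik_mec_init() below.
 */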
3800 * cik_cp_gfx_enable - enable/disable the gfx CP MEs
3802 * @rdev: radeon_device pointer
3803 * @enable: enable or disable the MEs
3805 * Halts or unhalts the gfx MEs.
3807 static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
3810 WREG32(CP_ME_CNTL, 0);
3812 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3813 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3819 * cik_cp_gfx_load_microcode - load the gfx CP ME ucode
3821 * @rdev: radeon_device pointer
3823 * Loads the gfx PFP, ME, and CE ucode.
3824 * Returns 0 for success, -EINVAL if the ucode is not available.
3826 static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
3828 const __be32 *fw_data;
3831 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3834 cik_cp_gfx_enable(rdev, false);
3837 fw_data = (const __be32 *)rdev->pfp_fw->data;
3838 WREG32(CP_PFP_UCODE_ADDR, 0);
3839 for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
3840 WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3841 WREG32(CP_PFP_UCODE_ADDR, 0);
3844 fw_data = (const __be32 *)rdev->ce_fw->data;
3845 WREG32(CP_CE_UCODE_ADDR, 0);
3846 for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
3847 WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3848 WREG32(CP_CE_UCODE_ADDR, 0);
3851 fw_data = (const __be32 *)rdev->me_fw->data;
3852 WREG32(CP_ME_RAM_WADDR, 0);
3853 for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
3854 WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3855 WREG32(CP_ME_RAM_WADDR, 0);
3857 WREG32(CP_PFP_UCODE_ADDR, 0);
3858 WREG32(CP_CE_UCODE_ADDR, 0);
3859 WREG32(CP_ME_RAM_WADDR, 0);
3860 WREG32(CP_ME_RAM_RADDR, 0);
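/*
 * Editor's sketch (hypothetical helper, not in the driver): each
 * engine above follows the same "reset address, stream big-endian
 * words, reset address" sequence, which could be factored as:
 *
 *	static void cik_load_ucode(const __be32 *fw, u32 addr_reg,
 *				   u32 data_reg, u32 size)
 *	{
 *		u32 i;
 *
 *		WREG32(addr_reg, 0);
 *		for (i = 0; i < size; i++)
 *			WREG32(data_reg, be32_to_cpup(fw++));
 *		WREG32(addr_reg, 0);
 *	}
 */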
3865 * cik_cp_gfx_start - start the gfx ring
3867 * @rdev: radeon_device pointer
3869 * Enables the ring and loads the clear state context and other
3870 * packets required to init the ring.
3871 * Returns 0 for success, error for failure.
3873 static int cik_cp_gfx_start(struct radeon_device *rdev)
3875 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3879 WREG32(CP_MAX_CONTEXT, rdev->config.cik.max_hw_contexts - 1);
3880 WREG32(CP_ENDIAN_SWAP, 0);
3881 WREG32(CP_DEVICE_ID, 1);
3883 cik_cp_gfx_enable(rdev, true);
3885 r = radeon_ring_lock(rdev, ring, cik_default_size + 17);
3887 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3891 /* init the CE partitions. CE only used for gfx on CIK */
3892 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3893 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3894 radeon_ring_write(ring, 0xc000);
3895 radeon_ring_write(ring, 0xc000);
3897 /* setup clear context state */
3898 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3899 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3901 radeon_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3902 radeon_ring_write(ring, 0x80000000);
3903 radeon_ring_write(ring, 0x80000000);
3905 for (i = 0; i < cik_default_size; i++)
3906 radeon_ring_write(ring, cik_default_state[i]);
3908 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3909 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3911 /* set clear context state */
3912 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3913 radeon_ring_write(ring, 0);
3915 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3916 radeon_ring_write(ring, 0x00000316);
3917 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3918 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3920 radeon_ring_unlock_commit(rdev, ring);
3926 * cik_cp_gfx_fini - stop the gfx ring
3928 * @rdev: radeon_device pointer
3930 * Stop the gfx ring and tear down the driver ring
3933 static void cik_cp_gfx_fini(struct radeon_device *rdev)
3935 cik_cp_gfx_enable(rdev, false);
3936 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3940 * cik_cp_gfx_resume - setup the gfx ring buffer registers
3942 * @rdev: radeon_device pointer
3944 * Program the location and size of the gfx ring buffer
3945 * and test it to make sure it's working.
3946 * Returns 0 for success, error for failure.
3948 static int cik_cp_gfx_resume(struct radeon_device *rdev)
3950 struct radeon_ring *ring;
3956 WREG32(CP_SEM_WAIT_TIMER, 0x0);
3957 if (rdev->family != CHIP_HAWAII)
3958 WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3960 /* Set the write pointer delay */
3961 WREG32(CP_RB_WPTR_DELAY, 0);
3963 /* set the RB to use vmid 0 */
3964 WREG32(CP_RB_VMID, 0);
3966 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3968 /* ring 0 - compute and gfx */
3969 /* Set ring buffer size */
3970 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3971 rb_bufsz = order_base_2(ring->ring_size / 8);
3972 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
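/*
 * Editor's worked example: CP_RB0_CNTL wants log2 of the ring size in
 * qwords, so a 1MB ring gives order_base_2(0x100000 / 8) = 17, and the
 * value shifted in at bit 8 is derived the same way from the 4KB GPU
 * page size: order_base_2(4096 / 8) = 9.
 */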
3974 tmp |= BUF_SWAP_32BIT;
3976 WREG32(CP_RB0_CNTL, tmp);
3978 /* Initialize the ring buffer's read and write pointers */
3979 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
3981 WREG32(CP_RB0_WPTR, ring->wptr);
3983 /* set the wb address whether it's enabled or not */
3984 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
3985 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3987 /* scratch register shadowing is no longer supported */
3988 WREG32(SCRATCH_UMSK, 0);
3990 if (!rdev->wb.enabled)
3991 tmp |= RB_NO_UPDATE;
3994 WREG32(CP_RB0_CNTL, tmp);
3996 rb_addr = ring->gpu_addr >> 8;
3997 WREG32(CP_RB0_BASE, rb_addr);
3998 WREG32(CP_RB0_BASE_HI, upper_32_bits(rb_addr));
4000 ring->rptr = RREG32(CP_RB0_RPTR);
4002 /* start the ring */
4003 cik_cp_gfx_start(rdev);
4004 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
4005 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
4007 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
4013 u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
4014 struct radeon_ring *ring)
4020 if (rdev->wb.enabled) {
4021 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
4023 mutex_lock(&rdev->srbm_mutex);
4024 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4025 rptr = RREG32(CP_HQD_PQ_RPTR);
4026 cik_srbm_select(rdev, 0, 0, 0, 0);
4027 mutex_unlock(&rdev->srbm_mutex);
4033 u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
4034 struct radeon_ring *ring)
4038 if (rdev->wb.enabled) {
4039 wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
4041 mutex_lock(&rdev->srbm_mutex);
4042 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4043 wptr = RREG32(CP_HQD_PQ_WPTR);
4044 cik_srbm_select(rdev, 0, 0, 0, 0);
4045 mutex_unlock(&rdev->srbm_mutex);
4051 void cik_compute_ring_set_wptr(struct radeon_device *rdev,
4052 struct radeon_ring *ring)
4054 rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
4055 WDOORBELL32(ring->doorbell_offset, ring->wptr);
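/*
 * Editor's note: the rptr/wptr getters above prefer the writeback copy
 * in system memory; only when writeback is disabled do they take
 * srbm_mutex, select the queue via cik_srbm_select() and read the HQD
 * register directly, which costs an MMIO round trip.
 */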
4059 * cik_cp_compute_enable - enable/disable the compute CP MEs
4061 * @rdev: radeon_device pointer
4062 * @enable: enable or disable the MEs
4064 * Halts or unhalts the compute MEs.
4066 static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
4069 WREG32(CP_MEC_CNTL, 0);
4071 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
4076 * cik_cp_compute_load_microcode - load the compute CP ME ucode
4078 * @rdev: radeon_device pointer
4080 * Loads the compute MEC1&2 ucode.
4081 * Returns 0 for success, -EINVAL if the ucode is not available.
4083 static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
	const __be32 *fw_data;
	int i;

	if (!rdev->mec_fw)
		return -EINVAL;
4091 cik_cp_compute_enable(rdev, false);
4094 fw_data = (const __be32 *)rdev->mec_fw->data;
4095 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4096 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4097 WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
4098 WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
4100 if (rdev->family == CHIP_KAVERI) {
4102 fw_data = (const __be32 *)rdev->mec_fw->data;
4103 WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
4104 for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
4105 WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
		WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}
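/*
 * Note: the MEC2 ucode load above is Kaveri-only because only KV has a
 * second MEC (see cik_mec_init() below: KV has two MECs, CI/KB one).
 */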
4113 * cik_cp_compute_start - start the compute queues
4115 * @rdev: radeon_device pointer
4117 * Enable the compute queues.
4118 * Returns 0 for success, error for failure.
4120 static int cik_cp_compute_start(struct radeon_device *rdev)
4122 cik_cp_compute_enable(rdev, true);
4128 * cik_cp_compute_fini - stop the compute queues
4130 * @rdev: radeon_device pointer
4132 * Stop the compute queues and tear down the driver queue
4135 static void cik_cp_compute_fini(struct radeon_device *rdev)
4139 cik_cp_compute_enable(rdev, false);
	for (i = 0; i < 2; i++) {
		if (i == 0)
			idx = CAYMAN_RING_TYPE_CP1_INDEX;
		else
			idx = CAYMAN_RING_TYPE_CP2_INDEX;
4147 if (rdev->ring[idx].mqd_obj) {
4148 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
4149 if (unlikely(r != 0))
4150 dev_warn(rdev->dev, "(%d) reserve MQD bo failed\n", r);
4152 radeon_bo_unpin(rdev->ring[idx].mqd_obj);
4153 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
4155 radeon_bo_unref(&rdev->ring[idx].mqd_obj);
4156 rdev->ring[idx].mqd_obj = NULL;
4161 static void cik_mec_fini(struct radeon_device *rdev)
4165 if (rdev->mec.hpd_eop_obj) {
4166 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
4167 if (unlikely(r != 0))
4168 dev_warn(rdev->dev, "(%d) reserve HPD EOP bo failed\n", r);
4169 radeon_bo_unpin(rdev->mec.hpd_eop_obj);
4170 radeon_bo_unreserve(rdev->mec.hpd_eop_obj);
4172 radeon_bo_unref(&rdev->mec.hpd_eop_obj);
4173 rdev->mec.hpd_eop_obj = NULL;
4177 #define MEC_HPD_SIZE 2048
4179 static int cik_mec_init(struct radeon_device *rdev)
4185 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
4186 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
4188 if (rdev->family == CHIP_KAVERI)
4189 rdev->mec.num_mec = 2;
4191 rdev->mec.num_mec = 1;
4192 rdev->mec.num_pipe = 4;
4193 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
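	/*
	 * Worked example of the comment above: KV has 2 MECs * 4 pipes *
	 * 8 queues = 64 queues; CI/KB have 1 MEC * 4 * 8 = 32 queues.
	 */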
4195 if (rdev->mec.hpd_eop_obj == NULL) {
		r = radeon_bo_create(rdev,
				     rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL,
				     &rdev->mec.hpd_eop_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}
4207 r = radeon_bo_reserve(rdev->mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		cik_mec_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->mec.hpd_eop_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin HPD EOP bo failed\n", r);
		cik_mec_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(rdev->dev, "(%d) map HPD EOP bo failed\n", r);
		cik_mec_fini(rdev);
		return r;
	}
4226 /* clear memory. Not sure if this is required or not */
	memset(hpd, 0, rdev->mec.num_mec * rdev->mec.num_pipe * MEC_HPD_SIZE * 2);
4229 radeon_bo_kunmap(rdev->mec.hpd_eop_obj);
	radeon_bo_unreserve(rdev->mec.hpd_eop_obj);

	return 0;
}
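/*
 * Sizing note: the HPD EOP bo allocated above is
 * num_mec * num_pipe * MEC_HPD_SIZE * 2 bytes, i.e.
 * 2 * 4 * 2048 * 2 = 32 KB on KV and 16 KB on CI/KB.
 */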
struct hqd_registers
{
4237 u32 cp_mqd_base_addr;
4238 u32 cp_mqd_base_addr_hi;
4241 u32 cp_hqd_persistent_state;
4242 u32 cp_hqd_pipe_priority;
4243 u32 cp_hqd_queue_priority;
4246 u32 cp_hqd_pq_base_hi;
4248 u32 cp_hqd_pq_rptr_report_addr;
4249 u32 cp_hqd_pq_rptr_report_addr_hi;
4250 u32 cp_hqd_pq_wptr_poll_addr;
4251 u32 cp_hqd_pq_wptr_poll_addr_hi;
4252 u32 cp_hqd_pq_doorbell_control;
4254 u32 cp_hqd_pq_control;
4255 u32 cp_hqd_ib_base_addr;
4256 u32 cp_hqd_ib_base_addr_hi;
4258 u32 cp_hqd_ib_control;
4259 u32 cp_hqd_iq_timer;
4261 u32 cp_hqd_dequeue_request;
4262 u32 cp_hqd_dma_offload;
4263 u32 cp_hqd_sema_cmd;
4264 u32 cp_hqd_msg_type;
4265 u32 cp_hqd_atomic0_preop_lo;
4266 u32 cp_hqd_atomic0_preop_hi;
4267 u32 cp_hqd_atomic1_preop_lo;
4268 u32 cp_hqd_atomic1_preop_hi;
4269 u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
};

struct bonaire_mqd
{
	u32 header;
	u32 dispatch_initiator;
4281 u32 pipeline_stat_enable;
4282 u32 perf_counter_enable;
4288 u32 resource_limits;
4289 u32 static_thread_mgmt01[2];
4291 u32 static_thread_mgmt23[2];
4293 u32 thread_trace_enable;
4296 u32 vgtcs_invoke_count[2];
4297 struct hqd_registers queue_state;
	u32 interrupt_queue[64];
};
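/*
 * The MQD (memory queue descriptor) is the persistent, in-memory image
 * of a compute queue's state: the CP reads it when a queue is mapped
 * onto an HQD (hardware queue descriptor).  cik_cp_compute_resume()
 * below zeroes one per compute ring and fills in the fields it uses.
 */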
4303 * cik_cp_compute_resume - setup the compute queue registers
4305 * @rdev: radeon_device pointer
4307 * Program the compute queues and test them to make sure they
4309 * Returns 0 for success, error for failure.
static int cik_cp_compute_resume(struct radeon_device *rdev)
{
	int r, i, j, idx;
	u32 tmp;
	bool use_doorbell = true;
	u64 hqd_gpu_addr;
	u64 mqd_gpu_addr;
	u64 eop_gpu_addr;
	u64 wb_gpu_addr;
	u32 *buf;
	struct bonaire_mqd *mqd;
4323 r = cik_cp_compute_start(rdev);
4327 /* fix up chicken bits */
4328 tmp = RREG32(CP_CPF_DEBUG);
4330 WREG32(CP_CPF_DEBUG, tmp);
4332 /* init the pipes */
4333 mutex_lock(&rdev->srbm_mutex);
4334 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
4335 int me = (i < 4) ? 1 : 2;
4336 int pipe = (i < 4) ? i : (i - 4);
4338 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2);
4340 cik_srbm_select(rdev, me, pipe, 0, 0);
4342 /* write the EOP addr */
4343 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
4344 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
4346 /* set the VMID assigned */
4347 WREG32(CP_HPD_EOP_VMID, 0);
4349 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4350 tmp = RREG32(CP_HPD_EOP_CONTROL);
4351 tmp &= ~EOP_SIZE_MASK;
4352 tmp |= order_base_2(MEC_HPD_SIZE / 8);
4353 WREG32(CP_HPD_EOP_CONTROL, tmp);
4355 cik_srbm_select(rdev, 0, 0, 0, 0);
4356 mutex_unlock(&rdev->srbm_mutex);
4358 /* init the queues. Just two for now. */
	for (i = 0; i < 2; i++) {
		if (i == 0)
			idx = CAYMAN_RING_TYPE_CP1_INDEX;
		else
			idx = CAYMAN_RING_TYPE_CP2_INDEX;
4365 if (rdev->ring[idx].mqd_obj == NULL) {
			r = radeon_bo_create(rdev,
					     sizeof(struct bonaire_mqd),
					     PAGE_SIZE, true,
					     RADEON_GEM_DOMAIN_GTT, NULL,
					     &rdev->ring[idx].mqd_obj);
			if (r) {
				dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
				return r;
			}
		}
4377 r = radeon_bo_reserve(rdev->ring[idx].mqd_obj, false);
4378 if (unlikely(r != 0)) {
			cik_cp_compute_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->ring[idx].mqd_obj, RADEON_GEM_DOMAIN_GTT,
				  &mqd_gpu_addr);
		if (r) {
			dev_warn(rdev->dev, "(%d) pin MQD bo failed\n", r);
			cik_cp_compute_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->ring[idx].mqd_obj, (void **)&buf);
		if (r) {
			dev_warn(rdev->dev, "(%d) map MQD bo failed\n", r);
			cik_cp_compute_fini(rdev);
			return r;
		}
4396 /* doorbell offset */
4397 rdev->ring[idx].doorbell_offset =
4398 (rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
4400 /* init the mqd struct */
4401 memset(buf, 0, sizeof(struct bonaire_mqd));
4403 mqd = (struct bonaire_mqd *)buf;
4404 mqd->header = 0xC0310800;
4405 mqd->static_thread_mgmt01[0] = 0xffffffff;
4406 mqd->static_thread_mgmt01[1] = 0xffffffff;
4407 mqd->static_thread_mgmt23[0] = 0xffffffff;
4408 mqd->static_thread_mgmt23[1] = 0xffffffff;
4410 mutex_lock(&rdev->srbm_mutex);
4411 cik_srbm_select(rdev, rdev->ring[idx].me,
4412 rdev->ring[idx].pipe,
4413 rdev->ring[idx].queue, 0);
4415 /* disable wptr polling */
4416 tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
4417 tmp &= ~WPTR_POLL_EN;
4418 WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
4420 /* enable doorbell? */
4421 mqd->queue_state.cp_hqd_pq_doorbell_control =
4422 RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
		if (use_doorbell)
			mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
		else
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_EN;
4427 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
4428 mqd->queue_state.cp_hqd_pq_doorbell_control);
4430 /* disable the queue if it's active */
4431 mqd->queue_state.cp_hqd_dequeue_request = 0;
4432 mqd->queue_state.cp_hqd_pq_rptr = 0;
		mqd->queue_state.cp_hqd_pq_wptr = 0;
4434 if (RREG32(CP_HQD_ACTIVE) & 1) {
4435 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
			/* use a separate counter: the outer loop index i
			 * selects the ring being programmed */
			for (j = 0; j < rdev->usec_timeout; j++) {
				if (!(RREG32(CP_HQD_ACTIVE) & 1))
					break;
				udelay(1);
			}
			WREG32(CP_HQD_DEQUEUE_REQUEST, mqd->queue_state.cp_hqd_dequeue_request);
			WREG32(CP_HQD_PQ_RPTR, mqd->queue_state.cp_hqd_pq_rptr);
			WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
		}
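		/*
		 * Deactivation handshake above: request a dequeue, poll
		 * CP_HQD_ACTIVE until the queue goes idle (bounded by
		 * usec_timeout), then restore the saved (zeroed) dequeue
		 * request, rptr and wptr before reprogramming the queue.
		 */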
4446 /* set the pointer to the MQD */
4447 mqd->queue_state.cp_mqd_base_addr = mqd_gpu_addr & 0xfffffffc;
4448 mqd->queue_state.cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
4449 WREG32(CP_MQD_BASE_ADDR, mqd->queue_state.cp_mqd_base_addr);
4450 WREG32(CP_MQD_BASE_ADDR_HI, mqd->queue_state.cp_mqd_base_addr_hi);
4451 /* set MQD vmid to 0 */
4452 mqd->queue_state.cp_mqd_control = RREG32(CP_MQD_CONTROL);
4453 mqd->queue_state.cp_mqd_control &= ~MQD_VMID_MASK;
4454 WREG32(CP_MQD_CONTROL, mqd->queue_state.cp_mqd_control);
		/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
4457 hqd_gpu_addr = rdev->ring[idx].gpu_addr >> 8;
4458 mqd->queue_state.cp_hqd_pq_base = hqd_gpu_addr;
4459 mqd->queue_state.cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4460 WREG32(CP_HQD_PQ_BASE, mqd->queue_state.cp_hqd_pq_base);
4461 WREG32(CP_HQD_PQ_BASE_HI, mqd->queue_state.cp_hqd_pq_base_hi);
4463 /* set up the HQD, this is similar to CP_RB0_CNTL */
4464 mqd->queue_state.cp_hqd_pq_control = RREG32(CP_HQD_PQ_CONTROL);
4465 mqd->queue_state.cp_hqd_pq_control &=
4466 ~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
4468 mqd->queue_state.cp_hqd_pq_control |=
4469 order_base_2(rdev->ring[idx].ring_size / 8);
4470 mqd->queue_state.cp_hqd_pq_control |=
4471 (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
		mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
#endif
4475 mqd->queue_state.cp_hqd_pq_control &=
4476 ~(UNORD_DISPATCH | ROQ_PQ_IB_FLIP | PQ_VOLATILE);
4477 mqd->queue_state.cp_hqd_pq_control |=
4478 PRIV_STATE | KMD_QUEUE; /* assuming kernel queue control */
4479 WREG32(CP_HQD_PQ_CONTROL, mqd->queue_state.cp_hqd_pq_control);
4481 /* only used if CP_PQ_WPTR_POLL_CNTL.WPTR_POLL_EN=1 */
		if (idx == CAYMAN_RING_TYPE_CP1_INDEX)
			wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP1_WPTR_OFFSET;
		else
			wb_gpu_addr = rdev->wb.gpu_addr + CIK_WB_CP2_WPTR_OFFSET;
4486 mqd->queue_state.cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc;
4487 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
4488 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR, mqd->queue_state.cp_hqd_pq_wptr_poll_addr);
4489 WREG32(CP_HQD_PQ_WPTR_POLL_ADDR_HI,
4490 mqd->queue_state.cp_hqd_pq_wptr_poll_addr_hi);
		/* set the wb address whether it's enabled or not */
		if (idx == CAYMAN_RING_TYPE_CP1_INDEX)
			wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET;
		else
			wb_gpu_addr = rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET;
4497 mqd->queue_state.cp_hqd_pq_rptr_report_addr = wb_gpu_addr & 0xfffffffc;
4498 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi =
4499 upper_32_bits(wb_gpu_addr) & 0xffff;
4500 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR,
4501 mqd->queue_state.cp_hqd_pq_rptr_report_addr);
4502 WREG32(CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4503 mqd->queue_state.cp_hqd_pq_rptr_report_addr_hi);
		/* enable the doorbell if requested */
		if (use_doorbell) {
			mqd->queue_state.cp_hqd_pq_doorbell_control =
				RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
			mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
			mqd->queue_state.cp_hqd_pq_doorbell_control |=
				DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
			mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
			mqd->queue_state.cp_hqd_pq_doorbell_control &=
				~(DOORBELL_SOURCE | DOORBELL_HIT);
		} else {
			mqd->queue_state.cp_hqd_pq_doorbell_control = 0;
		}
4519 WREG32(CP_HQD_PQ_DOORBELL_CONTROL,
4520 mqd->queue_state.cp_hqd_pq_doorbell_control);
4522 /* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4523 rdev->ring[idx].wptr = 0;
4524 mqd->queue_state.cp_hqd_pq_wptr = rdev->ring[idx].wptr;
4525 WREG32(CP_HQD_PQ_WPTR, mqd->queue_state.cp_hqd_pq_wptr);
4526 rdev->ring[idx].rptr = RREG32(CP_HQD_PQ_RPTR);
4527 mqd->queue_state.cp_hqd_pq_rptr = rdev->ring[idx].rptr;
4529 /* set the vmid for the queue */
4530 mqd->queue_state.cp_hqd_vmid = 0;
4531 WREG32(CP_HQD_VMID, mqd->queue_state.cp_hqd_vmid);
4533 /* activate the queue */
4534 mqd->queue_state.cp_hqd_active = 1;
4535 WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
4537 cik_srbm_select(rdev, 0, 0, 0, 0);
4538 mutex_unlock(&rdev->srbm_mutex);
4540 radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
4541 radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
4543 rdev->ring[idx].ready = true;
		r = radeon_ring_test(rdev, idx, &rdev->ring[idx]);
		if (r)
			rdev->ring[idx].ready = false;
	}

	return 0;
}
static void cik_cp_enable(struct radeon_device *rdev, bool enable)
{
	cik_cp_gfx_enable(rdev, enable);
	cik_cp_compute_enable(rdev, enable);
}
static int cik_cp_load_microcode(struct radeon_device *rdev)
{
	int r;

	r = cik_cp_gfx_load_microcode(rdev);
	if (r)
		return r;
	r = cik_cp_compute_load_microcode(rdev);
	if (r)
		return r;

	return 0;
}
static void cik_cp_fini(struct radeon_device *rdev)
{
	cik_cp_gfx_fini(rdev);
	cik_cp_compute_fini(rdev);
}
static int cik_cp_resume(struct radeon_device *rdev)
{
	int r;

	cik_enable_gui_idle_interrupt(rdev, false);

	r = cik_cp_load_microcode(rdev);
	if (r)
		return r;

	r = cik_cp_gfx_resume(rdev);
	if (r)
		return r;
	r = cik_cp_compute_resume(rdev);
	if (r)
		return r;

	cik_enable_gui_idle_interrupt(rdev, true);

	return 0;
}
4600 static void cik_print_gpu_status_regs(struct radeon_device *rdev)
4602 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
4603 RREG32(GRBM_STATUS));
4604 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
4605 RREG32(GRBM_STATUS2));
4606 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
4607 RREG32(GRBM_STATUS_SE0));
4608 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
4609 RREG32(GRBM_STATUS_SE1));
4610 dev_info(rdev->dev, " GRBM_STATUS_SE2=0x%08X\n",
4611 RREG32(GRBM_STATUS_SE2));
4612 dev_info(rdev->dev, " GRBM_STATUS_SE3=0x%08X\n",
4613 RREG32(GRBM_STATUS_SE3));
4614 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
4615 RREG32(SRBM_STATUS));
4616 dev_info(rdev->dev, " SRBM_STATUS2=0x%08X\n",
4617 RREG32(SRBM_STATUS2));
4618 dev_info(rdev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
4619 RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
4620 dev_info(rdev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
4621 RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
4622 dev_info(rdev->dev, " CP_STAT = 0x%08x\n", RREG32(CP_STAT));
4623 dev_info(rdev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
4624 RREG32(CP_STALLED_STAT1));
4625 dev_info(rdev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
4626 RREG32(CP_STALLED_STAT2));
4627 dev_info(rdev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
4628 RREG32(CP_STALLED_STAT3));
4629 dev_info(rdev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
4630 RREG32(CP_CPF_BUSY_STAT));
4631 dev_info(rdev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
4632 RREG32(CP_CPF_STALLED_STAT1));
4633 dev_info(rdev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(CP_CPF_STATUS));
4634 dev_info(rdev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(CP_CPC_BUSY_STAT));
4635 dev_info(rdev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
4636 RREG32(CP_CPC_STALLED_STAT1));
4637 dev_info(rdev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(CP_CPC_STATUS));
4641 * cik_gpu_check_soft_reset - check which blocks are busy
4643 * @rdev: radeon_device pointer
4645 * Check which blocks are busy and return the relevant reset
4646 * mask to be used by cik_gpu_soft_reset().
4647 * Returns a mask of the blocks to be reset.
4649 u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
4655 tmp = RREG32(GRBM_STATUS);
4656 if (tmp & (PA_BUSY | SC_BUSY |
4657 BCI_BUSY | SX_BUSY |
4658 TA_BUSY | VGT_BUSY |
4660 GDS_BUSY | SPI_BUSY |
4661 IA_BUSY | IA_BUSY_NO_DMA))
4662 reset_mask |= RADEON_RESET_GFX;
4664 if (tmp & (CP_BUSY | CP_COHERENCY_BUSY))
4665 reset_mask |= RADEON_RESET_CP;
4668 tmp = RREG32(GRBM_STATUS2);
	if (tmp & RLC_BUSY)
		reset_mask |= RADEON_RESET_RLC;
4672 /* SDMA0_STATUS_REG */
4673 tmp = RREG32(SDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET);
4674 if (!(tmp & SDMA_IDLE))
4675 reset_mask |= RADEON_RESET_DMA;
4677 /* SDMA1_STATUS_REG */
4678 tmp = RREG32(SDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
4679 if (!(tmp & SDMA_IDLE))
4680 reset_mask |= RADEON_RESET_DMA1;
4683 tmp = RREG32(SRBM_STATUS2);
4684 if (tmp & SDMA_BUSY)
4685 reset_mask |= RADEON_RESET_DMA;
4687 if (tmp & SDMA1_BUSY)
4688 reset_mask |= RADEON_RESET_DMA1;
4691 tmp = RREG32(SRBM_STATUS);
	if (tmp & IH_BUSY)
		reset_mask |= RADEON_RESET_IH;
	if (tmp & SEM_BUSY)
		reset_mask |= RADEON_RESET_SEM;
4699 if (tmp & GRBM_RQ_PENDING)
4700 reset_mask |= RADEON_RESET_GRBM;
	if (tmp & VMC_BUSY)
		reset_mask |= RADEON_RESET_VMC;
4705 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
4706 MCC_BUSY | MCD_BUSY))
4707 reset_mask |= RADEON_RESET_MC;
4709 if (evergreen_is_display_hung(rdev))
4710 reset_mask |= RADEON_RESET_DISPLAY;
	/* Skip MC reset as it's most likely not hung, just busy */
4713 if (reset_mask & RADEON_RESET_MC) {
4714 DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
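/*
 * Illustrative usage (mirrors cik_gfx_is_lockup() below): callers test
 * the returned mask for the blocks they own, e.g.
 *
 *	if (cik_gpu_check_soft_reset(rdev) & RADEON_RESET_DMA)
 *		... the SDMA0 engine is hung, request a soft reset ...
 */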
4722 * cik_gpu_soft_reset - soft reset GPU
4724 * @rdev: radeon_device pointer
4725 * @reset_mask: mask of which blocks to reset
4727 * Soft reset the blocks specified in @reset_mask.
4729 static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
4731 struct evergreen_mc_save save;
4732 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	if (reset_mask == 0)
		return;
4738 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
4740 cik_print_gpu_status_regs(rdev);
4741 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
4742 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
4743 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
4744 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
4753 /* Disable GFX parsing/prefetching */
4754 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
4756 /* Disable MEC parsing/prefetching */
4757 WREG32(CP_MEC_CNTL, MEC_ME1_HALT | MEC_ME2_HALT);
4759 if (reset_mask & RADEON_RESET_DMA) {
		/* sdma0 */
		tmp = RREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET);
		tmp |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + SDMA0_REGISTER_OFFSET, tmp);
	}
4765 if (reset_mask & RADEON_RESET_DMA1) {
		/* sdma1 */
		tmp = RREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET);
		tmp |= SDMA_HALT;
		WREG32(SDMA0_ME_CNTL + SDMA1_REGISTER_OFFSET, tmp);
	}
4772 evergreen_mc_stop(rdev, &save);
4773 if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
4777 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP))
4778 grbm_soft_reset = SOFT_RESET_CP | SOFT_RESET_GFX;
4780 if (reset_mask & RADEON_RESET_CP) {
4781 grbm_soft_reset |= SOFT_RESET_CP;
4783 srbm_soft_reset |= SOFT_RESET_GRBM;
4786 if (reset_mask & RADEON_RESET_DMA)
4787 srbm_soft_reset |= SOFT_RESET_SDMA;
4789 if (reset_mask & RADEON_RESET_DMA1)
4790 srbm_soft_reset |= SOFT_RESET_SDMA1;
4792 if (reset_mask & RADEON_RESET_DISPLAY)
4793 srbm_soft_reset |= SOFT_RESET_DC;
4795 if (reset_mask & RADEON_RESET_RLC)
4796 grbm_soft_reset |= SOFT_RESET_RLC;
4798 if (reset_mask & RADEON_RESET_SEM)
4799 srbm_soft_reset |= SOFT_RESET_SEM;
4801 if (reset_mask & RADEON_RESET_IH)
4802 srbm_soft_reset |= SOFT_RESET_IH;
4804 if (reset_mask & RADEON_RESET_GRBM)
4805 srbm_soft_reset |= SOFT_RESET_GRBM;
4807 if (reset_mask & RADEON_RESET_VMC)
4808 srbm_soft_reset |= SOFT_RESET_VMC;
4810 if (!(rdev->flags & RADEON_IS_IGP)) {
4811 if (reset_mask & RADEON_RESET_MC)
4812 srbm_soft_reset |= SOFT_RESET_MC;
4815 if (grbm_soft_reset) {
4816 tmp = RREG32(GRBM_SOFT_RESET);
4817 tmp |= grbm_soft_reset;
4818 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
4819 WREG32(GRBM_SOFT_RESET, tmp);
4820 tmp = RREG32(GRBM_SOFT_RESET);
4824 tmp &= ~grbm_soft_reset;
4825 WREG32(GRBM_SOFT_RESET, tmp);
4826 tmp = RREG32(GRBM_SOFT_RESET);
4829 if (srbm_soft_reset) {
4830 tmp = RREG32(SRBM_SOFT_RESET);
4831 tmp |= srbm_soft_reset;
4832 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
4833 WREG32(SRBM_SOFT_RESET, tmp);
4834 tmp = RREG32(SRBM_SOFT_RESET);
4838 tmp &= ~srbm_soft_reset;
4839 WREG32(SRBM_SOFT_RESET, tmp);
4840 tmp = RREG32(SRBM_SOFT_RESET);
4843 /* Wait a little for things to settle down */
4846 evergreen_mc_resume(rdev, &save);
4849 cik_print_gpu_status_regs(rdev);
4853 * cik_asic_reset - soft reset GPU
4855 * @rdev: radeon_device pointer
4857 * Look up which blocks are hung and attempt
4859 * Returns 0 for success.
4861 int cik_asic_reset(struct radeon_device *rdev)
4865 reset_mask = cik_gpu_check_soft_reset(rdev);
	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);
4870 cik_gpu_soft_reset(rdev, reset_mask);
4872 reset_mask = cik_gpu_check_soft_reset(rdev);
	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
4881 * cik_gfx_is_lockup - check if the 3D engine is locked up
4883 * @rdev: radeon_device pointer
4884 * @ring: radeon_ring structure holding ring information
4886 * Check if the 3D engine is locked up (CIK).
4887 * Returns true if the engine is locked, false if not.
4889 bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4891 u32 reset_mask = cik_gpu_check_soft_reset(rdev);
4893 if (!(reset_mask & (RADEON_RESET_GFX |
4894 RADEON_RESET_COMPUTE |
4895 RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
4899 /* force CP activities */
4900 radeon_ring_force_activity(rdev, ring);
4901 return radeon_ring_test_lockup(rdev, ring);
4906 * cik_mc_program - program the GPU memory controller
4908 * @rdev: radeon_device pointer
4910 * Set the location of vram, gart, and AGP in the GPU's
4911 * physical address space (CIK).
4913 static void cik_mc_program(struct radeon_device *rdev)
4915 struct evergreen_mc_save save;
4919 /* Initialize HDP */
4920 for (i = 0, j = 0; i < 32; i++, j += 0x18) {
4921 WREG32((0x2c14 + j), 0x00000000);
4922 WREG32((0x2c18 + j), 0x00000000);
4923 WREG32((0x2c1c + j), 0x00000000);
4924 WREG32((0x2c20 + j), 0x00000000);
4925 WREG32((0x2c24 + j), 0x00000000);
4927 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
4929 evergreen_mc_stop(rdev, &save);
4930 if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
4933 /* Lockout access through VGA aperture*/
4934 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
4935 /* Update configuration */
4936 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
4937 rdev->mc.vram_start >> 12);
4938 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
4939 rdev->mc.vram_end >> 12);
4940 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
4941 rdev->vram_scratch.gpu_addr >> 12);
4942 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4943 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4944 WREG32(MC_VM_FB_LOCATION, tmp);
4945 /* XXX double check these! */
4946 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
4947 WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
4948 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
4949 WREG32(MC_VM_AGP_BASE, 0);
4950 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
4951 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
4952 if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
4955 evergreen_mc_resume(rdev, &save);
4956 /* we need to own VRAM, so turn off the VGA renderer here
4957 * to stop it overwriting our objects */
4958 rv515_vga_render_disable(rdev);
4962 * cik_mc_init - initialize the memory controller driver params
4964 * @rdev: radeon_device pointer
4966 * Look up the amount of vram, vram width, and decide how to place
4967 * vram and gart within the GPU's physical address space (CIK).
4968 * Returns 0 for success.
4970 static int cik_mc_init(struct radeon_device *rdev)
4973 int chansize, numchan;
	/* Get VRAM information */
4976 rdev->mc.vram_is_ddr = true;
4977 tmp = RREG32(MC_ARB_RAMCFG);
4978 if (tmp & CHANSIZE_MASK) {
4983 tmp = RREG32(MC_SHARED_CHMAP);
4984 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
5014 rdev->mc.vram_width = numchan * chansize;
5015 /* Could aper size report 0 ? */
5016 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
5017 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* size in MB; CONFIG_MEMSIZE reports MB on CIK, as on SI */
5019 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
5020 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
5021 rdev->mc.visible_vram_size = rdev->mc.aper_size;
5022 si_vram_gtt_location(rdev, &rdev->mc);
5023 radeon_update_bandwidth_info(rdev);
5030 * VMID 0 is the physical GPU addresses as used by the kernel.
5031 * VMIDs 1-15 are used for userspace clients and are handled
5032 * by the radeon vm/hsa code.
5035 * cik_pcie_gart_tlb_flush - gart tlb flush callback
5037 * @rdev: radeon_device pointer
5039 * Flush the TLB for the VMID 0 page table (CIK).
5041 void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
5043 /* flush hdp cache */
5044 WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
5046 /* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
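/*
 * Illustrative sketch (not a driver entry point; the name is made up):
 * bits 0-15 of VM_INVALIDATE_REQUEST select which VM contexts to
 * invalidate, so a single VMID can be flushed with its own bit, the
 * same way cik_vm_flush() emits "1 << vm->id" from the ring.
 */
static inline void cik_invalidate_tlb_vmid(struct radeon_device *rdev,
					   unsigned vmid)
{
	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
}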
5051 * cik_pcie_gart_enable - gart enable
5053 * @rdev: radeon_device pointer
5055 * This sets up the TLBs, programs the page tables for VMID0,
5056 * sets up the hw for VMIDs 1-15 which are allocated on
5057 * demand, and sets up the global locations for the LDS, GDS,
5058 * and GPUVM for FSA64 clients (CIK).
5059 * Returns 0 for success, errors for failure.
5061 static int cik_pcie_gart_enable(struct radeon_device *rdev)
5065 if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
5072 radeon_gart_restore(rdev);
5073 /* Setup TLB control */
5074 WREG32(MC_VM_MX_L1_TLB_CNTL,
5077 SYSTEM_ACCESS_MODE_NOT_IN_SYS |
5078 ENABLE_ADVANCED_DRIVER_MODEL |
5079 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
5080 /* Setup L2 cache */
5081 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
5082 ENABLE_L2_FRAGMENT_PROCESSING |
5083 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
5084 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
5085 EFFECTIVE_L2_QUEUE_SIZE(7) |
5086 CONTEXT1_IDENTITY_ACCESS_MODE(1));
5087 WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
5088 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
5089 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
5090 /* setup context0 */
5091 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
5092 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
5093 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
5094 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
5095 (u32)(rdev->dummy_page.addr >> 12));
5096 WREG32(VM_CONTEXT0_CNTL2, 0);
5097 WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
5098 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
5104 /* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
5108 /* set vm size, must be a multiple of 4 */
5109 WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
5110 WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
5111 for (i = 1; i < 16; i++) {
5113 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
5114 rdev->gart.table_addr >> 12);
5116 WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
5117 rdev->gart.table_addr >> 12);
5120 /* enable context1-15 */
5121 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
5122 (u32)(rdev->dummy_page.addr >> 12));
5123 WREG32(VM_CONTEXT1_CNTL2, 4);
5124 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
5125 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5126 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
5127 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5128 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
5129 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
5130 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
5131 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
5132 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
5133 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
5134 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
5135 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
5136 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
5138 /* TC cache setup ??? */
5139 WREG32(TC_CFG_L1_LOAD_POLICY0, 0);
5140 WREG32(TC_CFG_L1_LOAD_POLICY1, 0);
5141 WREG32(TC_CFG_L1_STORE_POLICY, 0);
5143 WREG32(TC_CFG_L2_LOAD_POLICY0, 0);
5144 WREG32(TC_CFG_L2_LOAD_POLICY1, 0);
5145 WREG32(TC_CFG_L2_STORE_POLICY0, 0);
5146 WREG32(TC_CFG_L2_STORE_POLICY1, 0);
5147 WREG32(TC_CFG_L2_ATOMIC_POLICY, 0);
5149 WREG32(TC_CFG_L1_VOLATILE, 0);
5150 WREG32(TC_CFG_L2_VOLATILE, 0);
5152 if (rdev->family == CHIP_KAVERI) {
		u32 tmp = RREG32(CHUB_CONTROL);
		tmp &= ~BYPASS_VTC;
		WREG32(CHUB_CONTROL, tmp);
	}
5158 /* XXX SH_MEM regs */
5159 /* where to put LDS, scratch, GPUVM in FSA64 space */
5160 mutex_lock(&rdev->srbm_mutex);
5161 for (i = 0; i < 16; i++) {
5162 cik_srbm_select(rdev, 0, 0, 0, i);
5163 /* CP and shaders */
5164 WREG32(SH_MEM_CONFIG, 0);
5165 WREG32(SH_MEM_APE1_BASE, 1);
5166 WREG32(SH_MEM_APE1_LIMIT, 0);
5167 WREG32(SH_MEM_BASES, 0);
5169 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA0_REGISTER_OFFSET, 0);
5170 WREG32(SDMA0_GFX_APE1_CNTL + SDMA0_REGISTER_OFFSET, 0);
5171 WREG32(SDMA0_GFX_VIRTUAL_ADDR + SDMA1_REGISTER_OFFSET, 0);
5172 WREG32(SDMA0_GFX_APE1_CNTL + SDMA1_REGISTER_OFFSET, 0);
5173 /* XXX SDMA RLC - todo */
5175 cik_srbm_select(rdev, 0, 0, 0, 0);
5176 mutex_unlock(&rdev->srbm_mutex);
5178 cik_pcie_gart_tlb_flush(rdev);
5179 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
5180 (unsigned)(rdev->mc.gtt_size >> 20),
5181 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;

	return 0;
}
5187 * cik_pcie_gart_disable - gart disable
5189 * @rdev: radeon_device pointer
 * This disables all VM page tables (CIK).
5193 static void cik_pcie_gart_disable(struct radeon_device *rdev)
5195 /* Disable all tables */
5196 WREG32(VM_CONTEXT0_CNTL, 0);
5197 WREG32(VM_CONTEXT1_CNTL, 0);
5198 /* Setup TLB control */
5199 WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
5200 SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
5201 /* Setup L2 cache */
	WREG32(VM_L2_CNTL,
	       ENABLE_L2_FRAGMENT_PROCESSING |
5204 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
5205 ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
5206 EFFECTIVE_L2_QUEUE_SIZE(7) |
5207 CONTEXT1_IDENTITY_ACCESS_MODE(1));
5208 WREG32(VM_L2_CNTL2, 0);
5209 WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
5210 L2_CACHE_BIGK_FRAGMENT_SIZE(6));
5211 radeon_gart_table_vram_unpin(rdev);
5215 * cik_pcie_gart_fini - vm fini callback
5217 * @rdev: radeon_device pointer
5219 * Tears down the driver GART/VM setup (CIK).
5221 static void cik_pcie_gart_fini(struct radeon_device *rdev)
5223 cik_pcie_gart_disable(rdev);
5224 radeon_gart_table_vram_free(rdev);
5225 radeon_gart_fini(rdev);
5230 * cik_ib_parse - vm ib_parse callback
5232 * @rdev: radeon_device pointer
5233 * @ib: indirect buffer pointer
5235 * CIK uses hw IB checking so this is a nop (CIK).
5237 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
5244 * VMID 0 is the physical GPU addresses as used by the kernel.
5245 * VMIDs 1-15 are used for userspace clients and are handled
5246 * by the radeon vm/hsa code.
5249 * cik_vm_init - cik vm init callback
5251 * @rdev: radeon_device pointer
5253 * Inits cik specific vm parameters (number of VMs, base of vram for
5254 * VMIDs 1-15) (CIK).
5255 * Returns 0 for success.
5257 int cik_vm_init(struct radeon_device *rdev)
5260 rdev->vm_manager.nvm = 16;
5261 /* base offset of vram pages */
5262 if (rdev->flags & RADEON_IS_IGP) {
5263 u64 tmp = RREG32(MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else {
		rdev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}
5273 * cik_vm_fini - cik vm fini callback
5275 * @rdev: radeon_device pointer
5277 * Tear down any asic specific VM setup (CIK).
5279 void cik_vm_fini(struct radeon_device *rdev)
5284 * cik_vm_decode_fault - print human readable fault info
5286 * @rdev: radeon_device pointer
5287 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
5288 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
5290 * Print human readable fault information (CIK).
5292 static void cik_vm_decode_fault(struct radeon_device *rdev,
5293 u32 status, u32 addr, u32 mc_client)
5296 u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
5297 u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
5298 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
5299 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
5301 if (rdev->family == CHIP_HAWAII)
5302 mc_id = (status & HAWAII_MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5304 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
5306 printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
5307 protections, vmid, addr,
5308 (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
	       block, mc_client, mc_id);
}
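/*
 * The mc_client value is four packed ASCII bytes: e.g. a (hypothetical)
 * value of 0x43423020 unpacks to the string "CB0 " via block[] above.
 */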
5313 * cik_vm_flush - cik vm flush using the CP
5315 * @rdev: radeon_device pointer
5317 * Update the page table base and flush the VM TLB
5318 * using the CP (CIK).
5320 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5322 struct radeon_ring *ring = &rdev->ring[ridx];
5327 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5328 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5329 WRITE_DATA_DST_SEL(0)));
5331 radeon_ring_write(ring,
5332 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
5334 radeon_ring_write(ring,
5335 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
5337 radeon_ring_write(ring, 0);
5338 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
5340 /* update SH_MEM_* regs */
5341 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5342 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5343 WRITE_DATA_DST_SEL(0)));
5344 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5345 radeon_ring_write(ring, 0);
5346 radeon_ring_write(ring, VMID(vm->id));
5348 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
5349 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5350 WRITE_DATA_DST_SEL(0)));
5351 radeon_ring_write(ring, SH_MEM_BASES >> 2);
5352 radeon_ring_write(ring, 0);
5354 radeon_ring_write(ring, 0); /* SH_MEM_BASES */
5355 radeon_ring_write(ring, 0); /* SH_MEM_CONFIG */
5356 radeon_ring_write(ring, 1); /* SH_MEM_APE1_BASE */
5357 radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */
5359 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5360 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5361 WRITE_DATA_DST_SEL(0)));
5362 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5363 radeon_ring_write(ring, 0);
5364 radeon_ring_write(ring, VMID(0));
5367 /* We should be using the WAIT_REG_MEM packet here like in
	 * cik_fence_ring_emit(), but it causes the CP to hang in this
	 * situation.
	 */
5371 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5372 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5373 WRITE_DATA_DST_SEL(0)));
5374 radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
5375 radeon_ring_write(ring, 0);
5376 radeon_ring_write(ring, 0);
5378 /* bits 0-15 are the VM contexts0-15 */
5379 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5380 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5381 WRITE_DATA_DST_SEL(0)));
5382 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5383 radeon_ring_write(ring, 0);
5384 radeon_ring_write(ring, 1 << vm->id);
5386 /* compute doesn't have PFP */
5387 if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
5388 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5389 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5390 radeon_ring_write(ring, 0x0);
5396 * The RLC is a multi-purpose microengine that handles a
5397 * variety of functions, the most important of which is
5398 * the interrupt controller.
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
					  bool enable)
{
	u32 tmp = RREG32(CP_INT_CNTL_RING0);

	if (enable)
		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	else
		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING0, tmp);
}
static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
{
	u32 tmp;

	tmp = RREG32(RLC_LB_CNTL);
	if (enable)
		tmp |= LOAD_BALANCE_ENABLE;
	else
		tmp &= ~LOAD_BALANCE_ENABLE;
	WREG32(RLC_LB_CNTL, tmp);
}
5424 static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
5429 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
5430 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
5431 cik_select_se_sh(rdev, i, j);
5432 for (k = 0; k < rdev->usec_timeout; k++) {
5433 if (RREG32(RLC_SERDES_CU_MASTER_BUSY) == 0)
5439 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5441 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
5442 for (k = 0; k < rdev->usec_timeout; k++) {
5443 if ((RREG32(RLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
5449 static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
5453 tmp = RREG32(RLC_CNTL);
5455 WREG32(RLC_CNTL, rlc);
5458 static u32 cik_halt_rlc(struct radeon_device *rdev)
5462 orig = data = RREG32(RLC_CNTL);
5464 if (data & RLC_ENABLE) {
5467 data &= ~RLC_ENABLE;
5468 WREG32(RLC_CNTL, data);
5470 for (i = 0; i < rdev->usec_timeout; i++) {
5471 if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
5476 cik_wait_for_rlc_serdes(rdev);
5482 void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
5486 tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
5487 WREG32(RLC_GPR_REG2, tmp);
5489 mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
5490 for (i = 0; i < rdev->usec_timeout; i++) {
5491 if ((RREG32(RLC_GPM_STAT) & mask) == mask)
5496 for (i = 0; i < rdev->usec_timeout; i++) {
5497 if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
5503 void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
5507 tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
5508 WREG32(RLC_GPR_REG2, tmp);
5512 * cik_rlc_stop - stop the RLC ME
5514 * @rdev: radeon_device pointer
5516 * Halt the RLC ME (MicroEngine) (CIK).
5518 static void cik_rlc_stop(struct radeon_device *rdev)
5520 WREG32(RLC_CNTL, 0);
5522 cik_enable_gui_idle_interrupt(rdev, false);
5524 cik_wait_for_rlc_serdes(rdev);
5528 * cik_rlc_start - start the RLC ME
5530 * @rdev: radeon_device pointer
5532 * Unhalt the RLC ME (MicroEngine) (CIK).
5534 static void cik_rlc_start(struct radeon_device *rdev)
5536 WREG32(RLC_CNTL, RLC_ENABLE);
5538 cik_enable_gui_idle_interrupt(rdev, true);
5544 * cik_rlc_resume - setup the RLC hw
5546 * @rdev: radeon_device pointer
5548 * Initialize the RLC registers, load the ucode,
5549 * and start the RLC (CIK).
5550 * Returns 0 for success, -EINVAL if the ucode is not available.
5552 static int cik_rlc_resume(struct radeon_device *rdev)
5555 const __be32 *fw_data;
	switch (rdev->family) {
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		size = BONAIRE_RLC_UCODE_SIZE;
		break;
	case CHIP_KAVERI:
		size = KV_RLC_UCODE_SIZE;
		break;
	case CHIP_KABINI:
	default:
		size = KB_RLC_UCODE_SIZE;
		break;
	}
5577 tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
5578 WREG32(RLC_CGCG_CGLS_CTRL, tmp);
5586 WREG32(RLC_LB_CNTR_INIT, 0);
5587 WREG32(RLC_LB_CNTR_MAX, 0x00008000);
5589 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5590 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
5591 WREG32(RLC_LB_PARAMS, 0x00600408);
5592 WREG32(RLC_LB_CNTL, 0x80000004);
5594 WREG32(RLC_MC_CNTL, 0);
5595 WREG32(RLC_UCODE_CNTL, 0);
5597 fw_data = (const __be32 *)rdev->rlc_fw->data;
5598 WREG32(RLC_GPM_UCODE_ADDR, 0);
5599 for (i = 0; i < size; i++)
5600 WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
5601 WREG32(RLC_GPM_UCODE_ADDR, 0);
5603 /* XXX - find out what chips support lbpw */
5604 cik_enable_lbpw(rdev, false);
5606 if (rdev->family == CHIP_BONAIRE)
5607 WREG32(RLC_DRIVER_DMA_STATUS, 0);
5609 cik_rlc_start(rdev);
5614 static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
5616 u32 data, orig, tmp, tmp2;
5618 orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5620 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5621 cik_enable_gui_idle_interrupt(rdev, true);
5623 tmp = cik_halt_rlc(rdev);
5625 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5626 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5627 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5628 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
5629 WREG32(RLC_SERDES_WR_CTRL, tmp2);
5631 cik_update_rlc(rdev, tmp);
5633 data |= CGCG_EN | CGLS_EN;
5635 cik_enable_gui_idle_interrupt(rdev, false);
5637 RREG32(CB_CGTT_SCLK_CTRL);
5638 RREG32(CB_CGTT_SCLK_CTRL);
5639 RREG32(CB_CGTT_SCLK_CTRL);
5640 RREG32(CB_CGTT_SCLK_CTRL);
5642 data &= ~(CGCG_EN | CGLS_EN);
5646 WREG32(RLC_CGCG_CGLS_CTRL, data);
5650 static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
5652 u32 data, orig, tmp = 0;
5654 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5655 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
5656 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5657 orig = data = RREG32(CP_MEM_SLP_CNTL);
5658 data |= CP_MEM_LS_EN;
5660 WREG32(CP_MEM_SLP_CNTL, data);
5664 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5667 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5669 tmp = cik_halt_rlc(rdev);
5671 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5672 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5673 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5674 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
5675 WREG32(RLC_SERDES_WR_CTRL, data);
5677 cik_update_rlc(rdev, tmp);
5679 if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
5680 orig = data = RREG32(CGTS_SM_CTRL_REG);
5681 data &= ~SM_MODE_MASK;
5682 data |= SM_MODE(0x2);
5683 data |= SM_MODE_ENABLE;
5684 data &= ~CGTS_OVERRIDE;
5685 if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
5686 (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
5687 data &= ~CGTS_LS_OVERRIDE;
5688 data &= ~ON_MONITOR_ADD_MASK;
5689 data |= ON_MONITOR_ADD_EN;
5690 data |= ON_MONITOR_ADD(0x96);
5692 WREG32(CGTS_SM_CTRL_REG, data);
5695 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5698 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5700 data = RREG32(RLC_MEM_SLP_CNTL);
5701 if (data & RLC_MEM_LS_EN) {
5702 data &= ~RLC_MEM_LS_EN;
5703 WREG32(RLC_MEM_SLP_CNTL, data);
5706 data = RREG32(CP_MEM_SLP_CNTL);
5707 if (data & CP_MEM_LS_EN) {
5708 data &= ~CP_MEM_LS_EN;
5709 WREG32(CP_MEM_SLP_CNTL, data);
5712 orig = data = RREG32(CGTS_SM_CTRL_REG);
5713 data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
5715 WREG32(CGTS_SM_CTRL_REG, data);
5717 tmp = cik_halt_rlc(rdev);
5719 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5720 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5721 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
5722 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
5723 WREG32(RLC_SERDES_WR_CTRL, data);
5725 cik_update_rlc(rdev, tmp);
static const u32 mc_cg_registers[] =
{
	MC_HUB_MISC_HUB_CG,
	MC_HUB_MISC_SIP_CG,
	MC_HUB_MISC_VM_CG,
	MC_XPB_CLK_GAT,
	ATC_MISC_CG,
	MC_CITF_MISC_WR_CG,
	MC_CITF_MISC_RD_CG,
	MC_CITF_MISC_VM_CG,
	VM_L2_CG,
};
5742 static void cik_enable_mc_ls(struct radeon_device *rdev,
5748 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5749 orig = data = RREG32(mc_cg_registers[i]);
5750 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5751 data |= MC_LS_ENABLE;
5753 data &= ~MC_LS_ENABLE;
5755 WREG32(mc_cg_registers[i], data);
5759 static void cik_enable_mc_mgcg(struct radeon_device *rdev,
5765 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5766 orig = data = RREG32(mc_cg_registers[i]);
5767 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5768 data |= MC_CG_ENABLE;
5770 data &= ~MC_CG_ENABLE;
5772 WREG32(mc_cg_registers[i], data);
5776 static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
5781 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5782 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
5783 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
5785 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
5788 WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
5790 orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
5793 WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
5797 static void cik_enable_sdma_mgls(struct radeon_device *rdev,
5802 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
5803 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
5806 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
5808 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
5811 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
5813 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
5816 WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
5818 orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
5821 WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
5825 static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
5830 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5831 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5833 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
5835 orig = data = RREG32(UVD_CGC_CTRL);
5838 WREG32(UVD_CGC_CTRL, data);
5840 data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5842 WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
5844 orig = data = RREG32(UVD_CGC_CTRL);
5847 WREG32(UVD_CGC_CTRL, data);
5851 static void cik_enable_bif_mgls(struct radeon_device *rdev,
5856 orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
5858 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5859 data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5860 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5862 data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5863 REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5866 WREG32_PCIE_PORT(PCIE_CNTL2, data);
5869 static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
5874 orig = data = RREG32(HDP_HOST_PATH_CNTL);
5876 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5877 data &= ~CLOCK_GATING_DIS;
5879 data |= CLOCK_GATING_DIS;
5882 WREG32(HDP_HOST_PATH_CNTL, data);
5885 static void cik_enable_hdp_ls(struct radeon_device *rdev,
5890 orig = data = RREG32(HDP_MEM_POWER_LS);
5892 if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5893 data |= HDP_LS_ENABLE;
5895 data &= ~HDP_LS_ENABLE;
5898 WREG32(HDP_MEM_POWER_LS, data);
5901 void cik_update_cg(struct radeon_device *rdev,
5902 u32 block, bool enable)
5905 if (block & RADEON_CG_BLOCK_GFX) {
5906 cik_enable_gui_idle_interrupt(rdev, false);
5907 /* order matters! */
5909 cik_enable_mgcg(rdev, true);
5910 cik_enable_cgcg(rdev, true);
5912 cik_enable_cgcg(rdev, false);
5913 cik_enable_mgcg(rdev, false);
5915 cik_enable_gui_idle_interrupt(rdev, true);
5918 if (block & RADEON_CG_BLOCK_MC) {
5919 if (!(rdev->flags & RADEON_IS_IGP)) {
5920 cik_enable_mc_mgcg(rdev, enable);
5921 cik_enable_mc_ls(rdev, enable);
5925 if (block & RADEON_CG_BLOCK_SDMA) {
5926 cik_enable_sdma_mgcg(rdev, enable);
5927 cik_enable_sdma_mgls(rdev, enable);
5930 if (block & RADEON_CG_BLOCK_BIF) {
5931 cik_enable_bif_mgls(rdev, enable);
5934 if (block & RADEON_CG_BLOCK_UVD) {
5936 cik_enable_uvd_mgcg(rdev, enable);
5939 if (block & RADEON_CG_BLOCK_HDP) {
5940 cik_enable_hdp_mgcg(rdev, enable);
5941 cik_enable_hdp_ls(rdev, enable);
5945 static void cik_init_cg(struct radeon_device *rdev)
5948 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
5951 si_init_uvd_internal_cg(rdev);
5953 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
5954 RADEON_CG_BLOCK_SDMA |
5955 RADEON_CG_BLOCK_BIF |
5956 RADEON_CG_BLOCK_UVD |
5957 RADEON_CG_BLOCK_HDP), true);
5960 static void cik_fini_cg(struct radeon_device *rdev)
5962 cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
5963 RADEON_CG_BLOCK_SDMA |
5964 RADEON_CG_BLOCK_BIF |
5965 RADEON_CG_BLOCK_UVD |
5966 RADEON_CG_BLOCK_HDP), false);
5968 cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
5971 static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
5976 orig = data = RREG32(RLC_PG_CNTL);
5977 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
5978 data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
5980 data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
5982 WREG32(RLC_PG_CNTL, data);
5985 static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
5990 orig = data = RREG32(RLC_PG_CNTL);
5991 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
5992 data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
5994 data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
5996 WREG32(RLC_PG_CNTL, data);
5999 static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
6003 orig = data = RREG32(RLC_PG_CNTL);
6004 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
6005 data &= ~DISABLE_CP_PG;
6007 data |= DISABLE_CP_PG;
6009 WREG32(RLC_PG_CNTL, data);
6012 static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
6016 orig = data = RREG32(RLC_PG_CNTL);
6017 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
6018 data &= ~DISABLE_GDS_PG;
6020 data |= DISABLE_GDS_PG;
6022 WREG32(RLC_PG_CNTL, data);
6025 #define CP_ME_TABLE_SIZE 96
6026 #define CP_ME_TABLE_OFFSET 2048
6027 #define CP_MEC_TABLE_OFFSET 4096
6029 void cik_init_cp_pg_table(struct radeon_device *rdev)
6031 const __be32 *fw_data;
6032 volatile u32 *dst_ptr;
6033 int me, i, max_me = 4;
	if (rdev->family == CHIP_KAVERI)
		max_me = 5;
6040 if (rdev->rlc.cp_table_ptr == NULL)
6043 /* write the cp table buffer */
6044 dst_ptr = rdev->rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			fw_data = (const __be32 *)rdev->ce_fw->data;
6048 table_offset = CP_ME_TABLE_OFFSET;
6049 } else if (me == 1) {
6050 fw_data = (const __be32 *)rdev->pfp_fw->data;
6051 table_offset = CP_ME_TABLE_OFFSET;
6052 } else if (me == 2) {
6053 fw_data = (const __be32 *)rdev->me_fw->data;
6054 table_offset = CP_ME_TABLE_OFFSET;
6056 fw_data = (const __be32 *)rdev->mec_fw->data;
6057 table_offset = CP_MEC_TABLE_OFFSET;
		for (i = 0; i < CP_ME_TABLE_SIZE; i++) {
6061 dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
6063 bo_offset += CP_ME_TABLE_SIZE;
6067 static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
6072 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
6073 orig = data = RREG32(RLC_PG_CNTL);
6074 data |= GFX_PG_ENABLE;
6076 WREG32(RLC_PG_CNTL, data);
6078 orig = data = RREG32(RLC_AUTO_PG_CTRL);
6081 WREG32(RLC_AUTO_PG_CTRL, data);
6083 orig = data = RREG32(RLC_PG_CNTL);
6084 data &= ~GFX_PG_ENABLE;
6086 WREG32(RLC_PG_CNTL, data);
6088 orig = data = RREG32(RLC_AUTO_PG_CTRL);
6089 data &= ~AUTO_PG_EN;
6091 WREG32(RLC_AUTO_PG_CTRL, data);
6093 data = RREG32(DB_RENDER_CONTROL);
6097 static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
6099 u32 mask = 0, tmp, tmp1;
6102 cik_select_se_sh(rdev, se, sh);
6103 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
6104 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
6105 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
	for (i = 0; i < rdev->config.cik.max_cu_per_sh; i++) {
6117 return (~tmp) & mask;
6120 static void cik_init_ao_cu_mask(struct radeon_device *rdev)
6122 u32 i, j, k, active_cu_number = 0;
6123 u32 mask, counter, cu_bitmap;
6126 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
6127 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) {
6132 if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
6140 active_cu_number += counter;
6141 tmp |= (cu_bitmap << (i * 16 + j * 8));
6145 WREG32(RLC_PG_AO_CU_MASK, tmp);
6147 tmp = RREG32(RLC_MAX_PG_CU);
6148 tmp &= ~MAX_PU_CU_MASK;
6149 tmp |= MAX_PU_CU(active_cu_number);
	WREG32(RLC_MAX_PG_CU, tmp);
}
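/*
 * Packing note for cik_init_ao_cu_mask(): each SE gets 16 bits of
 * RLC_PG_AO_CU_MASK and each SH within it 8 bits, so the always-on CU
 * bitmap for SE i / SH j lands at bit (i * 16 + j * 8); e.g. SE1/SH0
 * occupies bits 16-23.
 */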
6153 static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
6158 orig = data = RREG32(RLC_PG_CNTL);
6159 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
6160 data |= STATIC_PER_CU_PG_ENABLE;
6162 data &= ~STATIC_PER_CU_PG_ENABLE;
6164 WREG32(RLC_PG_CNTL, data);
6167 static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
6172 orig = data = RREG32(RLC_PG_CNTL);
6173 if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
6174 data |= DYN_PER_CU_PG_ENABLE;
6176 data &= ~DYN_PER_CU_PG_ENABLE;
6178 WREG32(RLC_PG_CNTL, data);
6181 #define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
6182 #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
6184 static void cik_init_gfx_cgpg(struct radeon_device *rdev)
6189 if (rdev->rlc.cs_data) {
6190 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
6191 WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
6192 WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
6193 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
6195 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
6196 for (i = 0; i < 3; i++)
6197 WREG32(RLC_GPM_SCRATCH_DATA, 0);
6199 if (rdev->rlc.reg_list) {
6200 WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
6201 for (i = 0; i < rdev->rlc.reg_list_size; i++)
6202 WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
6205 orig = data = RREG32(RLC_PG_CNTL);
6208 WREG32(RLC_PG_CNTL, data);
6210 WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
6211 WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
6213 data = RREG32(CP_RB_WPTR_POLL_CNTL);
6214 data &= ~IDLE_POLL_COUNT_MASK;
6215 data |= IDLE_POLL_COUNT(0x60);
6216 WREG32(CP_RB_WPTR_POLL_CNTL, data);
6219 WREG32(RLC_PG_DELAY, data);
6221 data = RREG32(RLC_PG_DELAY_2);
6224 WREG32(RLC_PG_DELAY_2, data);
6226 data = RREG32(RLC_AUTO_PG_CTRL);
6227 data &= ~GRBM_REG_SGIT_MASK;
6228 data |= GRBM_REG_SGIT(0x700);
6229 WREG32(RLC_AUTO_PG_CTRL, data);
6233 static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
6235 cik_enable_gfx_cgpg(rdev, enable);
6236 cik_enable_gfx_static_mgpg(rdev, enable);
6237 cik_enable_gfx_dynamic_mgpg(rdev, enable);
6240 u32 cik_get_csb_size(struct radeon_device *rdev)
6243 const struct cs_section_def *sect = NULL;
6244 const struct cs_extent_def *ext = NULL;
6246 if (rdev->rlc.cs_data == NULL)
6249 /* begin clear state */
6251 /* context control state */
6254 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
6255 for (ext = sect->section; ext->extent != NULL; ++ext) {
6256 if (sect->id == SECT_CONTEXT)
6257 count += 2 + ext->reg_count;
6262 /* pa_sc_raster_config/pa_sc_raster_config1 */
6264 /* end clear state */
6272 void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
6275 const struct cs_section_def *sect = NULL;
6276 const struct cs_extent_def *ext = NULL;
6278 if (rdev->rlc.cs_data == NULL)
6283 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
6284 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
6286 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
6287 buffer[count++] = cpu_to_le32(0x80000000);
6288 buffer[count++] = cpu_to_le32(0x80000000);
6290 for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
6291 for (ext = sect->section; ext->extent != NULL; ++ext) {
6292 if (sect->id == SECT_CONTEXT) {
6294 cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
6295 buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
6296 for (i = 0; i < ext->reg_count; i++)
6297 buffer[count++] = cpu_to_le32(ext->extent[i]);
6304 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
6305 buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
6306 switch (rdev->family) {
6308 buffer[count++] = cpu_to_le32(0x16000012);
6309 buffer[count++] = cpu_to_le32(0x00000000);
6312 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
6313 buffer[count++] = cpu_to_le32(0x00000000);
6316 buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
6317 buffer[count++] = cpu_to_le32(0x00000000);
		buffer[count++] = cpu_to_le32(0x3a00161a);
		buffer[count++] = cpu_to_le32(0x0000002e);
6324 buffer[count++] = cpu_to_le32(0x00000000);
6325 buffer[count++] = cpu_to_le32(0x00000000);
6329 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
6330 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
6332 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
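/*
 * Layout check: the writes above match the dword budget computed by
 * cik_get_csb_size(): 2 (begin clear state) + 3 (context control) +
 * (2 + reg_count) per SECT_CONTEXT extent + 4 (the raster config pair)
 * + 2 (end clear state) + 2 (clear state).
 */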
6336 static void cik_init_pg(struct radeon_device *rdev)
6338 if (rdev->pg_flags) {
6339 cik_enable_sck_slowdown_on_pu(rdev, true);
6340 cik_enable_sck_slowdown_on_pd(rdev, true);
6341 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
6342 cik_init_gfx_cgpg(rdev);
6343 cik_enable_cp_pg(rdev, true);
6344 cik_enable_gds_pg(rdev, true);
6346 cik_init_ao_cu_mask(rdev);
6347 cik_update_gfx_pg(rdev, true);
6351 static void cik_fini_pg(struct radeon_device *rdev)
6353 if (rdev->pg_flags) {
6354 cik_update_gfx_pg(rdev, false);
6355 if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
6356 cik_enable_cp_pg(rdev, false);
6357 cik_enable_gds_pg(rdev, false);
6364 * Starting with r6xx, interrupts are handled via a ring buffer.
6365 * Ring buffers are areas of GPU accessible memory that the GPU
6366 * writes interrupt vectors into and the host reads vectors out of.
6367 * There is a rptr (read pointer) that determines where the
6368 * host is currently reading, and a wptr (write pointer)
6369 * which determines where the GPU has written. When the
6370 * pointers are equal, the ring is idle. When the GPU
6371 * writes vectors to the ring buffer, it increments the
6372 * wptr. When there is an interrupt, the host then starts
6373 * fetching commands and processing them until the pointers are
 * equal again at which point it updates the rptr.
 */
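/*
 * Sketch of the host side of the protocol described above (loop shape
 * is illustrative; on CIK each IH vector is 16 bytes):
 *
 *	wptr = cik_get_ih_wptr(rdev);
 *	while (rptr != wptr) {
 *		... decode the vector at ih.ring + rptr ...
 *		rptr = (rptr + 16) & rdev->ih.ptr_mask;
 *	}
 *	WREG32(IH_RB_RPTR, rptr);
 */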
6378 * cik_enable_interrupts - Enable the interrupt ring buffer
6380 * @rdev: radeon_device pointer
6382 * Enable the interrupt ring buffer (CIK).
6384 static void cik_enable_interrupts(struct radeon_device *rdev)
6386 u32 ih_cntl = RREG32(IH_CNTL);
6387 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
6389 ih_cntl |= ENABLE_INTR;
6390 ih_rb_cntl |= IH_RB_ENABLE;
6391 WREG32(IH_CNTL, ih_cntl);
6392 WREG32(IH_RB_CNTL, ih_rb_cntl);
6393 rdev->ih.enabled = true;
6397 * cik_disable_interrupts - Disable the interrupt ring buffer
6399 * @rdev: radeon_device pointer
6401 * Disable the interrupt ring buffer (CIK).
6403 static void cik_disable_interrupts(struct radeon_device *rdev)
6405 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
6406 u32 ih_cntl = RREG32(IH_CNTL);
6408 ih_rb_cntl &= ~IH_RB_ENABLE;
6409 ih_cntl &= ~ENABLE_INTR;
6410 WREG32(IH_RB_CNTL, ih_rb_cntl);
6411 WREG32(IH_CNTL, ih_cntl);
6412 /* set rptr, wptr to 0 */
6413 WREG32(IH_RB_RPTR, 0);
6414 WREG32(IH_RB_WPTR, 0);
6415 rdev->ih.enabled = false;
6420 * cik_disable_interrupt_state - Disable all interrupt sources
6422 * @rdev: radeon_device pointer
6424 * Clear all interrupt enable bits used by the driver (CIK).
6426 static void cik_disable_interrupt_state(struct radeon_device *rdev)
6431 tmp = RREG32(CP_INT_CNTL_RING0) &
6432 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6433 WREG32(CP_INT_CNTL_RING0, tmp);
6435 tmp = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6436 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, tmp);
6437 tmp = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6438 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, tmp);
6439 /* compute queues */
6440 WREG32(CP_ME1_PIPE0_INT_CNTL, 0);
6441 WREG32(CP_ME1_PIPE1_INT_CNTL, 0);
6442 WREG32(CP_ME1_PIPE2_INT_CNTL, 0);
6443 WREG32(CP_ME1_PIPE3_INT_CNTL, 0);
6444 WREG32(CP_ME2_PIPE0_INT_CNTL, 0);
6445 WREG32(CP_ME2_PIPE1_INT_CNTL, 0);
6446 WREG32(CP_ME2_PIPE2_INT_CNTL, 0);
6447 WREG32(CP_ME2_PIPE3_INT_CNTL, 0);
6449 WREG32(GRBM_INT_CNTL, 0);
6450 /* vline/vblank, etc. */
6451 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
6452 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
6453 if (rdev->num_crtc >= 4) {
6454 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
6455 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
6457 if (rdev->num_crtc >= 6) {
6458 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
6459 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
6463 WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
6465 /* digital hotplug */
6466 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6467 WREG32(DC_HPD1_INT_CONTROL, tmp);
6468 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6469 WREG32(DC_HPD2_INT_CONTROL, tmp);
6470 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6471 WREG32(DC_HPD3_INT_CONTROL, tmp);
6472 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6473 WREG32(DC_HPD4_INT_CONTROL, tmp);
6474 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6475 WREG32(DC_HPD5_INT_CONTROL, tmp);
6476 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6477 WREG32(DC_HPD6_INT_CONTROL, tmp);
6482 * cik_irq_init - init and enable the interrupt ring
6484 * @rdev: radeon_device pointer
6486 * Allocate a ring buffer for the interrupt controller,
6487 * enable the RLC, disable interrupts, enable the IH
6488 * ring buffer and enable it (CIK).
6489 * Called at device load and resume.
6490 * Returns 0 for success, errors for failure.
6492 static int cik_irq_init(struct radeon_device *rdev)
6496 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
6499 ret = r600_ih_ring_alloc(rdev);
6504 cik_disable_interrupts(rdev);
6507 ret = cik_rlc_resume(rdev);
6509 r600_ih_ring_fini(rdev);
6513 /* setup interrupt control */
6514 /* XXX this should actually be a bus address, not an MC address. same on older asics */
6515 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
6516 interrupt_cntl = RREG32(INTERRUPT_CNTL);
6517 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
6518 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
6520 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
6521 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
6522 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
6523 WREG32(INTERRUPT_CNTL, interrupt_cntl);
6525 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
6526 rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
6528 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
6529 IH_WPTR_OVERFLOW_CLEAR |
6532 if (rdev->wb.enabled)
6533 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
6535 /* set the writeback address whether it's enabled or not */
6536 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6537 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6539 WREG32(IH_RB_CNTL, ih_rb_cntl);
6541 /* set rptr, wptr to 0 */
6542 WREG32(IH_RB_RPTR, 0);
6543 WREG32(IH_RB_WPTR, 0);
6545 /* Default settings for IH_CNTL (disabled at first) */
6546 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
6547 /* RPTR_REARM only works if msi's are enabled */
6548 if (rdev->msi_enabled)
6549 ih_cntl |= RPTR_REARM;
6550 WREG32(IH_CNTL, ih_cntl);
6552 /* force the active interrupt state to all disabled */
6553 cik_disable_interrupt_state(rdev);
6555 pci_set_master(rdev->pdev);
6558 cik_enable_interrupts(rdev);
6564 * cik_irq_set - enable/disable interrupt sources
6566 * @rdev: radeon_device pointer
6568 * Enable interrupt sources on the GPU (vblanks, hpd, etc.) (CIK).
6570 * Returns 0 for success, errors for failure.
6572 int cik_irq_set(struct radeon_device *rdev)
6575 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3;
6576 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
6577 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
6578 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
6579 u32 grbm_int_cntl = 0;
6580 u32 dma_cntl, dma_cntl1;
6583 if (!rdev->irq.installed) {
6584 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
6587 /* don't enable anything if the ih is disabled */
6588 if (!rdev->ih.enabled) {
6589 cik_disable_interrupts(rdev);
6590 /* force the active interrupt state to all disabled */
6591 cik_disable_interrupt_state(rdev);
6595 cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6596 (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6597 cp_int_cntl |= PRIV_INSTR_INT_ENABLE | PRIV_REG_INT_ENABLE;
6599 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
6600 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
6601 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
6602 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
6603 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
6604 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
6606 dma_cntl = RREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6607 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6609 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6610 cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6611 cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6612 cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6613 cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6614 cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6615 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6616 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
6618 if (rdev->flags & RADEON_IS_IGP)
6619 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
6620 ~(THERM_INTH_MASK | THERM_INTL_MASK);
6622 thermal_int = RREG32_SMC(CG_THERMAL_INT) &
6623 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6625 /* enable CP interrupts on all rings */
6626 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
6627 DRM_DEBUG("cik_irq_set: sw int gfx\n");
6628 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
6630 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
6631 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
6632 DRM_DEBUG("cik_irq_set: sw int cp1\n");
6633 if (ring->me == 1) {
6634 switch (ring->pipe) {
6636 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
6639 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
6642 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
6645 cp_m1p3 |= TIME_STAMP_INT_ENABLE;
6648 DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
6651 } else if (ring->me == 2) {
6652 switch (ring->pipe) {
6654 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
6657 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
6660 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
6663 cp_m2p3 |= TIME_STAMP_INT_ENABLE;
6666 DRM_DEBUG("cik_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
6670 DRM_DEBUG("cik_irq_set: sw int cp1 invalid me %d\n", ring->me);
6673 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
6674 struct radeon_ring *ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
6675 DRM_DEBUG("cik_irq_set: sw int cp2\n");
6676 if (ring->me == 1) {
6677 switch (ring->pipe) {
6679 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
6682 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
6685 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
6688 cp_m1p3 |= TIME_STAMP_INT_ENABLE;
6691 DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
6694 } else if (ring->me == 2) {
6695 switch (ring->pipe) {
6697 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
6700 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
6703 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
6706 cp_m2p3 |= TIME_STAMP_INT_ENABLE;
6709 DRM_DEBUG("cik_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
6713 DRM_DEBUG("cik_irq_set: sw int cp2 invalid me %d\n", ring->me);
6717 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6718 DRM_DEBUG("cik_irq_set: sw int dma\n");
6719 dma_cntl |= TRAP_ENABLE;
6722 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6723 DRM_DEBUG("cik_irq_set: sw int dma1\n");
6724 dma_cntl1 |= TRAP_ENABLE;
6727 if (rdev->irq.crtc_vblank_int[0] ||
6728 atomic_read(&rdev->irq.pflip[0])) {
6729 DRM_DEBUG("cik_irq_set: vblank 0\n");
6730 crtc1 |= VBLANK_INTERRUPT_MASK;
6732 if (rdev->irq.crtc_vblank_int[1] ||
6733 atomic_read(&rdev->irq.pflip[1])) {
6734 DRM_DEBUG("cik_irq_set: vblank 1\n");
6735 crtc2 |= VBLANK_INTERRUPT_MASK;
6737 if (rdev->irq.crtc_vblank_int[2] ||
6738 atomic_read(&rdev->irq.pflip[2])) {
6739 DRM_DEBUG("cik_irq_set: vblank 2\n");
6740 crtc3 |= VBLANK_INTERRUPT_MASK;
6742 if (rdev->irq.crtc_vblank_int[3] ||
6743 atomic_read(&rdev->irq.pflip[3])) {
6744 DRM_DEBUG("cik_irq_set: vblank 3\n");
6745 crtc4 |= VBLANK_INTERRUPT_MASK;
6747 if (rdev->irq.crtc_vblank_int[4] ||
6748 atomic_read(&rdev->irq.pflip[4])) {
6749 DRM_DEBUG("cik_irq_set: vblank 4\n");
6750 crtc5 |= VBLANK_INTERRUPT_MASK;
6752 if (rdev->irq.crtc_vblank_int[5] ||
6753 atomic_read(&rdev->irq.pflip[5])) {
6754 DRM_DEBUG("cik_irq_set: vblank 5\n");
6755 crtc6 |= VBLANK_INTERRUPT_MASK;
6757 if (rdev->irq.hpd[0]) {
6758 DRM_DEBUG("cik_irq_set: hpd 1\n");
6759 hpd1 |= DC_HPDx_INT_EN;
6761 if (rdev->irq.hpd[1]) {
6762 DRM_DEBUG("cik_irq_set: hpd 2\n");
6763 hpd2 |= DC_HPDx_INT_EN;
6765 if (rdev->irq.hpd[2]) {
6766 DRM_DEBUG("cik_irq_set: hpd 3\n");
6767 hpd3 |= DC_HPDx_INT_EN;
6769 if (rdev->irq.hpd[3]) {
6770 DRM_DEBUG("cik_irq_set: hpd 4\n");
6771 hpd4 |= DC_HPDx_INT_EN;
6773 if (rdev->irq.hpd[4]) {
6774 DRM_DEBUG("cik_irq_set: hpd 5\n");
6775 hpd5 |= DC_HPDx_INT_EN;
6777 if (rdev->irq.hpd[5]) {
6778 DRM_DEBUG("cik_irq_set: hpd 6\n");
6779 hpd6 |= DC_HPDx_INT_EN;
6782 if (rdev->irq.dpm_thermal) {
6783 DRM_DEBUG("dpm thermal\n");
6784 if (rdev->flags & RADEON_IS_IGP)
6785 thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
6787 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6790 WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
6792 WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
6793 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
6795 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
6796 WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
6797 WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
6798 WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
6799 WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
6800 WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
6801 WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
6802 WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
6804 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
6806 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
6807 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
6808 if (rdev->num_crtc >= 4) {
6809 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
6810 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
6812 if (rdev->num_crtc >= 6) {
6813 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
6814 WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
6817 WREG32(DC_HPD1_INT_CONTROL, hpd1);
6818 WREG32(DC_HPD2_INT_CONTROL, hpd2);
6819 WREG32(DC_HPD3_INT_CONTROL, hpd3);
6820 WREG32(DC_HPD4_INT_CONTROL, hpd4);
6821 WREG32(DC_HPD5_INT_CONTROL, hpd5);
6822 WREG32(DC_HPD6_INT_CONTROL, hpd6);
6824 if (rdev->flags & RADEON_IS_IGP)
6825 WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
6827 WREG32_SMC(CG_THERMAL_INT, thermal_int);
6833 * cik_irq_ack - ack interrupt sources
6835 * @rdev: radeon_device pointer
6837 * Ack interrupt sources on the GPU (vblanks, hpd,
6838 * etc.) (CIK). Certain interrupt sources are sw
6839 * generated and do not require an explicit ack.
6841 static inline void cik_irq_ack(struct radeon_device *rdev)
6845 rdev->irq.stat_regs.cik.disp_int = RREG32(DISP_INTERRUPT_STATUS);
6846 rdev->irq.stat_regs.cik.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
6847 rdev->irq.stat_regs.cik.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
6848 rdev->irq.stat_regs.cik.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
6849 rdev->irq.stat_regs.cik.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
6850 rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
6851 rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6);
6853 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT)
6854 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
6855 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT)
6856 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
6857 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
6858 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
6859 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT)
6860 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
6862 if (rdev->num_crtc >= 4) {
6863 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
6864 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
6865 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
6866 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
6867 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
6868 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
6869 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
6870 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
6873 if (rdev->num_crtc >= 6) {
6874 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
6875 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
6876 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
6877 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
6878 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
6879 WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
6880 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
6881 WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
6884 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
6885 tmp = RREG32(DC_HPD1_INT_CONTROL);
6886 tmp |= DC_HPDx_INT_ACK;
6887 WREG32(DC_HPD1_INT_CONTROL, tmp);
6889 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
6890 tmp = RREG32(DC_HPD2_INT_CONTROL);
6891 tmp |= DC_HPDx_INT_ACK;
6892 WREG32(DC_HPD2_INT_CONTROL, tmp);
6894 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6895 tmp = RREG32(DC_HPD3_INT_CONTROL);
6896 tmp |= DC_HPDx_INT_ACK;
6897 WREG32(DC_HPD3_INT_CONTROL, tmp);
6899 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6900 tmp = RREG32(DC_HPD4_INT_CONTROL);
6901 tmp |= DC_HPDx_INT_ACK;
6902 WREG32(DC_HPD4_INT_CONTROL, tmp);
6904 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6905 tmp = RREG32(DC_HPD5_INT_CONTROL);
6906 tmp |= DC_HPDx_INT_ACK;
6907 WREG32(DC_HPD5_INT_CONTROL, tmp);
6909 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6910 tmp = RREG32(DC_HPD6_INT_CONTROL);
6911 tmp |= DC_HPDx_INT_ACK;
6912 WREG32(DC_HPD6_INT_CONTROL, tmp);
6917 * cik_irq_disable - disable interrupts
6919 * @rdev: radeon_device pointer
6921 * Disable interrupts on the hw (CIK).
6923 static void cik_irq_disable(struct radeon_device *rdev)
6925 cik_disable_interrupts(rdev);
6926 /* Wait and acknowledge irq */
6929 cik_disable_interrupt_state(rdev);
6933 * cik_irq_suspend - disable interrupts for suspend
6935 * @rdev: radeon_device pointer
6937 * Disable interrupts and stop the RLC (CIK).
6940 static void cik_irq_suspend(struct radeon_device *rdev)
6942 cik_irq_disable(rdev);
6947 * cik_irq_fini - tear down interrupt support
6949 * @rdev: radeon_device pointer
6951 * Disable interrupts on the hw and free the IH ring buffer (CIK).
6953 * Used for driver unload.
6955 static void cik_irq_fini(struct radeon_device *rdev)
6957 cik_irq_suspend(rdev);
6958 r600_ih_ring_fini(rdev);
6962 * cik_get_ih_wptr - get the IH ring buffer wptr
6964 * @rdev: radeon_device pointer
6966 * Get the IH ring buffer wptr from either the register
6967 * or the writeback memory buffer (CIK). Also check for
6968 * ring buffer overflow and deal with it.
6969 * Used by cik_irq_process().
6970 * Returns the value of the wptr.
6972 static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
6976 if (rdev->wb.enabled)
6977 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6979 wptr = RREG32(IH_RB_WPTR);
6981 if (wptr & RB_OVERFLOW) {
6982 /* When a ring buffer overflow happens, start parsing interrupts
6983 * from the last vector that was not overwritten (wptr + 16).
6984 * Hopefully this allows us to catch up.
6986 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
6987 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
6988 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6989 tmp = RREG32(IH_RB_CNTL);
6990 tmp |= IH_WPTR_OVERFLOW_CLEAR;
6991 WREG32(IH_RB_CNTL, tmp);
6993 return (wptr & rdev->ih.ptr_mask);
6997 * Each IV ring entry is 128 bits:
6998 * [7:0] - interrupt source id
7000 * [59:32] - interrupt source data
7001 * [63:60] - reserved
7004 * ME_ID [1:0], PIPE_ID[1:0], QUEUE_ID[2:0]
7005 * QUEUE_ID - for compute, which of the 8 queues owned by the dispatcher
7006 * - for gfx, hw shader state (0=PS...5=LS, 6=CS)
7007 * ME_ID - 0 = gfx, 1 = first 4 CS pipes, 2 = second 4 CS pipes
7008 * PIPE_ID - ME0 0=3D
7009 * - ME1&2 compute dispatcher (4 pipes each)
7011 * INSTANCE_ID [1:0], QUEUE_ID[1:0]
7012 * INSTANCE_ID - 0 = sdma0, 1 = sdma1
7013 * QUEUE_ID - 0 = gfx, 1 = rlc0, 2 = rlc1
7016 * [127:96] - reserved
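/* A sketch of how the fields above map onto ring dwords, mirroring the
 * decode in cik_irq_process() below (index i is in dwords):
 *
 *	src_id   = le32_to_cpu(ring[i + 0]) & 0xff;		// [7:0]
 *	src_data = le32_to_cpu(ring[i + 1]) & 0xfffffff;	// [59:32]
 *	ring_id  = le32_to_cpu(ring[i + 2]) & 0xff;		// RINGID byte
 */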
7019 * cik_irq_process - interrupt handler
7021 * @rdev: radeon_device pointer
7023 * Interrupt handler (CIK). Walk the IH ring,
7024 * ack interrupts and schedule work to handle them.
7026 * Returns irq process return code.
7028 int cik_irq_process(struct radeon_device *rdev)
7030 struct radeon_ring *cp1_ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7031 struct radeon_ring *cp2_ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7034 u32 src_id, src_data, ring_id;
7035 u8 me_id, pipe_id, queue_id;
7037 bool queue_hotplug = false;
7038 bool queue_reset = false;
7039 u32 addr, status, mc_client;
7040 bool queue_thermal = false;
7042 if (!rdev->ih.enabled || rdev->shutdown)
7045 wptr = cik_get_ih_wptr(rdev);
7048 /* is somebody else already processing irqs? */
7049 if (atomic_xchg(&rdev->ih.lock, 1))
7052 rptr = rdev->ih.rptr;
7053 DRM_DEBUG("cik_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
7055 /* Order reading of wptr vs. reading of IH ring data */
7058 /* display interrupts */
7061 while (rptr != wptr) {
7062 /* wptr/rptr are in bytes! */
7063 ring_index = rptr / 4;
7064 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
7065 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
7066 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
7069 case 1: /* D1 vblank/vline */
7071 case 0: /* D1 vblank */
7072 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
7073 if (rdev->irq.crtc_vblank_int[0]) {
7074 drm_handle_vblank(rdev->ddev, 0);
7075 rdev->pm.vblank_sync = true;
7076 wake_up(&rdev->irq.vblank_queue);
7078 if (atomic_read(&rdev->irq.pflip[0]))
7079 radeon_crtc_handle_flip(rdev, 0);
7080 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7081 DRM_DEBUG("IH: D1 vblank\n");
7084 case 1: /* D1 vline */
7085 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
7086 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
7087 DRM_DEBUG("IH: D1 vline\n");
7091 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7095 case 2: /* D2 vblank/vline */
7097 case 0: /* D2 vblank */
7098 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
7099 if (rdev->irq.crtc_vblank_int[1]) {
7100 drm_handle_vblank(rdev->ddev, 1);
7101 rdev->pm.vblank_sync = true;
7102 wake_up(&rdev->irq.vblank_queue);
7104 if (atomic_read(&rdev->irq.pflip[1]))
7105 radeon_crtc_handle_flip(rdev, 1);
7106 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
7107 DRM_DEBUG("IH: D2 vblank\n");
7110 case 1: /* D2 vline */
7111 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
7112 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
7113 DRM_DEBUG("IH: D2 vline\n");
7117 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7121 case 3: /* D3 vblank/vline */
7123 case 0: /* D3 vblank */
7124 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
7125 if (rdev->irq.crtc_vblank_int[2]) {
7126 drm_handle_vblank(rdev->ddev, 2);
7127 rdev->pm.vblank_sync = true;
7128 wake_up(&rdev->irq.vblank_queue);
7130 if (atomic_read(&rdev->irq.pflip[2]))
7131 radeon_crtc_handle_flip(rdev, 2);
7132 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
7133 DRM_DEBUG("IH: D3 vblank\n");
7136 case 1: /* D3 vline */
7137 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
7138 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
7139 DRM_DEBUG("IH: D3 vline\n");
7143 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7147 case 4: /* D4 vblank/vline */
7149 case 0: /* D4 vblank */
7150 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
7151 if (rdev->irq.crtc_vblank_int[3]) {
7152 drm_handle_vblank(rdev->ddev, 3);
7153 rdev->pm.vblank_sync = true;
7154 wake_up(&rdev->irq.vblank_queue);
7156 if (atomic_read(&rdev->irq.pflip[3]))
7157 radeon_crtc_handle_flip(rdev, 3);
7158 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
7159 DRM_DEBUG("IH: D4 vblank\n");
7162 case 1: /* D4 vline */
7163 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
7164 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
7165 DRM_DEBUG("IH: D4 vline\n");
7169 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7173 case 5: /* D5 vblank/vline */
7175 case 0: /* D5 vblank */
7176 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
7177 if (rdev->irq.crtc_vblank_int[4]) {
7178 drm_handle_vblank(rdev->ddev, 4);
7179 rdev->pm.vblank_sync = true;
7180 wake_up(&rdev->irq.vblank_queue);
7182 if (atomic_read(&rdev->irq.pflip[4]))
7183 radeon_crtc_handle_flip(rdev, 4);
7184 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
7185 DRM_DEBUG("IH: D5 vblank\n");
7188 case 1: /* D5 vline */
7189 if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
7190 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
7191 DRM_DEBUG("IH: D5 vline\n");
7195 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7199 case 6: /* D6 vblank/vline */
7201 case 0: /* D6 vblank */
7202 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
7203 if (rdev->irq.crtc_vblank_int[5]) {
7204 drm_handle_vblank(rdev->ddev, 5);
7205 rdev->pm.vblank_sync = true;
7206 wake_up(&rdev->irq.vblank_queue);
7208 if (atomic_read(&rdev->irq.pflip[5]))
7209 radeon_crtc_handle_flip(rdev, 5);
7210 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
7211 DRM_DEBUG("IH: D6 vblank\n");
7214 case 1: /* D6 vline */
7215 if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
7216 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
7217 DRM_DEBUG("IH: D6 vline\n");
7221 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7225 case 42: /* HPD hotplug */
7228 if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
7229 rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
7230 queue_hotplug = true;
7231 DRM_DEBUG("IH: HPD1\n");
7235 if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
7236 rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
7237 queue_hotplug = true;
7238 DRM_DEBUG("IH: HPD2\n");
7242 if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
7243 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
7244 queue_hotplug = true;
7245 DRM_DEBUG("IH: HPD3\n");
7249 if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
7250 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
7251 queue_hotplug = true;
7252 DRM_DEBUG("IH: HPD4\n");
7256 if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
7257 rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
7258 queue_hotplug = true;
7259 DRM_DEBUG("IH: HPD5\n");
7263 if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
7264 rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
7265 queue_hotplug = true;
7266 DRM_DEBUG("IH: HPD6\n");
7270 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7275 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
7276 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
7280 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
7281 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
7282 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
7283 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
7284 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
7286 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
7288 cik_vm_decode_fault(rdev, status, addr, mc_client);
7289 /* reset addr and status */
7290 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
7292 case 176: /* GFX RB CP_INT */
7293 case 177: /* GFX IB CP_INT */
7294 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
7296 case 181: /* CP EOP event */
7297 DRM_DEBUG("IH: CP EOP\n");
7298 /* XXX check the bitfield order! */
7299 me_id = (ring_id & 0x60) >> 5;
7300 pipe_id = (ring_id & 0x18) >> 3;
7301 queue_id = (ring_id & 0x7) >> 0;
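/* i.e. the RINGID byte packs queue_id in [2:0], pipe_id in [4:3]
 * and me_id in [6:5] */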
7304 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
7308 if ((cp1_ring->me == me_id) && (cp1_ring->pipe == pipe_id))
7309 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
7310 if ((cp2_ring->me == me_id) && (cp2_ring->pipe == pipe_id))
7311 radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
7315 case 184: /* CP Privileged reg access */
7316 DRM_ERROR("Illegal register access in command stream\n");
7317 /* XXX check the bitfield order! */
7318 me_id = (ring_id & 0x60) >> 5;
7319 pipe_id = (ring_id & 0x18) >> 3;
7320 queue_id = (ring_id & 0x7) >> 0;
7323 /* This results in a full GPU reset, but all we need to do is soft
7324 * reset the CP for gfx
7338 case 185: /* CP Privileged inst */
7339 DRM_ERROR("Illegal instruction in command stream\n");
7340 /* XXX check the bitfield order! */
7341 me_id = (ring_id & 0x60) >> 5;
7342 pipe_id = (ring_id & 0x18) >> 3;
7343 queue_id = (ring_id & 0x7) >> 0;
7346 /* This results in a full GPU reset, but all we need to do is soft
7347 * reset the CP for gfx
7361 case 224: /* SDMA trap event */
7362 /* XXX check the bitfield order! */
7363 me_id = (ring_id & 0x3) >> 0;
7364 queue_id = (ring_id & 0xc) >> 2;
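/* for SDMA the RINGID byte packs the instance in [1:0] and the
 * queue in [3:2] */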
7365 DRM_DEBUG("IH: SDMA trap\n");
7370 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
7383 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
7395 case 230: /* thermal low to high */
7396 DRM_DEBUG("IH: thermal low to high\n");
7397 rdev->pm.dpm.thermal.high_to_low = false;
7398 queue_thermal = true;
7400 case 231: /* thermal high to low */
7401 DRM_DEBUG("IH: thermal high to low\n");
7402 rdev->pm.dpm.thermal.high_to_low = true;
7403 queue_thermal = true;
7405 case 233: /* GUI IDLE */
7406 DRM_DEBUG("IH: GUI idle\n");
7408 case 241: /* SDMA Privileged inst */
7409 case 247: /* SDMA Privileged inst */
7410 DRM_ERROR("Illegal instruction in SDMA command stream\n");
7411 /* XXX check the bitfield order! */
7412 me_id = (ring_id & 0x3) >> 0;
7413 queue_id = (ring_id & 0xc) >> 2;
7448 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
7452 /* wptr/rptr are in bytes! */
7454 rptr &= rdev->ih.ptr_mask;
7457 schedule_work(&rdev->hotplug_work);
7459 schedule_work(&rdev->reset_work);
7461 schedule_work(&rdev->pm.dpm.thermal.work);
7462 rdev->ih.rptr = rptr;
7463 WREG32(IH_RB_RPTR, rdev->ih.rptr);
7464 atomic_set(&rdev->ih.lock, 0);
7466 /* make sure wptr hasn't changed while processing */
7467 wptr = cik_get_ih_wptr(rdev);
7475 * startup/shutdown callbacks
7478 * cik_startup - program the asic to a functional state
7480 * @rdev: radeon_device pointer
7482 * Programs the asic to a functional state (CIK).
7483 * Called by cik_init() and cik_resume().
7484 * Returns 0 for success, error for failure.
7486 static int cik_startup(struct radeon_device *rdev)
7488 struct radeon_ring *ring;
7491 /* enable pcie gen2/3 link */
7492 cik_pcie_gen3_enable(rdev);
7494 cik_program_aspm(rdev);
7496 /* scratch needs to be initialized before MC */
7497 r = r600_vram_scratch_init(rdev);
7501 cik_mc_program(rdev);
7503 if (rdev->flags & RADEON_IS_IGP) {
7504 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
7505 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
7506 r = cik_init_microcode(rdev);
7508 DRM_ERROR("Failed to load firmware!\n");
7513 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
7514 !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw ||
7516 r = cik_init_microcode(rdev);
7518 DRM_ERROR("Failed to load firmware!\n");
7523 r = ci_mc_load_microcode(rdev);
7525 DRM_ERROR("Failed to load MC firmware!\n");
7530 r = cik_pcie_gart_enable(rdev);
7535 /* allocate rlc buffers */
7536 if (rdev->flags & RADEON_IS_IGP) {
7537 if (rdev->family == CHIP_KAVERI) {
7538 rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
7539 rdev->rlc.reg_list_size =
7540 (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
7542 rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
7543 rdev->rlc.reg_list_size =
7544 (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
7547 rdev->rlc.cs_data = ci_cs_data;
7548 rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
7549 r = sumo_rlc_init(rdev);
7551 DRM_ERROR("Failed to init rlc BOs!\n");
7555 /* allocate wb buffer */
7556 r = radeon_wb_init(rdev);
7560 /* allocate mec buffers */
7561 r = cik_mec_init(rdev);
7563 DRM_ERROR("Failed to init MEC BOs!\n");
7567 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
7569 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7573 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
7575 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7579 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
7581 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7585 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
7587 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7591 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
7593 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7597 r = radeon_uvd_resume(rdev);
7599 r = uvd_v4_2_resume(rdev);
7601 r = radeon_fence_driver_start_ring(rdev,
7602 R600_RING_TYPE_UVD_INDEX);
7604 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
7608 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
7611 if (!rdev->irq.installed) {
7612 r = radeon_irq_kms_init(rdev);
7617 r = cik_irq_init(rdev);
7619 DRM_ERROR("radeon: IH init failed (%d).\n", r);
7620 radeon_irq_kms_fini(rdev);
7625 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7626 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
7627 CP_RB0_RPTR, CP_RB0_WPTR,
7628 PACKET3(PACKET3_NOP, 0x3FFF));
7632 /* set up the compute queues */
7633 /* type-2 packets are deprecated on MEC, use type-3 instead */
7634 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7635 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
7636 CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
7637 PACKET3(PACKET3_NOP, 0x3FFF));
7640 ring->me = 1; /* first MEC */
7641 ring->pipe = 0; /* first pipe */
7642 ring->queue = 0; /* first queue */
7643 ring->wptr_offs = CIK_WB_CP1_WPTR_OFFSET;
7645 /* type-2 packets are deprecated on MEC, use type-3 instead */
7646 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7647 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
7648 CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
7649 PACKET3(PACKET3_NOP, 0x3FFF));
7652 /* dGPU only have 1 MEC */
7653 ring->me = 1; /* first MEC */
7654 ring->pipe = 0; /* first pipe */
7655 ring->queue = 1; /* second queue */
7656 ring->wptr_offs = CIK_WB_CP2_WPTR_OFFSET;
7658 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7659 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
7660 SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
7661 SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
7662 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7666 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7667 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
7668 SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
7669 SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
7670 SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
7674 r = cik_cp_resume(rdev);
7678 r = cik_sdma_resume(rdev);
7682 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
7683 if (ring->ring_size) {
7684 r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
7685 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
7688 r = uvd_v1_0_init(rdev);
7690 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
7693 r = radeon_ib_pool_init(rdev);
7695 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
7699 r = radeon_vm_manager_init(rdev);
7701 dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
7705 r = dce6_audio_init(rdev);
7713 * cik_resume - resume the asic to a functional state
7715 * @rdev: radeon_device pointer
7717 * Programs the asic to a functional state (CIK).
7719 * Returns 0 for success, error for failure.
7721 int cik_resume(struct radeon_device *rdev)
7726 atom_asic_init(rdev->mode_info.atom_context);
7728 /* init golden registers */
7729 cik_init_golden_registers(rdev);
7731 rdev->accel_working = true;
7732 r = cik_startup(rdev);
7734 DRM_ERROR("cik startup failed on resume\n");
7735 rdev->accel_working = false;
7744 * cik_suspend - suspend the asic
7746 * @rdev: radeon_device pointer
7748 * Bring the chip into a state suitable for suspend (CIK).
7749 * Called at suspend.
7750 * Returns 0 for success.
7752 int cik_suspend(struct radeon_device *rdev)
7754 dce6_audio_fini(rdev);
7755 radeon_vm_manager_fini(rdev);
7756 cik_cp_enable(rdev, false);
7757 cik_sdma_enable(rdev, false);
7758 uvd_v1_0_fini(rdev);
7759 radeon_uvd_suspend(rdev);
7762 cik_irq_suspend(rdev);
7763 radeon_wb_disable(rdev);
7764 cik_pcie_gart_disable(rdev);
7768 /* The plan is to move initialization into that function and use
7769 * helper functions so that radeon_device_init does pretty much
7770 * nothing more than call asic-specific functions. This
7771 * should also allow us to remove a bunch of callback functions
7775 * cik_init - asic specific driver and hw init
7777 * @rdev: radeon_device pointer
7779 * Setup asic specific driver variables and program the hw
7780 * to a functional state (CIK).
7781 * Called at driver startup.
7782 * Returns 0 for success, errors for failure.
7784 int cik_init(struct radeon_device *rdev)
7786 struct radeon_ring *ring;
7790 if (!radeon_get_bios(rdev)) {
7791 if (ASIC_IS_AVIVO(rdev))
7794 /* Must be an ATOMBIOS */
7795 if (!rdev->is_atom_bios) {
7796 dev_err(rdev->dev, "Expecting atombios for CIK GPU\n");
7799 r = radeon_atombios_init(rdev);
7803 /* Post card if necessary */
7804 if (!radeon_card_posted(rdev)) {
7806 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
7809 DRM_INFO("GPU not posted. posting now...\n");
7810 atom_asic_init(rdev->mode_info.atom_context);
7812 /* init golden registers */
7813 cik_init_golden_registers(rdev);
7814 /* Initialize scratch registers */
7815 cik_scratch_init(rdev);
7816 /* Initialize surface registers */
7817 radeon_surface_init(rdev);
7818 /* Initialize clocks */
7819 radeon_get_clock_info(rdev->ddev);
7822 r = radeon_fence_driver_init(rdev);
7826 /* initialize memory controller */
7827 r = cik_mc_init(rdev);
7830 /* Memory manager */
7831 r = radeon_bo_init(rdev);
7835 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7836 ring->ring_obj = NULL;
7837 r600_ring_init(rdev, ring, 1024 * 1024);
7839 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7840 ring->ring_obj = NULL;
7841 r600_ring_init(rdev, ring, 1024 * 1024);
7842 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
7846 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7847 ring->ring_obj = NULL;
7848 r600_ring_init(rdev, ring, 1024 * 1024);
7849 r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
7853 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7854 ring->ring_obj = NULL;
7855 r600_ring_init(rdev, ring, 256 * 1024);
7857 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7858 ring->ring_obj = NULL;
7859 r600_ring_init(rdev, ring, 256 * 1024);
7861 r = radeon_uvd_init(rdev);
7863 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
7864 ring->ring_obj = NULL;
7865 r600_ring_init(rdev, ring, 4096);
7868 rdev->ih.ring_obj = NULL;
7869 r600_ih_ring_init(rdev, 64 * 1024);
7871 r = r600_pcie_gart_init(rdev);
7875 rdev->accel_working = true;
7876 r = cik_startup(rdev);
7878 dev_err(rdev->dev, "disabling GPU acceleration\n");
7880 cik_sdma_fini(rdev);
7882 sumo_rlc_fini(rdev);
7884 radeon_wb_fini(rdev);
7885 radeon_ib_pool_fini(rdev);
7886 radeon_vm_manager_fini(rdev);
7887 radeon_irq_kms_fini(rdev);
7888 cik_pcie_gart_fini(rdev);
7889 rdev->accel_working = false;
7892 /* Don't start up if the MC ucode is missing.
7893 * The default clocks and voltages before the MC ucode
7894 * is loaded are not sufficient for advanced operations.
7896 if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
7897 DRM_ERROR("radeon: MC ucode required for NI+.\n");
7905 * cik_fini - asic specific driver and hw fini
7907 * @rdev: radeon_device pointer
7909 * Tear down the asic specific driver variables and program the hw
7910 * to an idle state (CIK).
7911 * Called at driver unload.
7913 void cik_fini(struct radeon_device *rdev)
7916 cik_sdma_fini(rdev);
7920 sumo_rlc_fini(rdev);
7922 radeon_wb_fini(rdev);
7923 radeon_vm_manager_fini(rdev);
7924 radeon_ib_pool_fini(rdev);
7925 radeon_irq_kms_fini(rdev);
7926 uvd_v1_0_fini(rdev);
7927 radeon_uvd_fini(rdev);
7928 cik_pcie_gart_fini(rdev);
7929 r600_vram_scratch_fini(rdev);
7930 radeon_gem_fini(rdev);
7931 radeon_fence_driver_fini(rdev);
7932 radeon_bo_fini(rdev);
7933 radeon_atombios_fini(rdev);
7938 void dce8_program_fmt(struct drm_encoder *encoder)
7940 struct drm_device *dev = encoder->dev;
7941 struct radeon_device *rdev = dev->dev_private;
7942 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
7943 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
7944 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
7947 enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;
7950 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
7951 bpc = radeon_get_monitor_bpc(connector);
7952 dither = radeon_connector->dither;
7955 /* LVDS/eDP FMT is set up by atom */
7956 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
7959 /* not needed for analog */
7960 if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
7961 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
7969 if (dither == RADEON_FMT_DITHER_ENABLE)
7970 /* XXX sort out optimal dither settings */
7971 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7972 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(0));
7974 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(0));
7977 if (dither == RADEON_FMT_DITHER_ENABLE)
7978 /* XXX sort out optimal dither settings */
7979 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7980 FMT_RGB_RANDOM_ENABLE |
7981 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(1));
7983 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(1));
7986 if (dither == RADEON_FMT_DITHER_ENABLE)
7987 /* XXX sort out optimal dither settings */
7988 tmp |= (FMT_FRAME_RANDOM_ENABLE | FMT_HIGHPASS_RANDOM_ENABLE |
7989 FMT_RGB_RANDOM_ENABLE |
7990 FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH(2));
7992 tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH(2));
7999 WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
8002 /* display watermark setup */
8004 * dce8_line_buffer_adjust - Set up the line buffer
8006 * @rdev: radeon_device pointer
8007 * @radeon_crtc: the selected display controller
8008 * @mode: the current display mode on the selected display
8011 * Set up the line buffer allocation for
8012 * the selected display controller (CIK).
8013 * Returns the line buffer size in pixels.
8015 static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
8016 struct radeon_crtc *radeon_crtc,
8017 struct drm_display_mode *mode)
8019 u32 tmp, buffer_alloc, i;
8020 u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
8023 * There are 6 line buffers, one for each display controller.
8024 * There are 3 partitions per LB. Select the number of partitions
8025 * to enable based on the display width. For display widths larger
8026 * than 4096, you need to use 2 display controllers and combine
8027 * them using the stereo blender.
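/* the width thresholds below pick a partition count (and a matching
 * DMIF buffer allocation); wider modes get more of the line buffer */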
8029 if (radeon_crtc->base.enabled && mode) {
8030 if (mode->crtc_hdisplay < 1920) {
8033 } else if (mode->crtc_hdisplay < 2560) {
8036 } else if (mode->crtc_hdisplay < 4096) {
8038 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
8040 DRM_DEBUG_KMS("Mode too big for LB!\n");
8042 buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
8049 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
8050 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
8052 WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
8053 DMIF_BUFFERS_ALLOCATED(buffer_alloc));
8054 for (i = 0; i < rdev->usec_timeout; i++) {
8055 if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
8056 DMIF_BUFFERS_ALLOCATED_COMPLETED)
8061 if (radeon_crtc->base.enabled && mode) {
8073 /* controller not enabled, so no lb used */
8078 * cik_get_number_of_dram_channels - get the number of dram channels
8080 * @rdev: radeon_device pointer
8082 * Look up the number of video ram channels (CIK).
8083 * Used for display watermark bandwidth calculations
8084 * Returns the number of dram channels
8086 static u32 cik_get_number_of_dram_channels(struct radeon_device *rdev)
8088 u32 tmp = RREG32(MC_SHARED_CHMAP);
8090 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
8113 struct dce8_wm_params {
8114 u32 dram_channels; /* number of dram channels */
8115 u32 yclk; /* bandwidth per dram data pin in kHz */
8116 u32 sclk; /* engine clock in kHz */
8117 u32 disp_clk; /* display clock in kHz */
8118 u32 src_width; /* viewport width */
8119 u32 active_time; /* active display time in ns */
8120 u32 blank_time; /* blank time in ns */
8121 bool interlaced; /* mode is interlaced */
8122 fixed20_12 vsc; /* vertical scale ratio */
8123 u32 num_heads; /* number of active crtcs */
8124 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
8125 u32 lb_size; /* line buffer allocated to pipe */
8126 u32 vtaps; /* vertical scaler taps */
8130 * dce8_dram_bandwidth - get the dram bandwidth
8132 * @wm: watermark calculation data
8134 * Calculate the raw dram bandwidth (CIK).
8135 * Used for display watermark bandwidth calculations
8136 * Returns the dram bandwidth in MBytes/s
8138 static u32 dce8_dram_bandwidth(struct dce8_wm_params *wm)
8140 /* Calculate raw DRAM Bandwidth */
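/* in closed form: bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7,
 * i.e. effective pin speed in MHz times bus width in bytes times the
 * assumed 70% DRAM efficiency, giving MBytes/s */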
8141 fixed20_12 dram_efficiency; /* 0.7 */
8142 fixed20_12 yclk, dram_channels, bandwidth;
8145 a.full = dfixed_const(1000);
8146 yclk.full = dfixed_const(wm->yclk);
8147 yclk.full = dfixed_div(yclk, a);
8148 dram_channels.full = dfixed_const(wm->dram_channels * 4);
8149 a.full = dfixed_const(10);
8150 dram_efficiency.full = dfixed_const(7);
8151 dram_efficiency.full = dfixed_div(dram_efficiency, a);
8152 bandwidth.full = dfixed_mul(dram_channels, yclk);
8153 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
8155 return dfixed_trunc(bandwidth);
8159 * dce8_dram_bandwidth_for_display - get the dram bandwidth for display
8161 * @wm: watermark calculation data
8163 * Calculate the dram bandwidth used for display (CIK).
8164 * Used for display watermark bandwidth calculations
8165 * Returns the dram bandwidth for display in MBytes/s
8167 static u32 dce8_dram_bandwidth_for_display(struct dce8_wm_params *wm)
8169 /* Calculate DRAM Bandwidth and the part allocated to display. */
8170 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
8171 fixed20_12 yclk, dram_channels, bandwidth;
8174 a.full = dfixed_const(1000);
8175 yclk.full = dfixed_const(wm->yclk);
8176 yclk.full = dfixed_div(yclk, a);
8177 dram_channels.full = dfixed_const(wm->dram_channels * 4);
8178 a.full = dfixed_const(10);
8179 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
8180 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
8181 bandwidth.full = dfixed_mul(dram_channels, yclk);
8182 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
8184 return dfixed_trunc(bandwidth);
8188 * dce8_data_return_bandwidth - get the data return bandwidth
8190 * @wm: watermark calculation data
8192 * Calculate the data return bandwidth used for display (CIK).
8193 * Used for display watermark bandwidth calculations
8194 * Returns the data return bandwidth in MBytes/s
8196 static u32 dce8_data_return_bandwidth(struct dce8_wm_params *wm)
8198 /* Calculate the display Data return Bandwidth */
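/* in closed form: bandwidth = (sclk / 1000) * 32 * 0.8 MBytes/s */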
8199 fixed20_12 return_efficiency; /* 0.8 */
8200 fixed20_12 sclk, bandwidth;
8203 a.full = dfixed_const(1000);
8204 sclk.full = dfixed_const(wm->sclk);
8205 sclk.full = dfixed_div(sclk, a);
8206 a.full = dfixed_const(10);
8207 return_efficiency.full = dfixed_const(8);
8208 return_efficiency.full = dfixed_div(return_efficiency, a);
8209 a.full = dfixed_const(32);
8210 bandwidth.full = dfixed_mul(a, sclk);
8211 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
8213 return dfixed_trunc(bandwidth);
8217 * dce8_dmif_request_bandwidth - get the dmif bandwidth
8219 * @wm: watermark calculation data
8221 * Calculate the dmif bandwidth used for display (CIK).
8222 * Used for display watermark bandwidth calculations
8223 * Returns the dmif bandwidth in MBytes/s
8225 static u32 dce8_dmif_request_bandwidth(struct dce8_wm_params *wm)
8227 /* Calculate the DMIF Request Bandwidth */
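/* analogous closed form: (disp_clk / 1000) * 32 * 0.8 MBytes/s */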
8228 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
8229 fixed20_12 disp_clk, bandwidth;
8232 a.full = dfixed_const(1000);
8233 disp_clk.full = dfixed_const(wm->disp_clk);
8234 disp_clk.full = dfixed_div(disp_clk, a);
8235 a.full = dfixed_const(32);
8236 b.full = dfixed_mul(a, disp_clk);
8238 a.full = dfixed_const(10);
8239 disp_clk_request_efficiency.full = dfixed_const(8);
8240 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
8242 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
8244 return dfixed_trunc(bandwidth);
8248 * dce8_available_bandwidth - get the min available bandwidth
8250 * @wm: watermark calculation data
8252 * Calculate the min available bandwidth used for display (CIK).
8253 * Used for display watermark bandwidth calculations
8254 * Returns the min available bandwidth in MBytes/s
8256 static u32 dce8_available_bandwidth(struct dce8_wm_params *wm)
8258 /* Calculate the available bandwidth. The display can use this temporarily but not on average. */
8259 u32 dram_bandwidth = dce8_dram_bandwidth(wm);
8260 u32 data_return_bandwidth = dce8_data_return_bandwidth(wm);
8261 u32 dmif_req_bandwidth = dce8_dmif_request_bandwidth(wm);
8263 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
8267 * dce8_average_bandwidth - get the average available bandwidth
8269 * @wm: watermark calculation data
8271 * Calculate the average available bandwidth used for display (CIK).
8272 * Used for display watermark bandwidth calculations
8273 * Returns the average available bandwidth in MBytes/s
8275 static u32 dce8_average_bandwidth(struct dce8_wm_params *wm)
8277 /* Calculate the display mode Average Bandwidth
8278 * DisplayMode should contain the source and destination dimensions,
8282 fixed20_12 line_time;
8283 fixed20_12 src_width;
8284 fixed20_12 bandwidth;
8287 a.full = dfixed_const(1000);
8288 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
8289 line_time.full = dfixed_div(line_time, a);
8290 bpp.full = dfixed_const(wm->bytes_per_pixel);
8291 src_width.full = dfixed_const(wm->src_width);
8292 bandwidth.full = dfixed_mul(src_width, bpp);
8293 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
8294 bandwidth.full = dfixed_div(bandwidth, line_time);
8296 return dfixed_trunc(bandwidth);
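/* the computation above in closed form:
 * bandwidth = src_width * bytes_per_pixel * vsc / line_time(us),
 * i.e. the bytes one scaled source line fetches per line period */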
8300 * dce8_latency_watermark - get the latency watermark
8302 * @wm: watermark calculation data
8304 * Calculate the latency watermark (CIK).
8305 * Used for display watermark bandwidth calculations
8306 * Returns the latency watermark in ns
8308 static u32 dce8_latency_watermark(struct dce8_wm_params *wm)
8310 /* First calculate the latency in ns */
8311 u32 mc_latency = 2000; /* 2000 ns. */
8312 u32 available_bandwidth = dce8_available_bandwidth(wm);
8313 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
8314 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
8315 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
8316 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
8317 (wm->num_heads * cursor_line_pair_return_time);
8318 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
8319 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
8320 u32 tmp, dmif_size = 12288;
8323 if (wm->num_heads == 0)
8326 a.full = dfixed_const(2);
8327 b.full = dfixed_const(1);
8328 if ((wm->vsc.full > a.full) ||
8329 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
8331 ((wm->vsc.full >= a.full) && wm->interlaced))
8332 max_src_lines_per_dst_line = 4;
8334 max_src_lines_per_dst_line = 2;
8336 a.full = dfixed_const(available_bandwidth);
8337 b.full = dfixed_const(wm->num_heads);
8338 a.full = dfixed_div(a, b);
8340 b.full = dfixed_const(mc_latency + 512);
8341 c.full = dfixed_const(wm->disp_clk);
8342 b.full = dfixed_div(b, c);
8344 c.full = dfixed_const(dmif_size);
8345 b.full = dfixed_div(c, b);
8347 tmp = min(dfixed_trunc(a), dfixed_trunc(b));
8349 b.full = dfixed_const(1000);
8350 c.full = dfixed_const(wm->disp_clk);
8351 b.full = dfixed_div(c, b);
8352 c.full = dfixed_const(wm->bytes_per_pixel);
8353 b.full = dfixed_mul(b, c);
8355 lb_fill_bw = min(tmp, dfixed_trunc(b));
8357 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
8358 b.full = dfixed_const(1000);
8359 c.full = dfixed_const(lb_fill_bw);
8360 b.full = dfixed_div(c, b);
8361 a.full = dfixed_div(a, b);
8362 line_fill_time = dfixed_trunc(a);
8364 if (line_fill_time < wm->active_time)
8367 return latency + (line_fill_time - wm->active_time);
8372 * dce8_average_bandwidth_vs_dram_bandwidth_for_display - check
8373 * average and available dram bandwidth
8375 * @wm: watermark calculation data
8377 * Check if the display average bandwidth fits in the display
8378 * dram bandwidth (CIK).
8379 * Used for display watermark bandwidth calculations
8380 * Returns true if the display fits, false if not.
8382 static bool dce8_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
8384 if (dce8_average_bandwidth(wm) <=
8385 (dce8_dram_bandwidth_for_display(wm) / wm->num_heads))
8392 * dce8_average_bandwidth_vs_available_bandwidth - check
8393 * average and available bandwidth
8395 * @wm: watermark calculation data
8397 * Check if the display average bandwidth fits in the display
8398 * available bandwidth (CIK).
8399 * Used for display watermark bandwidth calculations
8400 * Returns true if the display fits, false if not.
8402 static bool dce8_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
8404 if (dce8_average_bandwidth(wm) <=
8405 (dce8_available_bandwidth(wm) / wm->num_heads))
8412 * dce8_check_latency_hiding - check latency hiding
8414 * @wm: watermark calculation data
8416 * Check latency hiding (CIK).
8417 * Used for display watermark bandwidth calculations
8418 * Returns true if the display fits, false if not.
8420 static bool dce8_check_latency_hiding(struct dce8_wm_params *wm)
8422 u32 lb_partitions = wm->lb_size / wm->src_width;
8423 u32 line_time = wm->active_time + wm->blank_time;
8424 u32 latency_tolerant_lines;
8428 a.full = dfixed_const(1);
8429 if (wm->vsc.full > a.full)
8430 latency_tolerant_lines = 1;
8432 if (lb_partitions <= (wm->vtaps + 1))
8433 latency_tolerant_lines = 1;
8435 latency_tolerant_lines = 2;
8438 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
8440 if (dce8_latency_watermark(wm) <= latency_hiding)
8447 * dce8_program_watermarks - program display watermarks
8449 * @rdev: radeon_device pointer
8450 * @radeon_crtc: the selected display controller
8451 * @lb_size: line buffer size
8452 * @num_heads: number of display controllers in use
8454 * Calculate and program the display watermarks for the
8455 * selected display controller (CIK).
8457 static void dce8_program_watermarks(struct radeon_device *rdev,
8458 struct radeon_crtc *radeon_crtc,
8459 u32 lb_size, u32 num_heads)
8461 struct drm_display_mode *mode = &radeon_crtc->base.mode;
8462 struct dce8_wm_params wm_low, wm_high;
8465 u32 latency_watermark_a = 0, latency_watermark_b = 0;
8468 if (radeon_crtc->base.enabled && num_heads && mode) {
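/* mode->clock is in kHz, so this yields the pixel period in ns */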
8469 pixel_period = 1000000 / (u32)mode->clock;
8470 line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
8472 /* watermark for high clocks */
8473 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
8474 rdev->pm.dpm_enabled) {
8476 radeon_dpm_get_mclk(rdev, false) * 10;
8478 radeon_dpm_get_sclk(rdev, false) * 10;
8480 wm_high.yclk = rdev->pm.current_mclk * 10;
8481 wm_high.sclk = rdev->pm.current_sclk * 10;
8484 wm_high.disp_clk = mode->clock;
8485 wm_high.src_width = mode->crtc_hdisplay;
8486 wm_high.active_time = mode->crtc_hdisplay * pixel_period;
8487 wm_high.blank_time = line_time - wm_high.active_time;
8488 wm_high.interlaced = false;
8489 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
8490 wm_high.interlaced = true;
8491 wm_high.vsc = radeon_crtc->vsc;
8493 if (radeon_crtc->rmx_type != RMX_OFF)
8495 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
8496 wm_high.lb_size = lb_size;
8497 wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
8498 wm_high.num_heads = num_heads;
8500 /* set for high clocks */
8501 latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
8503 /* possibly force display priority to high */
8504 /* should really do this at mode validation time... */
8505 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
8506 !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
8507 !dce8_check_latency_hiding(&wm_high) ||
8508 (rdev->disp_priority == 2)) {
8509 DRM_DEBUG_KMS("force priority to high\n");
8512 /* watermark for low clocks */
8513 if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
8514 rdev->pm.dpm_enabled) {
8516 radeon_dpm_get_mclk(rdev, true) * 10;
8518 radeon_dpm_get_sclk(rdev, true) * 10;
8520 wm_low.yclk = rdev->pm.current_mclk * 10;
8521 wm_low.sclk = rdev->pm.current_sclk * 10;
8524 wm_low.disp_clk = mode->clock;
8525 wm_low.src_width = mode->crtc_hdisplay;
8526 wm_low.active_time = mode->crtc_hdisplay * pixel_period;
8527 wm_low.blank_time = line_time - wm_low.active_time;
8528 wm_low.interlaced = false;
8529 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
8530 wm_low.interlaced = true;
8531 wm_low.vsc = radeon_crtc->vsc;
8533 if (radeon_crtc->rmx_type != RMX_OFF)
8535 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
8536 wm_low.lb_size = lb_size;
8537 wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
8538 wm_low.num_heads = num_heads;
8540 /* set for low clocks */
8541 latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
8543 /* possibly force display priority to high */
8544 /* should really do this at mode validation time... */
8545 if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
8546 !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
8547 !dce8_check_latency_hiding(&wm_low) ||
8548 (rdev->disp_priority == 2)) {
8549 DRM_DEBUG_KMS("force priority to high\n");
8554 wm_mask = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
8556 tmp &= ~LATENCY_WATERMARK_MASK(3);
8557 tmp |= LATENCY_WATERMARK_MASK(1);
8558 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
8559 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
8560 (LATENCY_LOW_WATERMARK(latency_watermark_a) |
8561 LATENCY_HIGH_WATERMARK(line_time)));
8563 tmp = RREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset);
8564 tmp &= ~LATENCY_WATERMARK_MASK(3);
8565 tmp |= LATENCY_WATERMARK_MASK(2);
8566 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, tmp);
8567 WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
8568 (LATENCY_LOW_WATERMARK(latency_watermark_b) |
8569 LATENCY_HIGH_WATERMARK(line_time)));
8570 /* restore original selection */
8571 WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
8573 /* save values for DPM */
8574 radeon_crtc->line_time = line_time;
8575 radeon_crtc->wm_high = latency_watermark_a;
8576 radeon_crtc->wm_low = latency_watermark_b;
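/*
 * Note on the register dance above: DPG_WATERMARK_MASK_CONTROL selects
 * which watermark set (A or B) the DPG_PIPE_LATENCY_CONTROL writes land
 * in, so each set is selected, programmed, and the original selection
 * is then restored.  The saved line_time/wm_high/wm_low values are
 * later consumed by the DPM code when validating clock transitions.
 */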
/**
 * dce8_bandwidth_update - program display watermarks
 *
 * @rdev: radeon_device pointer
 *
 * Calculate and program the display watermarks and line
 * buffer allocation (CIK).
 */
void dce8_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i++) {
		mode = &rdev->mode_info.crtcs[i]->base.mode;
		lb_size = dce8_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode);
		dce8_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
	}
}
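/*
 * A sketch of how this entry point is typically wired up (the exact
 * structure layout lives in radeon_asic.c; shown for orientation only):
 *
 *	static struct radeon_asic ci_asic = {
 *		...
 *		.display = {
 *			.bandwidth_update = &dce8_bandwidth_update,
 *			...
 *		},
 *	};
 *
 * The mode-setting core then calls it whenever the CRTC configuration
 * changes and the watermarks need to be recomputed.
 */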
/**
 * cik_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t cik_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
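/*
 * Minimal usage sketch (hypothetical caller): two snapshots bracket a
 * workload and the difference gives elapsed GPU clocks.  Userspace
 * reaches this counter through the RADEON_INFO_TIMESTAMP query.
 *
 *	uint64_t start = cik_get_gpu_clock_counter(rdev);
 *	... submit and wait for some work ...
 *	uint64_t elapsed = cik_get_gpu_clock_counter(rdev) - start;
 */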
static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock,
			     u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					   clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);
	tmp &= ~(DCLK_DIR_CNTL_EN|DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(status_reg) & DCLK_STATUS)
			break;
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	int r = 0;

	r = cik_set_uvd_clock(rdev, vclk, CG_VCLK_CNTL, CG_VCLK_STATUS);
	if (r)
		return r;

	r = cik_set_uvd_clock(rdev, dclk, CG_DCLK_CNTL, CG_DCLK_STATUS);
	return r;
}
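/*
 * Typical call-site sketch (the 53300/40000 values, in 10 kHz units,
 * are the UVD defaults used elsewhere in the driver; quoted here only
 * for illustration): the UVD code raises the clocks before touching
 * the block and drops them again afterwards.
 *
 *	radeon_set_uvd_clocks(rdev, 53300, 40000);
 *	... initialize or use UVD ...
 *	radeon_set_uvd_clocks(rdev, 0, 0);
 */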
static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
	struct pci_dev *root = rdev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u16 tmp16;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
	}

	bridge_pos = pci_pcie_cap(root);
	if (!bridge_pos)
		return;

	gpu_pos = pci_pcie_cap(rdev->pdev);
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
		/* re-try equalization if gen3 is not already enabled */
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;
			u32 max_lw, current_lw, tmp;

			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

			tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;

			if (current_lw < max_lw) {
				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
				if (tmp & LC_RENEGOTIATION_SUPPORT) {
					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
				}
			}

			for (i = 0; i < 10; i++) {
				/* check status */
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
					break;

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);

				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp |= LC_REDO_EQ;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);

				mdelay(100);

				/* linkctl */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);

				/* linkctl2 */
				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);

				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
				tmp16 &= ~((1 << 4) | (7 << 9));
				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
				tmp &= ~LC_SET_QUIESCE;
				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
			}
		}
	}

	/* set the link speed */
	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
		tmp16 |= 3; /* gen3 */
	else if (mask & DRM_PCIE_SPEED_50)
		tmp16 |= 2; /* gen2 */
	else
		tmp16 |= 1; /* gen1 */
	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	for (i = 0; i < rdev->usec_timeout; i++) {
		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
			break;
		udelay(1);
	}
}
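/*
 * After the retrain one can double-check what the link actually
 * negotiated; a sketch using the standard PCI capability helpers
 * (not part of the original flow shown above):
 *
 *	u16 lnksta;
 *	pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKSTA, &lnksta);
 *	DRM_INFO("link speed gen%d\n", lnksta & PCI_EXP_LNKSTA_CLS);
 *
 * PCI_EXP_LNKSTA_CLS reads 1/2/3 for 2.5/5.0/8.0 GT/s respectively.
 */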
static void cik_program_aspm(struct radeon_device *rdev)
{
	u32 data, orig;
	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
	bool disable_clkreq = false;

	if (radeon_aspm == 0)
		return;

	/* XXX double check IGPs */
	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
	data &= ~LC_XMIT_N_FTS_MASK;
	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
	data |= LC_GO_TO_RECOVERY;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);

	orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
	data |= P_IGNORE_EDB_ERR;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_P_CNTL, data);

	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
	data |= LC_PMI_TO_L1_DIS;
	if (!disable_l0s)
		data |= LC_L0S_INACTIVITY(7);

	if (!disable_l1) {
		data |= LC_L1_INACTIVITY(7);
		data &= ~LC_PMI_TO_L1_DIS;
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);

		if (!disable_plloff_in_l1) {
			bool clk_req_support;

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);

			orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
			if (orig != data)
				WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);

			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
			data |= LC_DYN_LANES_PWR_STATE(3);
			if (orig != data)
				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);

			if (!disable_clkreq) {
				struct pci_dev *root = rdev->pdev->bus->self;
				u32 lnkcap;

				clk_req_support = false;
				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
					clk_req_support = true;
			} else {
				clk_req_support = false;
			}

			if (clk_req_support) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);

				orig = data = RREG32_SMC(THM_CLK_CNTL);
				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
				if (orig != data)
					WREG32_SMC(THM_CLK_CNTL, data);

				orig = data = RREG32_SMC(MISC_CLK_CTRL);
				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
				if (orig != data)
					WREG32_SMC(MISC_CLK_CTRL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
				data &= ~BCLK_AS_XCLK;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL, data);

				orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
				data &= ~FORCE_BIF_REFCLK_EN;
				if (orig != data)
					WREG32_SMC(CG_CLKPIN_CNTL_2, data);

				orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
				data &= ~MPLL_CLKOUT_SEL_MASK;
				data |= MPLL_CLKOUT_SEL(4);
				if (orig != data)
					WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
			}
		}
	} else {
		if (orig != data)
			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
	}

	orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
	if (orig != data)
		WREG32_PCIE_PORT(PCIE_CNTL2, data);

	if (!disable_l0s) {
		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
			data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
				data &= ~LC_L0S_INACTIVITY_MASK;
				if (orig != data)
					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);