/* drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c (from platform/kernel/linux-starfive.git) */
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/delay.h>
25 #include <linux/kernel.h>
26 #include <linux/firmware.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_gfx.h"
32 #include "amdgpu_ring.h"
33 #include "vi.h"
34 #include "vi_structs.h"
35 #include "vid.h"
36 #include "amdgpu_ucode.h"
37 #include "amdgpu_atombios.h"
38 #include "atombios_i2c.h"
39 #include "clearstate_vi.h"
40
41 #include "gmc/gmc_8_2_d.h"
42 #include "gmc/gmc_8_2_sh_mask.h"
43
44 #include "oss/oss_3_0_d.h"
45 #include "oss/oss_3_0_sh_mask.h"
46
47 #include "bif/bif_5_0_d.h"
48 #include "bif/bif_5_0_sh_mask.h"
49 #include "gca/gfx_8_0_d.h"
50 #include "gca/gfx_8_0_enum.h"
51 #include "gca/gfx_8_0_sh_mask.h"
52
53 #include "dce/dce_10_0_d.h"
54 #include "dce/dce_10_0_sh_mask.h"
55
56 #include "smu/smu_7_1_3_d.h"
57
58 #include "ivsrcid/ivsrcid_vislands30.h"
59
/* Number of GFX (graphics) rings exposed by this IP block. */
#define GFX8_NUM_GFX_RINGS     1
/* Per-queue MEC HPD buffer size in bytes — NOTE(review): confirm against compute queue setup. */
#define GFX8_MEC_HPD_SIZE 4096

/* Golden (recommended power-on) GB_ADDR_CONFIG values, per ASIC family. */
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define POLARIS11_GB_ADDR_CONFIG_GOLDEN 0x22011002
#define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003

/* Field-encoding helpers for the GB_TILE_MODE* / GB_MACROTILE_MODE* registers. */
#define ARRAY_MODE(x)                                   ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)                                  ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)                                   ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE_NEW(x)                          ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT)
#define SAMPLE_SPLIT(x)                                 ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)                                   ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT)
#define BANK_HEIGHT(x)                                  ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT)
#define MACRO_TILE_ASPECT(x)                            ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT)
#define NUM_BANKS(x)                                    ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT)

/* Per-block override bits in RLC_CGTT_MGCG_OVERRIDE (local definitions). */
#define RLC_CGTT_MGCG_OVERRIDE__CPF_MASK            0x00000001L
#define RLC_CGTT_MGCG_OVERRIDE__RLC_MASK            0x00000002L
#define RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK           0x00000004L
#define RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK           0x00000008L
#define RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK           0x00000010L
#define RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK           0x00000020L

/* BPM SERDES CMD: set vs. clear command selector for BPM serdes writes. */
#define SET_BPM_SERDES_CMD    1
#define CLE_BPM_SERDES_CMD    0

/* BPM Register Address*/
enum {
	BPM_REG_CGLS_EN = 0,        /* Enable/Disable CGLS */
	BPM_REG_CGLS_ON,            /* ON/OFF CGLS: shall be controlled by RLC FW */
	BPM_REG_CGCG_OVERRIDE,      /* Set/Clear CGCG Override */
	BPM_REG_MGCG_OVERRIDE,      /* Set/Clear MGCG Override */
	BPM_REG_FGCG_OVERRIDE,      /* Set/Clear FGCG Override */
	BPM_REG_FGCG_MAX            /* Number of BPM registers (sentinel) */
};

/* Entry count of the RLC "direct register list" format — TODO(review): confirm vs. RLC firmware. */
#define RLC_FormatDirectRegListLength        14
100
/*
 * Firmware images this driver may request, per ASIC: CE/PFP/ME (graphics
 * micro engines), MEC/MEC2 (compute micro engines) and RLC.  The "_2"
 * Polaris variants are alternate firmware images — NOTE(review): selection
 * logic lives outside this chunk; confirm which variant is picked when.
 */
MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");

MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
MODULE_FIRMWARE("amdgpu/stoney_me.bin");
MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");

MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
MODULE_FIRMWARE("amdgpu/tonga_me.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec.bin");
MODULE_FIRMWARE("amdgpu/tonga_mec2.bin");
MODULE_FIRMWARE("amdgpu/tonga_rlc.bin");

MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
MODULE_FIRMWARE("amdgpu/topaz_me.bin");
MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");

MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
MODULE_FIRMWARE("amdgpu/fiji_pfp.bin");
MODULE_FIRMWARE("amdgpu/fiji_me.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec.bin");
MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");

MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vegam_ce.bin");
MODULE_FIRMWARE("amdgpu/vegam_pfp.bin");
MODULE_FIRMWARE("amdgpu/vegam_me.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec.bin");
MODULE_FIRMWARE("amdgpu/vegam_mec2.bin");
MODULE_FIRMWARE("amdgpu/vegam_rlc.bin");
176
/*
 * Per-VMID GDS register offsets, indexed by VMID 0..15.
 * Each row gives {base, size, GWS, OA} register offsets for that VMID.
 */
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
196
/*
 * Tonga A11 golden register settings, as flat {register, mask, value}
 * triplets.  NOTE(review): presumably applied via
 * amdgpu_device_program_register_sequence() — the caller is outside this
 * chunk; confirm the exact mask/value semantics there.
 */
static const u32 golden_settings_tonga_a11[] =
{
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
216
/* Tonga common golden settings ({reg, mask, value} triplets): raster config,
 * address config and SPI CU resource reservations. */
static const u32 tonga_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
228
/*
 * Tonga MGCG/CGCG (medium-grain / coarse-grain clock gating) init sequence,
 * as {reg, mask, value} triplets.  Note that mmGRBM_GFX_INDEX appears twice:
 * the entries are order-sensitive, so this table must not be reordered.
 */
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	/* Per-CU (CU0..CU7) CGTS control registers. */
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
307
/* VegaM A11 golden register settings ({reg, mask, value} triplets). */
static const u32 golden_settings_vegam_a11[] =
{
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
328
/* VegaM common golden settings ({reg, mask, value} triplets). */
static const u32 vegam_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
338
/* Polaris11 A11 golden register settings ({reg, mask, value} triplets). */
static const u32 golden_settings_polaris11_a11[] =
{
	mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x01180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
359
/* Polaris11 common golden settings ({reg, mask, value} triplets). */
static const u32 polaris11_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
369
/* Polaris10 A11 golden register settings ({reg, mask, value} triplets). */
static const u32 golden_settings_polaris10_a11[] =
{
	mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
	mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208,
	mmCB_HW_CONTROL_2, 0x0f000000, 0x0f000000,
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002a,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
	mmSQ_CONFIG, 0x07f80000, 0x07180000,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
390
/* Polaris10 common golden settings ({reg, mask, value} triplets). */
static const u32 polaris10_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
402
/* Fiji common golden settings ({reg, mask, value} triplets); order-sensitive
 * (mmGRBM_GFX_INDEX appears twice). */
static const u32 fiji_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
416
/* Fiji A10 golden register settings ({reg, mask, value} triplets). */
static const u32 golden_settings_fiji_a10[] =
{
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
};
431
/*
 * Fiji MGCG/CGCG init sequence ({reg, mask, value} triplets); order-sensitive
 * (mmGRBM_GFX_INDEX appears twice).  Unlike Tonga/Iceland/CZ, no per-CU
 * CGTS entries here.
 */
static const u32 fiji_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
};
470
/* Iceland (Topaz) A11 golden register settings ({reg, mask, value} triplets). */
static const u32 golden_settings_iceland_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDB_DEBUG3, 0xc0000000, 0xc0000000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010,
};
490
/* Iceland (Topaz) common golden settings ({reg, mask, value} triplets). */
static const u32 iceland_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
502
/*
 * Iceland (Topaz) MGCG/CGCG init sequence ({reg, mask, value} triplets);
 * order-sensitive (mmGRBM_GFX_INDEX appears twice).  Per-CU CGTS entries
 * cover CU0..CU5 only on this ASIC.
 */
static const u32 iceland_mgcg_cgcg_init[] =
{
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	/* Per-CU (CU0..CU5) CGTS control registers. */
	mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87,
	mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
};
570
/* Carrizo A11 golden register settings ({reg, mask, value} triplets). */
static const u32 cz_golden_settings_a11[] =
{
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x00000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x00010000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302
};
586
/* Carrizo common golden settings ({reg, mask, value} triplets). */
static const u32 cz_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF
};
598
599 static const u32 cz_mgcg_cgcg_init[] =
600 {
601         mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
602         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
603         mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
604         mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
605         mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
606         mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
607         mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100,
608         mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
609         mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
610         mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
611         mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
612         mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
613         mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
614         mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
615         mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
616         mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
617         mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
618         mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
619         mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
620         mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
621         mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
622         mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
623         mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
624         mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
625         mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
626         mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
627         mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
628         mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
629         mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
630         mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
631         mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
632         mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000,
633         mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
634         mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
635         mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005,
636         mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
637         mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000,
638         mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
639         mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007,
640         mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005,
641         mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
642         mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000,
643         mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
644         mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007,
645         mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005,
646         mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
647         mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000,
648         mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
649         mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007,
650         mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005,
651         mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
652         mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000,
653         mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
654         mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
655         mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005,
656         mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
657         mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000,
658         mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
659         mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007,
660         mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005,
661         mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
662         mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000,
663         mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
664         mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007,
665         mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005,
666         mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
667         mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000,
668         mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
669         mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007,
670         mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005,
671         mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
672         mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
673         mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
674         mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
675         mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
676 };
677
/*
 * Stoney "golden" register settings: groups of three u32s per entry
 * (register offset, mask, value), applied at init time by
 * amdgpu_device_program_register_sequence() in
 * gfx_v8_0_init_golden_registers().
 */
static const u32 stoney_golden_settings_a11[] =
{
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
};
691
/*
 * Stoney common golden registers (raster config, address config, SPI CU
 * reservations): {register, mask, value} triplets consumed by
 * amdgpu_device_program_register_sequence().
 */
static const u32 stoney_golden_common_all[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF,
};
703
/*
 * Stoney medium-grain/coarse-grain clockgating init values:
 * {register, mask, value} triplets consumed by
 * amdgpu_device_program_register_sequence().
 */
static const u32 stoney_mgcg_cgcg_init[] =
{
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
	mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
};
712
713
/*
 * Human-readable descriptions of SQ EDC (error detection and correction)
 * error sources, used when reporting SQ EDC events.  Index order
 * presumably matches the hardware's SQ_EDC_INFO source field encoding --
 * confirm against the gfx_8_0 register headers.
 */
static const char * const sq_edc_source_names[] = {
	"SQ_EDC_INFO_SOURCE_INVALID: No EDC error has occurred",
	"SQ_EDC_INFO_SOURCE_INST: EDC source is Instruction Fetch",
	"SQ_EDC_INFO_SOURCE_SGPR: EDC source is SGPR or SQC data return",
	"SQ_EDC_INFO_SOURCE_VGPR: EDC source is VGPR",
	"SQ_EDC_INFO_SOURCE_LDS: EDC source is LDS",
	"SQ_EDC_INFO_SOURCE_GDS: EDC source is GDS",
	"SQ_EDC_INFO_SOURCE_TA: EDC source is TA",
};
723
/* Forward declarations for helpers defined later in this file. */
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring);
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring);

/* ACLK divider field of the CG_ACLK_CNTL SMC register (used for Polaris10
 * in gfx_v8_0_init_golden_registers() below). */
#define CG_ACLK_CNTL__ACLK_DIVIDER_MASK                    0x0000007fL
#define CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT                  0x00000000L
735
/**
 * gfx_v8_0_init_golden_registers - apply per-ASIC "golden" register settings.
 * @adev: amdgpu device pointer
 *
 * Programs the ASIC-specific clockgating-init, golden-settings and
 * common register sequences (defined in the tables earlier in this file)
 * via amdgpu_device_program_register_sequence().  Unknown ASIC types are
 * silently ignored.
 */
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	uint32_t data;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		amdgpu_device_program_register_sequence(adev,
							iceland_golden_common_all,
							ARRAY_SIZE(iceland_golden_common_all));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		amdgpu_device_program_register_sequence(adev,
							fiji_golden_common_all,
							ARRAY_SIZE(fiji_golden_common_all));
		break;

	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		amdgpu_device_program_register_sequence(adev,
							tonga_golden_common_all,
							ARRAY_SIZE(tonga_golden_common_all));
		break;
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_vegam_a11,
							ARRAY_SIZE(golden_settings_vegam_a11));
		amdgpu_device_program_register_sequence(adev,
							vegam_golden_common_all,
							ARRAY_SIZE(vegam_golden_common_all));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris11_golden_common_all,
							ARRAY_SIZE(polaris11_golden_common_all));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		amdgpu_device_program_register_sequence(adev,
							polaris10_golden_common_all,
							ARRAY_SIZE(polaris10_golden_common_all));
		/* Polaris10 also forces the ACLK divider to 0x18. */
		data = RREG32_SMC(ixCG_ACLK_CNTL);
		data &= ~CG_ACLK_CNTL__ACLK_DIVIDER_MASK;
		data |= 0x18 << CG_ACLK_CNTL__ACLK_DIVIDER__SHIFT;
		WREG32_SMC(ixCG_ACLK_CNTL, data);
		/*
		 * Board-specific quirk: on three particular 0x67DF rev 0xc7
		 * SKUs, issue two I2C register writes -- presumably tuning an
		 * external clock/voltage chip on those boards; the exact
		 * target device is not visible here.
		 */
		if ((adev->pdev->device == 0x67DF) && (adev->pdev->revision == 0xc7) &&
		    ((adev->pdev->subsystem_device == 0xb37 && adev->pdev->subsystem_vendor == 0x1002) ||
		     (adev->pdev->subsystem_device == 0x4a8 && adev->pdev->subsystem_vendor == 0x1043) ||
		     (adev->pdev->subsystem_device == 0x9480 && adev->pdev->subsystem_vendor == 0x1680))) {
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
			amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
		}
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_common_all,
							ARRAY_SIZE(cz_golden_common_all));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_common_all,
							ARRAY_SIZE(stoney_golden_common_all));
		break;
	default:
		break;
	}
}
837
838 static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
839 {
840         struct amdgpu_device *adev = ring->adev;
841         uint32_t tmp = 0;
842         unsigned i;
843         int r;
844
845         WREG32(mmSCRATCH_REG0, 0xCAFEDEAD);
846         r = amdgpu_ring_alloc(ring, 3);
847         if (r)
848                 return r;
849
850         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
851         amdgpu_ring_write(ring, mmSCRATCH_REG0 - PACKET3_SET_UCONFIG_REG_START);
852         amdgpu_ring_write(ring, 0xDEADBEEF);
853         amdgpu_ring_commit(ring);
854
855         for (i = 0; i < adev->usec_timeout; i++) {
856                 tmp = RREG32(mmSCRATCH_REG0);
857                 if (tmp == 0xDEADBEEF)
858                         break;
859                 udelay(1);
860         }
861
862         if (i >= adev->usec_timeout)
863                 r = -ETIMEDOUT;
864
865         return r;
866 }
867
/**
 * gfx_v8_0_ring_test_ib - sanity-check indirect buffer (IB) submission.
 * @ring: ring to submit the test IB on
 * @timeout: fence wait timeout (jiffies), passed to dma_fence_wait_timeout()
 *
 * Allocates a writeback slot, seeds it with 0xCAFEDEAD, then schedules a
 * five-dword IB containing a WRITE_DATA packet that stores 0xDEADBEEF at
 * the slot's GPU address.  Waits for the fence and verifies the value
 * landed.  Returns 0 on success, -ETIMEDOUT if the fence never signals,
 * -EINVAL if the fence signalled but the value is wrong, or another
 * negative errno on allocation/scheduling failure.
 */
static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned int index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	/* Each writeback slot is one dword. */
	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	/* WRITE_DATA packet: store 0xDEADBEEF at gpu_addr, with confirm. */
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	/* 0 from dma_fence_wait_timeout() means the wait timed out. */
	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

	/* Cleanup in reverse acquisition order: IB+fence, then wb slot. */
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}
923
924
925 static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
926 {
927         amdgpu_ucode_release(&adev->gfx.pfp_fw);
928         amdgpu_ucode_release(&adev->gfx.me_fw);
929         amdgpu_ucode_release(&adev->gfx.ce_fw);
930         amdgpu_ucode_release(&adev->gfx.rlc_fw);
931         amdgpu_ucode_release(&adev->gfx.mec_fw);
932         if ((adev->asic_type != CHIP_STONEY) &&
933             (adev->asic_type != CHIP_TOPAZ))
934                 amdgpu_ucode_release(&adev->gfx.mec2_fw);
935
936         kfree(adev->gfx.rlc.register_list_format);
937 }
938
939 static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
940 {
941         const char *chip_name;
942         char fw_name[30];
943         int err;
944         struct amdgpu_firmware_info *info = NULL;
945         const struct common_firmware_header *header = NULL;
946         const struct gfx_firmware_header_v1_0 *cp_hdr;
947         const struct rlc_firmware_header_v2_0 *rlc_hdr;
948         unsigned int *tmp = NULL, i;
949
950         DRM_DEBUG("\n");
951
952         switch (adev->asic_type) {
953         case CHIP_TOPAZ:
954                 chip_name = "topaz";
955                 break;
956         case CHIP_TONGA:
957                 chip_name = "tonga";
958                 break;
959         case CHIP_CARRIZO:
960                 chip_name = "carrizo";
961                 break;
962         case CHIP_FIJI:
963                 chip_name = "fiji";
964                 break;
965         case CHIP_STONEY:
966                 chip_name = "stoney";
967                 break;
968         case CHIP_POLARIS10:
969                 chip_name = "polaris10";
970                 break;
971         case CHIP_POLARIS11:
972                 chip_name = "polaris11";
973                 break;
974         case CHIP_POLARIS12:
975                 chip_name = "polaris12";
976                 break;
977         case CHIP_VEGAM:
978                 chip_name = "vegam";
979                 break;
980         default:
981                 BUG();
982         }
983
984         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
985                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
986                 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
987                 if (err == -ENODEV) {
988                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
989                         err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
990                 }
991         } else {
992                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
993                 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
994         }
995         if (err)
996                 goto out;
997         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
998         adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
999         adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1000
1001         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1002                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
1003                 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
1004                 if (err == -ENODEV) {
1005                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1006                         err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
1007                 }
1008         } else {
1009                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
1010                 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
1011         }
1012         if (err)
1013                 goto out;
1014         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1015         adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1016
1017         adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1018
1019         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1020                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
1021                 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
1022                 if (err == -ENODEV) {
1023                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1024                         err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
1025                 }
1026         } else {
1027                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
1028                 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
1029         }
1030         if (err)
1031                 goto out;
1032         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1033         adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1034         adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1035
1036         /*
1037          * Support for MCBP/Virtualization in combination with chained IBs is
1038          * formal released on feature version #46
1039          */
1040         if (adev->gfx.ce_feature_version >= 46 &&
1041             adev->gfx.pfp_feature_version >= 46) {
1042                 adev->virt.chained_ib_support = true;
1043                 DRM_INFO("Chained IB support enabled!\n");
1044         } else
1045                 adev->virt.chained_ib_support = false;
1046
1047         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
1048         err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
1049         if (err)
1050                 goto out;
1051         rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1052         adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1053         adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1054
1055         adev->gfx.rlc.save_and_restore_offset =
1056                         le32_to_cpu(rlc_hdr->save_and_restore_offset);
1057         adev->gfx.rlc.clear_state_descriptor_offset =
1058                         le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
1059         adev->gfx.rlc.avail_scratch_ram_locations =
1060                         le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
1061         adev->gfx.rlc.reg_restore_list_size =
1062                         le32_to_cpu(rlc_hdr->reg_restore_list_size);
1063         adev->gfx.rlc.reg_list_format_start =
1064                         le32_to_cpu(rlc_hdr->reg_list_format_start);
1065         adev->gfx.rlc.reg_list_format_separate_start =
1066                         le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
1067         adev->gfx.rlc.starting_offsets_start =
1068                         le32_to_cpu(rlc_hdr->starting_offsets_start);
1069         adev->gfx.rlc.reg_list_format_size_bytes =
1070                         le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
1071         adev->gfx.rlc.reg_list_size_bytes =
1072                         le32_to_cpu(rlc_hdr->reg_list_size_bytes);
1073
1074         adev->gfx.rlc.register_list_format =
1075                         kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1076                                         adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1077
1078         if (!adev->gfx.rlc.register_list_format) {
1079                 err = -ENOMEM;
1080                 goto out;
1081         }
1082
1083         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1084                         le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
1085         for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1086                 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1087
1088         adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1089
1090         tmp = (unsigned int *)((uintptr_t)rlc_hdr +
1091                         le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
1092         for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1093                 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1094
1095         if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1096                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
1097                 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
1098                 if (err == -ENODEV) {
1099                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1100                         err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
1101                 }
1102         } else {
1103                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
1104                 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
1105         }
1106         if (err)
1107                 goto out;
1108         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1109         adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1110         adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1111
1112         if ((adev->asic_type != CHIP_STONEY) &&
1113             (adev->asic_type != CHIP_TOPAZ)) {
1114                 if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
1115                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
1116                         err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
1117                         if (err == -ENODEV) {
1118                                 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1119                                 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
1120                         }
1121                 } else {
1122                         snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
1123                         err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
1124                 }
1125                 if (!err) {
1126                         cp_hdr = (const struct gfx_firmware_header_v1_0 *)
1127                                 adev->gfx.mec2_fw->data;
1128                         adev->gfx.mec2_fw_version =
1129                                 le32_to_cpu(cp_hdr->header.ucode_version);
1130                         adev->gfx.mec2_feature_version =
1131                                 le32_to_cpu(cp_hdr->ucode_feature_version);
1132                 } else {
1133                         err = 0;
1134                         adev->gfx.mec2_fw = NULL;
1135                 }
1136         }
1137
1138         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
1139         info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
1140         info->fw = adev->gfx.pfp_fw;
1141         header = (const struct common_firmware_header *)info->fw->data;
1142         adev->firmware.fw_size +=
1143                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1144
1145         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
1146         info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
1147         info->fw = adev->gfx.me_fw;
1148         header = (const struct common_firmware_header *)info->fw->data;
1149         adev->firmware.fw_size +=
1150                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1151
1152         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
1153         info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
1154         info->fw = adev->gfx.ce_fw;
1155         header = (const struct common_firmware_header *)info->fw->data;
1156         adev->firmware.fw_size +=
1157                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1158
1159         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
1160         info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
1161         info->fw = adev->gfx.rlc_fw;
1162         header = (const struct common_firmware_header *)info->fw->data;
1163         adev->firmware.fw_size +=
1164                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1165
1166         info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
1167         info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
1168         info->fw = adev->gfx.mec_fw;
1169         header = (const struct common_firmware_header *)info->fw->data;
1170         adev->firmware.fw_size +=
1171                 ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1172
1173         /* we need account JT in */
1174         cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1175         adev->firmware.fw_size +=
1176                 ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
1177
1178         if (amdgpu_sriov_vf(adev)) {
1179                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
1180                 info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
1181                 info->fw = adev->gfx.mec_fw;
1182                 adev->firmware.fw_size +=
1183                         ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
1184         }
1185
1186         if (adev->gfx.mec2_fw) {
1187                 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
1188                 info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
1189                 info->fw = adev->gfx.mec2_fw;
1190                 header = (const struct common_firmware_header *)info->fw->data;
1191                 adev->firmware.fw_size +=
1192                         ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
1193         }
1194
1195 out:
1196         if (err) {
1197                 dev_err(adev->dev,
1198                         "gfx8: Failed to load firmware \"%s\"\n",
1199                         fw_name);
1200                 amdgpu_ucode_release(&adev->gfx.pfp_fw);
1201                 amdgpu_ucode_release(&adev->gfx.me_fw);
1202                 amdgpu_ucode_release(&adev->gfx.ce_fw);
1203                 amdgpu_ucode_release(&adev->gfx.rlc_fw);
1204                 amdgpu_ucode_release(&adev->gfx.mec_fw);
1205                 amdgpu_ucode_release(&adev->gfx.mec2_fw);
1206         }
1207         return err;
1208 }
1209
/**
 * gfx_v8_0_get_csb_buffer - fill the clear-state buffer (CSB) contents.
 * @adev: amdgpu device pointer
 * @buffer: destination buffer (little-endian dwords); caller must size it
 *          to at least gfx_v8_0_get_csb_size() dwords
 *
 * Emits the clear-state preamble, a CONTEXT_CONTROL packet, every
 * SECT_CONTEXT extent from the RLC clear-state data, the raster config
 * register pair, the preamble end marker and a final CLEAR_STATE packet.
 * Silently does nothing if cs_data or @buffer is NULL, and stops early on
 * a non-context section.
 */
static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	/* One SET_CONTEXT_REG packet per extent of each context section. */
	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	/* PA_SC_RASTER_CONFIG and PA_SC_RASTER_CONFIG_1 for SE0/SH0. */
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
			PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
1256
1257 static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
1258 {
1259         if (adev->asic_type == CHIP_CARRIZO)
1260                 return 5;
1261         else
1262                 return 4;
1263 }
1264
1265 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1266 {
1267         const struct cs_section_def *cs_data;
1268         int r;
1269
1270         adev->gfx.rlc.cs_data = vi_cs_data;
1271
1272         cs_data = adev->gfx.rlc.cs_data;
1273
1274         if (cs_data) {
1275                 /* init clear state block */
1276                 r = amdgpu_gfx_rlc_init_csb(adev);
1277                 if (r)
1278                         return r;
1279         }
1280
1281         if ((adev->asic_type == CHIP_CARRIZO) ||
1282             (adev->asic_type == CHIP_STONEY)) {
1283                 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1284                 r = amdgpu_gfx_rlc_init_cpt(adev);
1285                 if (r)
1286                         return r;
1287         }
1288
1289         /* init spm vmid with 0xf */
1290         if (adev->gfx.rlc.funcs->update_spm_vmid)
1291                 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1292
1293         return 0;
1294 }
1295
/* Free the MEC HPD EOP buffer object allocated by gfx_v8_0_mec_init(). */
static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}
1300
1301 static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
1302 {
1303         int r;
1304         u32 *hpd;
1305         size_t mec_hpd_size;
1306
1307         bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1308
1309         /* take ownership of the relevant compute queues */
1310         amdgpu_gfx_compute_queue_acquire(adev);
1311
1312         mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
1313         if (mec_hpd_size) {
1314                 r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
1315                                               AMDGPU_GEM_DOMAIN_VRAM |
1316                                               AMDGPU_GEM_DOMAIN_GTT,
1317                                               &adev->gfx.mec.hpd_eop_obj,
1318                                               &adev->gfx.mec.hpd_eop_gpu_addr,
1319                                               (void **)&hpd);
1320                 if (r) {
1321                         dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
1322                         return r;
1323                 }
1324
1325                 memset(hpd, 0, mec_hpd_size);
1326
1327                 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1328                 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1329         }
1330
1331         return 0;
1332 }
1333
/*
 * Raw GCN shader binary copied into the IB at vgpr_offset and launched by
 * gfx_v8_0_do_edc_gpr_workarounds() via DISPATCH_DIRECT.
 * NOTE(review): the opcodes look like a run of v_mov_b32 VGPR writes ending
 * with s_barrier/s_endpgm — confirm against the GCN3 ISA manual.
 */
static const u32 vgpr_init_compute_shader[] =
{
	0x7e000209, 0x7e020208,
	0x7e040207, 0x7e060206,
	0x7e080205, 0x7e0a0204,
	0x7e0c0203, 0x7e0e0202,
	0x7e100201, 0x7e120200,
	0x7e140209, 0x7e160208,
	0x7e180207, 0x7e1a0206,
	0x7e1c0205, 0x7e1e0204,
	0x7e200203, 0x7e220202,
	0x7e240201, 0x7e260200,
	0x7e280209, 0x7e2a0208,
	0x7e2c0207, 0x7e2e0206,
	0x7e300205, 0x7e320204,
	0x7e340203, 0x7e360202,
	0x7e380201, 0x7e3a0200,
	0x7e3c0209, 0x7e3e0208,
	0x7e400207, 0x7e420206,
	0x7e440205, 0x7e460204,
	0x7e480203, 0x7e4a0202,
	0x7e4c0201, 0x7e4e0200,
	0x7e500209, 0x7e520208,
	0x7e540207, 0x7e560206,
	0x7e580205, 0x7e5a0204,
	0x7e5c0203, 0x7e5e0202,
	0x7e600201, 0x7e620200,
	0x7e640209, 0x7e660208,
	0x7e680207, 0x7e6a0206,
	0x7e6c0205, 0x7e6e0204,
	0x7e700203, 0x7e720202,
	0x7e740201, 0x7e760200,
	0x7e780209, 0x7e7a0208,
	0x7e7c0207, 0x7e7e0206,
	0xbf8a0000, 0xbf810000,
};
1370
/*
 * Raw GCN shader binary copied into the IB at sgpr_offset; dispatched twice
 * (SGPR1 and SGPR2 passes) by gfx_v8_0_do_edc_gpr_workarounds().
 * NOTE(review): opcodes look like s_mov_b32 SGPR writes ending with
 * s_barrier/s_endpgm — confirm against the GCN3 ISA manual.
 */
static const u32 sgpr_init_compute_shader[] =
{
	0xbe8a0100, 0xbe8c0102,
	0xbe8e0104, 0xbe900106,
	0xbe920108, 0xbe940100,
	0xbe960102, 0xbe980104,
	0xbe9a0106, 0xbe9c0108,
	0xbe9e0100, 0xbea00102,
	0xbea20104, 0xbea40106,
	0xbea60108, 0xbea80100,
	0xbeaa0102, 0xbeac0104,
	0xbeae0106, 0xbeb00108,
	0xbeb20100, 0xbeb40102,
	0xbeb60104, 0xbeb80106,
	0xbeba0108, 0xbebc0100,
	0xbebe0102, 0xbec00104,
	0xbec20106, 0xbec40108,
	0xbec60100, 0xbec80102,
	0xbee60004, 0xbee70005,
	0xbeea0006, 0xbeeb0007,
	0xbee80008, 0xbee90009,
	0xbefc0000, 0xbf8a0000,
	0xbf810000, 0x00000000,
};
1395
/*
 * (register, value) pairs, consumed two at a time by
 * gfx_v8_0_do_edc_gpr_workarounds(), that configure the VGPR-init dispatch.
 */
static const u32 vgpr_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*4,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
1416
/*
 * (register, value) pairs for the first SGPR-init dispatch; only the SE0
 * CU mask (0x0f, lower CUs) differs from sgpr2_init_regs below.
 */
static const u32 sgpr1_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
1437
/*
 * (register, value) pairs for the second SGPR-init dispatch; identical to
 * sgpr1_init_regs except the SE0 CU mask targets the upper CUs (0xf0).
 */
static const u32 sgpr2_init_regs[] =
{
	mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xf0,
	mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
	mmCOMPUTE_NUM_THREAD_X, 256*5,
	mmCOMPUTE_NUM_THREAD_Y, 1,
	mmCOMPUTE_NUM_THREAD_Z, 1,
	mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
	mmCOMPUTE_PGM_RSRC2, 20,
	mmCOMPUTE_USER_DATA_0, 0xedcedc00,
	mmCOMPUTE_USER_DATA_1, 0xedcedc01,
	mmCOMPUTE_USER_DATA_2, 0xedcedc02,
	mmCOMPUTE_USER_DATA_3, 0xedcedc03,
	mmCOMPUTE_USER_DATA_4, 0xedcedc04,
	mmCOMPUTE_USER_DATA_5, 0xedcedc05,
	mmCOMPUTE_USER_DATA_6, 0xedcedc06,
	mmCOMPUTE_USER_DATA_7, 0xedcedc07,
	mmCOMPUTE_USER_DATA_8, 0xedcedc08,
	mmCOMPUTE_USER_DATA_9, 0xedcedc09,
};
1458
/*
 * EDC/ECC counter registers read back at the end of
 * gfx_v8_0_do_edc_gpr_workarounds() to clear their counts.
 */
static const u32 sec_ded_counter_registers[] =
{
	mmCPC_EDC_ATC_CNT,
	mmCPC_EDC_SCRATCH_CNT,
	mmCPC_EDC_UCODE_CNT,
	mmCPF_EDC_ATC_CNT,
	mmCPF_EDC_ROQ_CNT,
	mmCPF_EDC_TAG_CNT,
	mmCPG_EDC_ATC_CNT,
	mmCPG_EDC_DMA_CNT,
	mmCPG_EDC_TAG_CNT,
	mmDC_EDC_CSINVOC_CNT,
	mmDC_EDC_RESTORE_CNT,
	mmDC_EDC_STATE_CNT,
	mmGDS_EDC_CNT,
	mmGDS_EDC_GRBM_CNT,
	mmGDS_EDC_OA_DED,
	mmSPI_EDC_CNT,
	mmSQC_ATC_EDC_GATCL1_CNT,
	mmSQC_EDC_CNT,
	mmSQ_EDC_DED_CNT,
	mmSQ_EDC_INFO,
	mmSQ_EDC_SEC_CNT,
	mmTCC_EDC_CNT,
	mmTCP_ATC_EDC_GATCL1_CNT,
	mmTCP_EDC_CNT,
	mmTD_EDC_CNT
};
1487
/*
 * Carrizo-only EDC workaround: build one IB that dispatches the VGPR-init
 * shader once and the SGPR-init shader twice (lower then upper CU mask),
 * wait for it to complete, then enable EDC modes and read back the EDC
 * counter registers to clear them.  Returns 0 on success or a negative
 * error code from IB allocation/submission or the fence wait.
 */
static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	int r, i;
	u32 tmp;
	unsigned total_size, vgpr_offset, sgpr_offset;
	u64 gpu_addr;

	/* only supported on CZ */
	if (adev->asic_type != CHIP_CARRIZO)
		return 0;

	/* bail if the compute ring is not ready */
	if (!ring->sched.ready)
		return 0;

	/* save the current EDC mode and disable it while the shaders run */
	tmp = RREG32(mmGB_EDC_MODE);
	WREG32(mmGB_EDC_MODE, 0);

	/*
	 * Per dispatch: 3 dwords per (reg, value) pair, plus the PGM address
	 * write (4), the DISPATCH_DIRECT packet (5) and the EVENT_WRITE (2).
	 */
	total_size =
		(((ARRAY_SIZE(vgpr_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr1_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size +=
		(((ARRAY_SIZE(sgpr2_init_regs) / 2) * 3) + 4 + 5 + 2) * 4;
	total_size = ALIGN(total_size, 256);
	vgpr_offset = total_size;
	total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
	sgpr_offset = total_size;
	total_size += sizeof(sgpr_init_compute_shader);

	/* allocate an indirect buffer to put the commands in */
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, total_size,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		return r;
	}

	/* load the compute shaders into the tail of the IB */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
		ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];

	for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
		ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];

	/* init the ib length to 0 */
	ib.length_dw = 0;

	/* VGPR */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = vgpr_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR1 */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr1_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* SGPR2: same shader (sgpr_offset), different CU mask in the regs */
	/* write the register state for the compute dispatch */
	for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i += 2) {
		ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i] - PACKET3_SET_SH_REG_START;
		ib.ptr[ib.length_dw++] = sgpr2_init_regs[i + 1];
	}
	/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
	gpu_addr = (ib.gpu_addr + (u64)sgpr_offset) >> 8;
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ib.ptr[ib.length_dw++] = mmCOMPUTE_PGM_LO - PACKET3_SET_SH_REG_START;
	ib.ptr[ib.length_dw++] = lower_32_bits(gpu_addr);
	ib.ptr[ib.length_dw++] = upper_32_bits(gpu_addr);

	/* write dispatch packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
	ib.ptr[ib.length_dw++] = 8; /* x */
	ib.ptr[ib.length_dw++] = 1; /* y */
	ib.ptr[ib.length_dw++] = 1; /* z */
	ib.ptr[ib.length_dw++] =
		REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1);

	/* write CS partial flush packet */
	ib.ptr[ib.length_dw++] = PACKET3(PACKET3_EVENT_WRITE, 0);
	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);

	/* schedule the ib on the ring */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r) {
		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
		goto fail;
	}

	/* wait for the GPU to finish processing the IB */
	r = dma_fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto fail;
	}

	/* re-enable EDC with DED_MODE/PROP_FED on top of the saved mode */
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, DED_MODE, 2);
	tmp = REG_SET_FIELD(tmp, GB_EDC_MODE, PROP_FED, 1);
	WREG32(mmGB_EDC_MODE, tmp);

	tmp = RREG32(mmCC_GC_EDC_CONFIG);
	tmp = REG_SET_FIELD(tmp, CC_GC_EDC_CONFIG, DIS_EDC, 0) | 1;
	WREG32(mmCC_GC_EDC_CONFIG, tmp);


	/* read back registers to clear the counters */
	for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++)
		RREG32(sec_ded_counter_registers[i]);

fail:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);

	return r;
}
1651
/*
 * Fill adev->gfx.config with per-ASIC shader-engine/pipe/CU limits and
 * FIFO sizes, then derive the memory-dependent fields (banks, ranks, row
 * size) and fold the row size into gb_addr_config.  Returns 0 on success
 * or the error from amdgpu_atombios_get_gfx_info() on Polaris/VegaM.
 */
static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;
	int ret;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_FIJI:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 16;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		/* Polaris reads SE/pipe/CU limits from atombios instead */
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		ret = amdgpu_atombios_get_gfx_info(adev);
		if (ret)
			return ret;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_TONGA:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_CARRIZO:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_STONEY:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_cu_per_sh = 3;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DMIMM0), ROW size should be larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		/* discrete parts: derive row size from column count, cap at 4KB */
		tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS);
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}

	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0);
		break;
	case 2:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1);
		break;
	case 4:
		gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;

	return 0;
}
1859
1860 static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1861                                         int mec, int pipe, int queue)
1862 {
1863         int r;
1864         unsigned irq_type;
1865         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1866         unsigned int hw_prio;
1867
1868         ring = &adev->gfx.compute_ring[ring_id];
1869
1870         /* mec0 is me1 */
1871         ring->me = mec + 1;
1872         ring->pipe = pipe;
1873         ring->queue = queue;
1874
1875         ring->ring_obj = NULL;
1876         ring->use_doorbell = true;
1877         ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
1878         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1879                                 + (ring_id * GFX8_MEC_HPD_SIZE);
1880         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1881
1882         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1883                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1884                 + ring->pipe;
1885
1886         hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
1887                         AMDGPU_RING_PRIO_2 : AMDGPU_RING_PRIO_DEFAULT;
1888         /* type-2 packets are deprecated on MEC, use type-3 instead */
1889         r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1890                              hw_prio, NULL);
1891         if (r)
1892                 return r;
1893
1894
1895         return 0;
1896 }
1897
1898 static void gfx_v8_0_sq_irq_work_func(struct work_struct *work);
1899
/*
 * sw_init IP callback: register interrupt sources, load firmware, allocate
 * RLC/MEC buffer objects, create the gfx and compute rings, and set up the
 * KIQ and per-queue MQDs.  Returns 0 on success or the first error hit.
 */
static int gfx_v8_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* number of MECs per ASIC (Topaz/Stoney only have one) */
	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_CARRIZO:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		adev->gfx.mec.num_mec = 2;
		break;
	case CHIP_TOPAZ:
	case CHIP_STONEY:
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* Add CP EDC/ECC irq  */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR,
			      &adev->gfx.cp_ecc_error_irq);
	if (r)
		return r;

	/* SQ interrupts. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG,
			      &adev->gfx.sq_irq);
	if (r) {
		DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r);
		return r;
	}

	INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = gfx_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v8_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		/* no gfx doorbells on iceland */
		if (adev->asic_type != CHIP_TOPAZ) {
			ring->use_doorbell = true;
			ring->doorbell_index = adev->doorbell_index.gfx_ring0;
		}

		r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}


	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				/* skip queues not acquired in gfx_v8_0_mec_init() */
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v8_0_compute_ring_init(adev,
								ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX8_MEC_HPD_SIZE, 0);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq[0];
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct vi_mqd_allocation), 0);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	r = gfx_v8_0_gpu_early_init(adev);
	if (r)
		return r;

	return 0;
}
2043
2044 static int gfx_v8_0_sw_fini(void *handle)
2045 {
2046         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2047         int i;
2048
2049         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2050                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2051         for (i = 0; i < adev->gfx.num_compute_rings; i++)
2052                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2053
2054         amdgpu_gfx_mqd_sw_fini(adev, 0);
2055         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2056         amdgpu_gfx_kiq_fini(adev, 0);
2057
2058         gfx_v8_0_mec_fini(adev);
2059         amdgpu_gfx_rlc_fini(adev);
2060         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2061                                 &adev->gfx.rlc.clear_state_gpu_addr,
2062                                 (void **)&adev->gfx.rlc.cs_ptr);
2063         if ((adev->asic_type == CHIP_CARRIZO) ||
2064             (adev->asic_type == CHIP_STONEY)) {
2065                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2066                                 &adev->gfx.rlc.cp_table_gpu_addr,
2067                                 (void **)&adev->gfx.rlc.cp_table_ptr);
2068         }
2069         gfx_v8_0_free_microcode(adev);
2070
2071         return 0;
2072 }
2073
2074 static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
2075 {
2076         uint32_t *modearray, *mod2array;
2077         const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2078         const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2079         u32 reg_offset;
2080
2081         modearray = adev->gfx.config.tile_mode_array;
2082         mod2array = adev->gfx.config.macrotile_mode_array;
2083
2084         for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2085                 modearray[reg_offset] = 0;
2086
2087         for (reg_offset = 0; reg_offset <  num_secondary_tile_mode_states; reg_offset++)
2088                 mod2array[reg_offset] = 0;
2089
2090         switch (adev->asic_type) {
2091         case CHIP_TOPAZ:
2092                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2093                                 PIPE_CONFIG(ADDR_SURF_P2) |
2094                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2095                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2096                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2097                                 PIPE_CONFIG(ADDR_SURF_P2) |
2098                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2099                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2100                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2101                                 PIPE_CONFIG(ADDR_SURF_P2) |
2102                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2103                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2104                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2105                                 PIPE_CONFIG(ADDR_SURF_P2) |
2106                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2107                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2108                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2109                                 PIPE_CONFIG(ADDR_SURF_P2) |
2110                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2111                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2112                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2113                                 PIPE_CONFIG(ADDR_SURF_P2) |
2114                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2115                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2116                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2117                                 PIPE_CONFIG(ADDR_SURF_P2) |
2118                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2119                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2120                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2121                                 PIPE_CONFIG(ADDR_SURF_P2));
2122                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2123                                 PIPE_CONFIG(ADDR_SURF_P2) |
2124                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2125                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2126                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2127                                  PIPE_CONFIG(ADDR_SURF_P2) |
2128                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2129                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2130                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2131                                  PIPE_CONFIG(ADDR_SURF_P2) |
2132                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2133                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2134                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2135                                  PIPE_CONFIG(ADDR_SURF_P2) |
2136                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2137                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2138                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2139                                  PIPE_CONFIG(ADDR_SURF_P2) |
2140                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2141                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2142                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2143                                  PIPE_CONFIG(ADDR_SURF_P2) |
2144                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2145                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2146                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2147                                  PIPE_CONFIG(ADDR_SURF_P2) |
2148                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2149                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2150                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2151                                  PIPE_CONFIG(ADDR_SURF_P2) |
2152                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2153                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2154                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2155                                  PIPE_CONFIG(ADDR_SURF_P2) |
2156                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2157                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2158                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2159                                  PIPE_CONFIG(ADDR_SURF_P2) |
2160                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2161                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2162                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2163                                  PIPE_CONFIG(ADDR_SURF_P2) |
2164                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2165                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2166                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2167                                  PIPE_CONFIG(ADDR_SURF_P2) |
2168                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2169                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2170                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2171                                  PIPE_CONFIG(ADDR_SURF_P2) |
2172                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2173                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2174                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2175                                  PIPE_CONFIG(ADDR_SURF_P2) |
2176                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2177                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2178                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2179                                  PIPE_CONFIG(ADDR_SURF_P2) |
2180                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2181                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2182                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2183                                  PIPE_CONFIG(ADDR_SURF_P2) |
2184                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2185                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2186                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2187                                  PIPE_CONFIG(ADDR_SURF_P2) |
2188                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2189                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2190                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2191                                  PIPE_CONFIG(ADDR_SURF_P2) |
2192                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2193                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2194
2195                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2196                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2197                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2198                                 NUM_BANKS(ADDR_SURF_8_BANK));
2199                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2200                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2201                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2202                                 NUM_BANKS(ADDR_SURF_8_BANK));
2203                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2204                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2205                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2206                                 NUM_BANKS(ADDR_SURF_8_BANK));
2207                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2208                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2209                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2210                                 NUM_BANKS(ADDR_SURF_8_BANK));
2211                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2212                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2213                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2214                                 NUM_BANKS(ADDR_SURF_8_BANK));
2215                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2216                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2217                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2218                                 NUM_BANKS(ADDR_SURF_8_BANK));
2219                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2220                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2221                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2222                                 NUM_BANKS(ADDR_SURF_8_BANK));
2223                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2224                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2225                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2226                                 NUM_BANKS(ADDR_SURF_16_BANK));
2227                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
2228                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2229                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2230                                 NUM_BANKS(ADDR_SURF_16_BANK));
2231                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2232                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2233                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2234                                  NUM_BANKS(ADDR_SURF_16_BANK));
2235                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2236                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2237                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2238                                  NUM_BANKS(ADDR_SURF_16_BANK));
2239                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2240                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2241                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2242                                  NUM_BANKS(ADDR_SURF_16_BANK));
2243                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2244                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2245                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2246                                  NUM_BANKS(ADDR_SURF_16_BANK));
2247                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2248                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2249                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2250                                  NUM_BANKS(ADDR_SURF_8_BANK));
2251
2252                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2253                         if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
2254                             reg_offset != 23)
2255                                 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2256
2257                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2258                         if (reg_offset != 7)
2259                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2260
2261                 break;
2262         case CHIP_FIJI:
2263         case CHIP_VEGAM:
2264                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2265                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2266                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2267                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2268                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2269                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2270                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2271                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2272                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2273                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2274                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2275                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2276                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2277                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2278                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2279                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2280                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2281                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2282                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2283                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2284                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2285                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2286                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2287                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2288                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2289                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2290                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2291                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2292                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2293                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2294                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2295                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2296                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2297                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
2298                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2299                                 PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2300                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2301                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2302                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2303                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2304                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2305                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2306                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2307                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2308                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2309                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2310                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2311                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2312                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2313                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2314                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2315                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2316                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2317                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2318                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2319                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2320                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2321                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2322                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2323                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2324                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2325                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2326                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2327                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2328                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2329                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2330                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2331                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2332                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2333                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2334                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2335                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2336                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2337                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2338                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2339                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2340                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2341                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2342                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2343                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2344                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2345                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2346                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2347                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2348                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2349                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2350                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2351                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2352                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2353                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2354                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2355                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2356                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2357                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2358                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2359                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2360                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2361                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2362                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2363                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2364                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2365                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2366                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2367                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2368                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2369                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2370                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2371                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2372                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2373                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2374                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2375                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2376                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2377                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2378                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2379                                  PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
2380                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2381                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2382                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2383                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2384                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2385                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2386
2387                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2388                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2389                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2390                                 NUM_BANKS(ADDR_SURF_8_BANK));
2391                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2392                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2393                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2394                                 NUM_BANKS(ADDR_SURF_8_BANK));
2395                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2396                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2397                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2398                                 NUM_BANKS(ADDR_SURF_8_BANK));
2399                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2400                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2401                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2402                                 NUM_BANKS(ADDR_SURF_8_BANK));
2403                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2404                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2405                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2406                                 NUM_BANKS(ADDR_SURF_8_BANK));
2407                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2408                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2409                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2410                                 NUM_BANKS(ADDR_SURF_8_BANK));
2411                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2412                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2413                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2414                                 NUM_BANKS(ADDR_SURF_8_BANK));
2415                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2416                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2417                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2418                                 NUM_BANKS(ADDR_SURF_8_BANK));
2419                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2420                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2421                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2422                                 NUM_BANKS(ADDR_SURF_8_BANK));
2423                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2424                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2425                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2426                                  NUM_BANKS(ADDR_SURF_8_BANK));
2427                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2428                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2429                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2430                                  NUM_BANKS(ADDR_SURF_8_BANK));
2431                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2432                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2433                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2434                                  NUM_BANKS(ADDR_SURF_8_BANK));
2435                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2436                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2437                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2438                                  NUM_BANKS(ADDR_SURF_8_BANK));
2439                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2440                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2441                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2442                                  NUM_BANKS(ADDR_SURF_4_BANK));
2443
2444                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2445                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2446
2447                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2448                         if (reg_offset != 7)
2449                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2450
2451                 break;
2452         case CHIP_TONGA:
                /* Tonga: 8-pipe (ADDR_SURF_P8_32x32_16x16) part.
                 * modearray[0..30] hold the GB_TILE_MODE* values; by index:
                 * depth surfaces (0-7), linear (8), display micro tiling
                 * (9-12), thin (13-17), thick/xthick (18-26) and rotated
                 * (27-30).  Entries 7, 12, 17, 23 and 30 drop to the 4-pipe
                 * ADDR_SURF_P4_16x16 config for PRT surfaces. */
2453                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2454                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2455                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2456                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2457                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2458                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2459                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2460                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2461                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2462                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2463                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2464                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2465                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2466                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2467                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2468                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2469                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2470                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2471                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2472                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2473                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2474                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2475                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2476                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2477                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2478                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2479                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2480                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2481                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2482                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2483                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2484                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2485                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2486                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2487                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2488                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2489                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2490                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2491                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2492                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2493                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2494                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2495                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2496                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2497                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2498                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2499                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2500                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2501                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2502                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2503                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2504                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2505                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2506                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2507                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2508                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2509                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2510                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2511                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2512                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2513                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2514                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2515                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2516                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2517                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2518                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2519                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2520                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2521                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2522                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2523                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2524                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2525                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2526                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2527                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2528                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2529                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2530                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2531                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2532                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2533                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2534                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2535                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2536                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2537                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2538                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2539                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2540                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2541                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2542                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2543                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2544                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2545                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2546                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2547                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2548                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2549                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2550                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2551                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2552                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2553                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2554                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2555                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2556                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2557                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2558                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2559                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2560                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2561                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2562                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2563                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2564                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2565                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2566                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2567                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2568                                  PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2569                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2570                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2571                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2572                                  PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2573                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2574                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2575
                /* mod2array[0..14] hold the GB_MACROTILE_MODE* values
                 * (bank width/height, macro tile aspect ratio, bank
                 * count).  Index 7 is intentionally never assigned and
                 * the write loop below skips it. */
2576                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2577                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2578                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2579                                 NUM_BANKS(ADDR_SURF_16_BANK));
2580                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2581                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2582                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2583                                 NUM_BANKS(ADDR_SURF_16_BANK));
2584                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2585                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2586                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2587                                 NUM_BANKS(ADDR_SURF_16_BANK));
2588                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2589                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2590                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2591                                 NUM_BANKS(ADDR_SURF_16_BANK));
2592                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2593                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2594                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2595                                 NUM_BANKS(ADDR_SURF_16_BANK));
2596                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2597                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2598                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2599                                 NUM_BANKS(ADDR_SURF_16_BANK));
2600                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2601                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2602                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2603                                 NUM_BANKS(ADDR_SURF_16_BANK));
2604                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2605                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2606                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2607                                 NUM_BANKS(ADDR_SURF_16_BANK));
2608                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2609                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2610                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2611                                 NUM_BANKS(ADDR_SURF_16_BANK));
2612                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2613                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2614                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2615                                  NUM_BANKS(ADDR_SURF_16_BANK));
2616                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2617                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2618                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2619                                  NUM_BANKS(ADDR_SURF_16_BANK));
2620                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2621                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2622                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2623                                  NUM_BANKS(ADDR_SURF_8_BANK));
2624                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2625                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2626                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2627                                  NUM_BANKS(ADDR_SURF_4_BANK));
2628                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2629                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2630                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2631                                  NUM_BANKS(ADDR_SURF_4_BANK));
2632
                /* Commit the computed tile-mode table to the hardware. */
2633                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2634                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2635
                /* Macrotile index 7 is deliberately left unprogrammed. */
2636                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2637                         if (reg_offset != 7)
2638                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2639
2640                 break;
2641         case CHIP_POLARIS11:
2642         case CHIP_POLARIS12:
                /* Polaris11/12: 4-pipe (ADDR_SURF_P4_16x16) parts sharing
                 * one table.  modearray[0..30] hold the GB_TILE_MODE*
                 * values; by index: depth surfaces (0-7), linear (8),
                 * display micro tiling (9-12), thin (13-17), thick/xthick
                 * (18-26) and rotated (27-30). */
2643                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2644                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2645                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2646                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2647                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2648                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2649                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2650                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2651                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2652                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2653                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2654                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2655                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2656                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2657                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2658                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2659                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2660                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2661                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2662                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2663                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2664                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2665                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2666                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2667                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2668                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2669                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2670                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2671                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2672                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2673                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2674                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2675                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2676                                 PIPE_CONFIG(ADDR_SURF_P4_16x16));
2677                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2678                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2679                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2680                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2681                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2682                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2683                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2684                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2685                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2686                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2687                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2688                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2689                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2690                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2691                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2692                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2693                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2694                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2695                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2696                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2697                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2698                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2699                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2700                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2701                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2702                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2703                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2704                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2705                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2706                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2707                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2708                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2709                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2710                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2711                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2712                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2713                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2714                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2715                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2716                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2717                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2718                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2719                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2720                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2721                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2722                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2723                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2724                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2725                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2726                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2727                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2728                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2729                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2730                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2731                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2732                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2733                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2734                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2735                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2736                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2737                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2738                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2739                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2740                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2741                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2742                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2743                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2744                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2745                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2746                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2747                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2748                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2749                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2750                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2751                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2752                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2753                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2754                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2755                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2756                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2757                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2758                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2759                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2760                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2761                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2762                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2763                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2764                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2765
                /* mod2array[0..14] hold the GB_MACROTILE_MODE* values
                 * (bank width/height, macro tile aspect ratio, bank
                 * count).  Index 7 is intentionally never assigned and
                 * the write loop below skips it. */
2766                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2767                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2768                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2769                                 NUM_BANKS(ADDR_SURF_16_BANK));
2770
2771                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2772                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2773                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2774                                 NUM_BANKS(ADDR_SURF_16_BANK));
2775
2776                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2777                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2778                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2779                                 NUM_BANKS(ADDR_SURF_16_BANK));
2780
2781                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2782                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2783                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2784                                 NUM_BANKS(ADDR_SURF_16_BANK));
2785
2786                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2787                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2788                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2789                                 NUM_BANKS(ADDR_SURF_16_BANK));
2790
2791                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2792                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2793                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2794                                 NUM_BANKS(ADDR_SURF_16_BANK));
2795
2796                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2797                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2798                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2799                                 NUM_BANKS(ADDR_SURF_16_BANK));
2800
2801                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2802                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
2803                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2804                                 NUM_BANKS(ADDR_SURF_16_BANK));
2805
2806                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2807                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2808                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2809                                 NUM_BANKS(ADDR_SURF_16_BANK));
2810
2811                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2812                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2813                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2814                                 NUM_BANKS(ADDR_SURF_16_BANK));
2815
2816                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2817                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2818                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2819                                 NUM_BANKS(ADDR_SURF_16_BANK));
2820
2821                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2822                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2823                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2824                                 NUM_BANKS(ADDR_SURF_16_BANK));
2825
2826                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2827                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2828                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2829                                 NUM_BANKS(ADDR_SURF_8_BANK));
2830
2831                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2832                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2833                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2834                                 NUM_BANKS(ADDR_SURF_4_BANK));
2835
                /* Commit the computed tile-mode table to the hardware. */
2836                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2837                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
2838
                /* Macrotile index 7 is deliberately left unprogrammed. */
2839                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
2840                         if (reg_offset != 7)
2841                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
2842
2843                 break;
2844         case CHIP_POLARIS10:
2845                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2846                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2847                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2848                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2849                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2850                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2851                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2852                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2853                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2854                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2855                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2856                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2857                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2858                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2859                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2860                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2861                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2862                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2863                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2864                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2865                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2866                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2867                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2868                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2869                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2870                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2871                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2872                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2873                 modearray[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2874                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2875                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
2876                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
2877                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2878                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16));
2879                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2880                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2881                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2882                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2883                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2884                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2885                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2886                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2887                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2888                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2889                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2890                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2891                 modearray[12] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2892                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2893                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
2894                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2895                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2896                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2897                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2898                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2899                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2900                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2901                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2902                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2903                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
2904                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2905                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2906                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2907                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2908                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2909                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2910                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2911                 modearray[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2912                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2913                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2914                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2915                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2916                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2917                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2918                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2919                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
2920                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2921                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2922                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2923                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2924                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2925                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2926                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2927                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
2928                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2929                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2930                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2931                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2932                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2933                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2934                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2935                 modearray[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
2936                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2937                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2938                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2939                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
2940                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2941                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
2942                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2943                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
2944                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2945                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2946                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2947                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
2948                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2949                                 MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
2950                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
2951                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2952                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2953                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2954                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2955                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2956                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2957                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2958                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
2959                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2960                                 PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) |
2961                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2962                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2963                 modearray[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
2964                                 PIPE_CONFIG(ADDR_SURF_P4_16x16) |
2965                                 MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
2966                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
2967
2968                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2969                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2970                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2971                                 NUM_BANKS(ADDR_SURF_16_BANK));
2972
2973                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2974                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2975                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2976                                 NUM_BANKS(ADDR_SURF_16_BANK));
2977
2978                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2979                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2980                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2981                                 NUM_BANKS(ADDR_SURF_16_BANK));
2982
2983                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2984                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2985                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
2986                                 NUM_BANKS(ADDR_SURF_16_BANK));
2987
2988                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2989                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2990                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
2991                                 NUM_BANKS(ADDR_SURF_16_BANK));
2992
2993                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2994                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2995                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
2996                                 NUM_BANKS(ADDR_SURF_16_BANK));
2997
2998                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2999                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3000                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3001                                 NUM_BANKS(ADDR_SURF_16_BANK));
3002
3003                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3004                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3005                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3006                                 NUM_BANKS(ADDR_SURF_16_BANK));
3007
3008                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3009                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3010                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3011                                 NUM_BANKS(ADDR_SURF_16_BANK));
3012
3013                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3014                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3015                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3016                                 NUM_BANKS(ADDR_SURF_16_BANK));
3017
3018                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3019                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3020                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3021                                 NUM_BANKS(ADDR_SURF_16_BANK));
3022
3023                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3024                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3025                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3026                                 NUM_BANKS(ADDR_SURF_8_BANK));
3027
3028                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3029                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3030                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3031                                 NUM_BANKS(ADDR_SURF_4_BANK));
3032
3033                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3034                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3035                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
3036                                 NUM_BANKS(ADDR_SURF_4_BANK));
3037
3038                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3039                         WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3040
3041                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3042                         if (reg_offset != 7)
3043                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3044
3045                 break;
3046         case CHIP_STONEY:
3047                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3048                                 PIPE_CONFIG(ADDR_SURF_P2) |
3049                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3050                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3051                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3052                                 PIPE_CONFIG(ADDR_SURF_P2) |
3053                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3054                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3055                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3056                                 PIPE_CONFIG(ADDR_SURF_P2) |
3057                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3058                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3059                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3060                                 PIPE_CONFIG(ADDR_SURF_P2) |
3061                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3062                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3063                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3064                                 PIPE_CONFIG(ADDR_SURF_P2) |
3065                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3066                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3067                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3068                                 PIPE_CONFIG(ADDR_SURF_P2) |
3069                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3070                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3071                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3072                                 PIPE_CONFIG(ADDR_SURF_P2) |
3073                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3074                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3075                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3076                                 PIPE_CONFIG(ADDR_SURF_P2));
3077                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3078                                 PIPE_CONFIG(ADDR_SURF_P2) |
3079                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3080                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3081                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3082                                  PIPE_CONFIG(ADDR_SURF_P2) |
3083                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3084                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3085                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3086                                  PIPE_CONFIG(ADDR_SURF_P2) |
3087                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3088                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3089                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3090                                  PIPE_CONFIG(ADDR_SURF_P2) |
3091                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3092                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3093                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3094                                  PIPE_CONFIG(ADDR_SURF_P2) |
3095                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3096                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3097                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3098                                  PIPE_CONFIG(ADDR_SURF_P2) |
3099                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3100                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3101                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3102                                  PIPE_CONFIG(ADDR_SURF_P2) |
3103                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3104                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3105                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3106                                  PIPE_CONFIG(ADDR_SURF_P2) |
3107                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3108                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3109                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3110                                  PIPE_CONFIG(ADDR_SURF_P2) |
3111                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3112                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3113                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3114                                  PIPE_CONFIG(ADDR_SURF_P2) |
3115                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3116                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3117                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3118                                  PIPE_CONFIG(ADDR_SURF_P2) |
3119                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3120                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3121                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3122                                  PIPE_CONFIG(ADDR_SURF_P2) |
3123                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3124                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3125                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3126                                  PIPE_CONFIG(ADDR_SURF_P2) |
3127                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3128                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3129                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3130                                  PIPE_CONFIG(ADDR_SURF_P2) |
3131                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3132                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3133                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3134                                  PIPE_CONFIG(ADDR_SURF_P2) |
3135                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3136                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3137                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3138                                  PIPE_CONFIG(ADDR_SURF_P2) |
3139                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3140                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3141                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3142                                  PIPE_CONFIG(ADDR_SURF_P2) |
3143                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3144                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3145                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3146                                  PIPE_CONFIG(ADDR_SURF_P2) |
3147                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3148                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3149
3150                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3151                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3152                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3153                                 NUM_BANKS(ADDR_SURF_8_BANK));
3154                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3155                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3156                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3157                                 NUM_BANKS(ADDR_SURF_8_BANK));
3158                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3159                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3160                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3161                                 NUM_BANKS(ADDR_SURF_8_BANK));
3162                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3163                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3164                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3165                                 NUM_BANKS(ADDR_SURF_8_BANK));
3166                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3167                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3168                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3169                                 NUM_BANKS(ADDR_SURF_8_BANK));
3170                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3171                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3172                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3173                                 NUM_BANKS(ADDR_SURF_8_BANK));
3174                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3175                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3176                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3177                                 NUM_BANKS(ADDR_SURF_8_BANK));
3178                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3179                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3180                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3181                                 NUM_BANKS(ADDR_SURF_16_BANK));
3182                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3183                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3184                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3185                                 NUM_BANKS(ADDR_SURF_16_BANK));
3186                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3187                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3188                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3189                                  NUM_BANKS(ADDR_SURF_16_BANK));
3190                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3191                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3192                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3193                                  NUM_BANKS(ADDR_SURF_16_BANK));
3194                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3195                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3196                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3197                                  NUM_BANKS(ADDR_SURF_16_BANK));
3198                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3199                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3200                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3201                                  NUM_BANKS(ADDR_SURF_16_BANK));
3202                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3203                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3204                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3205                                  NUM_BANKS(ADDR_SURF_8_BANK));
3206
3207                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3208                         if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3209                             reg_offset != 23)
3210                                 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3211
3212                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3213                         if (reg_offset != 7)
3214                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3215
3216                 break;
3217         default:
3218                 dev_warn(adev->dev,
3219                          "Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
3220                          adev->asic_type);
3221                 fallthrough;
3222
3223         case CHIP_CARRIZO:
3224                 modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3225                                 PIPE_CONFIG(ADDR_SURF_P2) |
3226                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
3227                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3228                 modearray[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3229                                 PIPE_CONFIG(ADDR_SURF_P2) |
3230                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
3231                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3232                 modearray[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3233                                 PIPE_CONFIG(ADDR_SURF_P2) |
3234                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
3235                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3236                 modearray[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3237                                 PIPE_CONFIG(ADDR_SURF_P2) |
3238                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
3239                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3240                 modearray[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3241                                 PIPE_CONFIG(ADDR_SURF_P2) |
3242                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3243                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3244                 modearray[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3245                                 PIPE_CONFIG(ADDR_SURF_P2) |
3246                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3247                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3248                 modearray[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3249                                 PIPE_CONFIG(ADDR_SURF_P2) |
3250                                 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
3251                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
3252                 modearray[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
3253                                 PIPE_CONFIG(ADDR_SURF_P2));
3254                 modearray[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3255                                 PIPE_CONFIG(ADDR_SURF_P2) |
3256                                 MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3257                                 SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3258                 modearray[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3259                                  PIPE_CONFIG(ADDR_SURF_P2) |
3260                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3261                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3262                 modearray[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3263                                  PIPE_CONFIG(ADDR_SURF_P2) |
3264                                  MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
3265                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3266                 modearray[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3267                                  PIPE_CONFIG(ADDR_SURF_P2) |
3268                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3269                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3270                 modearray[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3271                                  PIPE_CONFIG(ADDR_SURF_P2) |
3272                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3273                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3274                 modearray[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
3275                                  PIPE_CONFIG(ADDR_SURF_P2) |
3276                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3277                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3278                 modearray[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3279                                  PIPE_CONFIG(ADDR_SURF_P2) |
3280                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3281                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3282                 modearray[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3283                                  PIPE_CONFIG(ADDR_SURF_P2) |
3284                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3285                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3286                 modearray[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
3287                                  PIPE_CONFIG(ADDR_SURF_P2) |
3288                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3289                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3290                 modearray[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3291                                  PIPE_CONFIG(ADDR_SURF_P2) |
3292                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3293                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3294                 modearray[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
3295                                  PIPE_CONFIG(ADDR_SURF_P2) |
3296                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3297                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3298                 modearray[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
3299                                  PIPE_CONFIG(ADDR_SURF_P2) |
3300                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3301                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3302                 modearray[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
3303                                  PIPE_CONFIG(ADDR_SURF_P2) |
3304                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
3305                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3306                 modearray[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
3307                                  PIPE_CONFIG(ADDR_SURF_P2) |
3308                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3309                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3310                 modearray[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
3311                                  PIPE_CONFIG(ADDR_SURF_P2) |
3312                                  MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
3313                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
3314                 modearray[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
3315                                  PIPE_CONFIG(ADDR_SURF_P2) |
3316                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3317                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3318                 modearray[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
3319                                  PIPE_CONFIG(ADDR_SURF_P2) |
3320                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3321                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
3322                 modearray[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
3323                                  PIPE_CONFIG(ADDR_SURF_P2) |
3324                                  MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
3325                                  SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
3326
3327                 mod2array[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3328                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3329                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3330                                 NUM_BANKS(ADDR_SURF_8_BANK));
3331                 mod2array[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3332                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3333                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3334                                 NUM_BANKS(ADDR_SURF_8_BANK));
3335                 mod2array[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3336                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3337                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3338                                 NUM_BANKS(ADDR_SURF_8_BANK));
3339                 mod2array[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3340                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3341                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3342                                 NUM_BANKS(ADDR_SURF_8_BANK));
3343                 mod2array[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3344                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3345                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3346                                 NUM_BANKS(ADDR_SURF_8_BANK));
3347                 mod2array[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3348                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3349                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3350                                 NUM_BANKS(ADDR_SURF_8_BANK));
3351                 mod2array[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3352                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3353                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3354                                 NUM_BANKS(ADDR_SURF_8_BANK));
3355                 mod2array[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3356                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
3357                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3358                                 NUM_BANKS(ADDR_SURF_16_BANK));
3359                 mod2array[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
3360                                 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3361                                 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3362                                 NUM_BANKS(ADDR_SURF_16_BANK));
3363                 mod2array[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3364                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
3365                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3366                                  NUM_BANKS(ADDR_SURF_16_BANK));
3367                 mod2array[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
3368                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3369                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3370                                  NUM_BANKS(ADDR_SURF_16_BANK));
3371                 mod2array[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3372                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
3373                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3374                                  NUM_BANKS(ADDR_SURF_16_BANK));
3375                 mod2array[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3376                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3377                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
3378                                  NUM_BANKS(ADDR_SURF_16_BANK));
3379                 mod2array[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
3380                                  BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
3381                                  MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
3382                                  NUM_BANKS(ADDR_SURF_8_BANK));
3383
3384                 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
3385                         if (reg_offset != 7 && reg_offset != 12 && reg_offset != 17 &&
3386                             reg_offset != 23)
3387                                 WREG32(mmGB_TILE_MODE0 + reg_offset, modearray[reg_offset]);
3388
3389                 for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
3390                         if (reg_offset != 7)
3391                                 WREG32(mmGB_MACROTILE_MODE0 + reg_offset, mod2array[reg_offset]);
3392
3393                 break;
3394         }
3395 }
3396
3397 static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3398                                   u32 se_num, u32 sh_num, u32 instance,
3399                                   int xcc_id)
3400 {
3401         u32 data;
3402
3403         if (instance == 0xffffffff)
3404                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3405         else
3406                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3407
3408         if (se_num == 0xffffffff)
3409                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
3410         else
3411                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
3412
3413         if (sh_num == 0xffffffff)
3414                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
3415         else
3416                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
3417
3418         WREG32(mmGRBM_GFX_INDEX, data);
3419 }
3420
/* Steer subsequent register accesses to the given ME/pipe/queue/VMID via
 * the SRBM index (thin wrapper over the common VI selector). */
static void gfx_v8_0_select_me_pipe_q(struct amdgpu_device *adev,
				  u32 me, u32 pipe, u32 q, u32 vm)
{
	vi_srbm_select(adev, me, pipe, q, vm);
}
3426
3427 static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
3428 {
3429         u32 data, mask;
3430
3431         data =  RREG32(mmCC_RB_BACKEND_DISABLE) |
3432                 RREG32(mmGC_USER_RB_BACKEND_DISABLE);
3433
3434         data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);
3435
3436         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3437                                          adev->gfx.config.max_sh_per_se);
3438
3439         return (~data) & mask;
3440 }
3441
3442 static void
3443 gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
3444 {
3445         switch (adev->asic_type) {
3446         case CHIP_FIJI:
3447         case CHIP_VEGAM:
3448                 *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
3449                           RB_XSEL2(1) | PKR_MAP(2) |
3450                           PKR_XSEL(1) | PKR_YSEL(1) |
3451                           SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
3452                 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
3453                            SE_PAIR_YSEL(2);
3454                 break;
3455         case CHIP_TONGA:
3456         case CHIP_POLARIS10:
3457                 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3458                           SE_XSEL(1) | SE_YSEL(1);
3459                 *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
3460                            SE_PAIR_YSEL(2);
3461                 break;
3462         case CHIP_TOPAZ:
3463         case CHIP_CARRIZO:
3464                 *rconf |= RB_MAP_PKR0(2);
3465                 *rconf1 |= 0x0;
3466                 break;
3467         case CHIP_POLARIS11:
3468         case CHIP_POLARIS12:
3469                 *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
3470                           SE_XSEL(1) | SE_YSEL(1);
3471                 *rconf1 |= 0x0;
3472                 break;
3473         case CHIP_STONEY:
3474                 *rconf |= 0x0;
3475                 *rconf1 |= 0x0;
3476                 break;
3477         default:
3478                 DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
3479                 break;
3480         }
3481 }
3482
/*
 * Rewrite PA_SC_RASTER_CONFIG/_1 for parts with harvested (disabled)
 * render backends: any SE/PKR/RB mapping that points at a unit with no
 * live RBs is redirected to one that has them.
 *
 * @rb_mask: bitmap of enabled RBs
 * @num_rb:  total RB count used to derive per-SE/per-PKR geometry
 */
static void
gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	/* Slice the enabled-RB bitmap into one mask per shader engine */
	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	/* If one SE pair is entirely dead, point SE_PAIR_MAP at the live pair */
	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		/* If one SE of this pair has no live RBs, map to the other */
		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		/* Same fix-up for the packer map when a packer is empty */
		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		/* Finally fix up the RB maps inside each packer */
		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on VI */
		gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff, 0);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on VI */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
}
3591
/* Discover which render backends are active and program the raster
 * configuration, rewriting the RB mapping on harvested parts. */
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	/* Build a global bitmap of active RBs, one slice per SE/SH pair */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			data = gfx_v8_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);

	/* Fully-enabled parts take the default config; harvested parts need
	 * the mapping rewritten around the disabled RBs. */
	if (!adev->gfx.config.backend_enable_mask ||
			adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);
}
3648
3649 #define DEFAULT_SH_MEM_BASES    (0x6000)
/**
 * gfx_v8_0_init_compute_vmid - init compute vmid SH_MEM registers
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize SH_MEM_CONFIG/SH_MEM_BASES for every compute (KFD) VMID and
 * clear their GDS/GWS/OA allocations.
 */
static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	/* HSA64 addressing, unaligned access mode, MTYPE_CC default,
	 * private memory routed through the ATC. */
	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
			SH_MEM_CONFIG__PRIVATE_ATC_MASK;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[i].gws, 0);
		WREG32(amdgpu_gds_reg_offset[i].oa, 0);
	}
}
3700
static void gfx_v8_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		/* zero this VMID's GDS base/size and its GWS/OA allocations */
		WREG32(amdgpu_gds_reg_offset[vmid].mem_base, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].mem_size, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].gws, 0);
		WREG32(amdgpu_gds_reg_offset[vmid].oa, 0);
	}
}
3718
3719 static void gfx_v8_0_config_init(struct amdgpu_device *adev)
3720 {
3721         switch (adev->asic_type) {
3722         default:
3723                 adev->gfx.config.double_offchip_lds_buf = 1;
3724                 break;
3725         case CHIP_CARRIZO:
3726         case CHIP_STONEY:
3727                 adev->gfx.config.double_offchip_lds_buf = 0;
3728                 break;
3729         }
3730 }
3731
/* One-time GFX constant-register setup: address config, tiling tables,
 * RB/CU discovery, per-VMID SH_MEM apertures and PA_SC FIFO sizes. */
static void gfx_v8_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp, sh_static_mem_cfg;
	int i;

	WREG32_FIELD(GRBM_CNTL, READ_TIMEOUT, 0xFF);
	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

	gfx_v8_0_tiling_mode_table_init(adev);
	gfx_v8_0_setup_rb(adev);
	gfx_v8_0_get_cu_info(adev);
	gfx_v8_0_config_init(adev);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
				   SWIZZLE_ENABLE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
				   ELEMENT_SIZE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
				   INDEX_STRIDE, 3);
	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);

	mutex_lock(&adev->srbm_mutex);
	/* Program SH_MEM_* per VMID: VMID 0 gets MTYPE_UC and base 0, the
	 * others MTYPE_NC with the shared aperture's top 16 bits as base. */
	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
		vi_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			WREG32(mmSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			WREG32(mmSH_MEM_CONFIG, tmp);
			tmp = adev->gmc.shared_aperture_start >> 48;
			WREG32(mmSH_MEM_BASES, tmp);
		}

		/* NOTE(review): base=1/limit=0 presumably leaves APE1
		 * disabled (base > limit) — confirm against register spec */
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
	}
	vi_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	gfx_v8_0_init_compute_vmid(adev);
	gfx_v8_0_init_gds_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	WREG32(mmPA_SC_FIFO_SIZE,
		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));

	/* set all four pipe-order timestamp fields to the same priority */
	tmp = RREG32(mmSPI_ARB_PRIORITY);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
	WREG32(mmSPI_ARB_PRIORITY, tmp);

	mutex_unlock(&adev->grbm_idx_mutex);

}
3814
/* Poll until the RLC serdes masters report idle: first the per-CU master
 * for every SE/SH, then the non-CU masters.  Each poll gives up after
 * adev->usec_timeout microseconds. */
static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				/* restore broadcast mode before bailing out */
				gfx_v8_0_select_se_sh(adev, 0xffffffff,
						      0xffffffff, 0xffffffff, 0);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* now wait for the SE/GC/TC non-CU masters to go idle */
	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
3852
3853 static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
3854                                                bool enable)
3855 {
3856         u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
3857
3858         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
3859         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
3860         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
3861         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
3862
3863         WREG32(mmCP_INT_CNTL_RING0, tmp);
3864 }
3865
/* Fill the clear-state buffer and point the RLC CSIB registers at it. */
static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
	/* csib */
	WREG32(mmRLC_CSIB_ADDR_HI,
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(mmRLC_CSIB_ADDR_LO,
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(mmRLC_CSIB_LENGTH,
			adev->gfx.rlc.clear_state_size);
}
3877
3878 static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
3879                                 int ind_offset,
3880                                 int list_size,
3881                                 int *unique_indices,
3882                                 int *indices_count,
3883                                 int max_indices,
3884                                 int *ind_start_offsets,
3885                                 int *offset_count,
3886                                 int max_offset)
3887 {
3888         int indices;
3889         bool new_entry = true;
3890
3891         for (; ind_offset < list_size; ind_offset++) {
3892
3893                 if (new_entry) {
3894                         new_entry = false;
3895                         ind_start_offsets[*offset_count] = ind_offset;
3896                         *offset_count = *offset_count + 1;
3897                         BUG_ON(*offset_count >= max_offset);
3898                 }
3899
3900                 if (register_list_format[ind_offset] == 0xFFFFFFFF) {
3901                         new_entry = true;
3902                         continue;
3903                 }
3904
3905                 ind_offset += 2;
3906
3907                 /* look for the matching indice */
3908                 for (indices = 0;
3909                         indices < *indices_count;
3910                         indices++) {
3911                         if (unique_indices[indices] ==
3912                                 register_list_format[ind_offset])
3913                                 break;
3914                 }
3915
3916                 if (indices >= *indices_count) {
3917                         unique_indices[*indices_count] =
3918                                 register_list_format[ind_offset];
3919                         indices = *indices_count;
3920                         *indices_count = *indices_count + 1;
3921                         BUG_ON(*indices_count >= max_indices);
3922                 }
3923
3924                 register_list_format[ind_offset] = indices;
3925         }
3926 }
3927
/* Upload the RLC save/restore register lists into SRM ARAM and GPM
 * scratch, plus the deduplicated index table.
 *
 * Returns 0 on success, -ENOMEM if the format list cannot be copied. */
static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
{
	int i, temp, data;
	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
	int indices_count = 0;
	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	int offset_count = 0;

	int list_size;
	/* work on a copy: parsing rewrites index values in place */
	unsigned int *register_list_format =
		kmemdup(adev->gfx.rlc.register_list_format,
			adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
	if (!register_list_format)
		return -ENOMEM;

	/* collect unique indices and per-entry start offsets */
	gfx_v8_0_parse_ind_reg_list(register_list_format,
				RLC_FormatDirectRegListLength,
				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
				unique_indices,
				&indices_count,
				ARRAY_SIZE(unique_indices),
				indirect_start_offsets,
				&offset_count,
				ARRAY_SIZE(indirect_start_offsets));

	/* save and restore list */
	WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);

	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);

	/* indirect list */
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);

	/* restore-list size in dwords, halved for the hardware */
	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
	list_size = list_size >> 1;
	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);

	/* starting offsets starts */
	WREG32(mmRLC_GPM_SCRATCH_ADDR,
		adev->gfx.rlc.starting_offsets_start);
	for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
		WREG32(mmRLC_GPM_SCRATCH_DATA,
				indirect_start_offsets[i]);

	/* unique indices */
	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
	for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
		if (unique_indices[i] != 0) {
			WREG32(temp + i, unique_indices[i] & 0x3FFFF);
			WREG32(data + i, unique_indices[i] >> 20);
		}
	}
	kfree(register_list_format);

	return 0;
}
3990
/* Turn on the RLC save/restore machine (SRM). */
static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_SRM_CNTL, SRM_ENABLE, 1);
}
3995
/* Program the RLC powergating delay/threshold registers. */
static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
{
	uint32_t data;

	WREG32_FIELD(CP_RB_WPTR_POLL_CNTL, IDLE_POLL_COUNT, 0x60);

	/* same 0x10 delay for all four PG delay stages */
	data = REG_SET_FIELD(0, RLC_PG_DELAY, POWER_UP_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, POWER_DOWN_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, CMD_PROPAGATE_DELAY, 0x10);
	data = REG_SET_FIELD(data, RLC_PG_DELAY, MEM_SLEEP_DELAY, 0x10);
	WREG32(mmRLC_PG_DELAY, data);

	WREG32_FIELD(RLC_PG_DELAY_2, SERDES_CMD_DELAY, 0x3);
	WREG32_FIELD(RLC_AUTO_PG_CTRL, GRBM_REG_SAVE_GFX_IDLE_THRESHOLD, 0x55f0);

}
4012
4013 static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
4014                                                 bool enable)
4015 {
4016         WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PU_ENABLE, enable ? 1 : 0);
4017 }
4018
4019 static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
4020                                                   bool enable)
4021 {
4022         WREG32_FIELD(RLC_PG_CNTL, SMU_CLK_SLOWDOWN_ON_PD_ENABLE, enable ? 1 : 0);
4023 }
4024
4025 static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
4026 {
4027         WREG32_FIELD(RLC_PG_CNTL, CP_PG_DISABLE, enable ? 0 : 1);
4028 }
4029
4030 static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
4031 {
4032         if ((adev->asic_type == CHIP_CARRIZO) ||
4033             (adev->asic_type == CHIP_STONEY)) {
4034                 gfx_v8_0_init_csb(adev);
4035                 gfx_v8_0_init_save_restore_list(adev);
4036                 gfx_v8_0_enable_save_restore_machine(adev);
4037                 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4038                 gfx_v8_0_init_power_gating(adev);
4039                 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4040         } else if ((adev->asic_type == CHIP_POLARIS11) ||
4041                    (adev->asic_type == CHIP_POLARIS12) ||
4042                    (adev->asic_type == CHIP_VEGAM)) {
4043                 gfx_v8_0_init_csb(adev);
4044                 gfx_v8_0_init_save_restore_list(adev);
4045                 gfx_v8_0_enable_save_restore_machine(adev);
4046                 gfx_v8_0_init_power_gating(adev);
4047         }
4048
4049 }
4050
/* Halt the RLC: clear the F32 core enable bit, mask the GUI idle
 * interrupt, then wait for RLC SerDes traffic to drain so the RLC is
 * quiescent before callers reset or reprogram it.
 */
static void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 0);

	gfx_v8_0_enable_gui_idle_interrupt(adev, false);
	gfx_v8_0_wait_for_rlc_serdes(adev);
}
4058
/* Pulse the RLC soft-reset bit in GRBM_SOFT_RESET, with a short delay
 * after each edge to let the reset assert and deassert cleanly.
 */
static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);

	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}
4067
/* Start the RLC F32 core and, on discrete parts, unmask the GUI idle
 * interrupt. APUs (e.g. Carrizo) enable the CP interrupt later, after
 * the CP itself has been initialized.
 */
static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32_FIELD(RLC_CNTL, RLC_ENABLE_F32, 1);

	/* carrizo do enable cp interrupt after cp inited */
	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);

	/* give the RLC a moment to come up before callers proceed */
	udelay(50);
}
4078
/* Bring the RLC back up. Under SR-IOV the host owns the RLC, so the
 * guest only refreshes the clear-state buffer. On bare metal: stop the
 * RLC, soft-reset it, reprogram power gating, then restart it.
 *
 * Always returns 0.
 */
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		gfx_v8_0_init_csb(adev);
		return 0;
	}

	adev->gfx.rlc.funcs->stop(adev);
	adev->gfx.rlc.funcs->reset(adev);
	gfx_v8_0_init_pg(adev);
	adev->gfx.rlc.funcs->start(adev);

	return 0;
}
4093
4094 static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
4095 {
4096         u32 tmp = RREG32(mmCP_ME_CNTL);
4097
4098         if (enable) {
4099                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
4100                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
4101                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
4102         } else {
4103                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
4104                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
4105                 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
4106         }
4107         WREG32(mmCP_ME_CNTL, tmp);
4108         udelay(50);
4109 }
4110
4111 static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
4112 {
4113         u32 count = 0;
4114         const struct cs_section_def *sect = NULL;
4115         const struct cs_extent_def *ext = NULL;
4116
4117         /* begin clear state */
4118         count += 2;
4119         /* context control state */
4120         count += 3;
4121
4122         for (sect = vi_cs_data; sect->section != NULL; ++sect) {
4123                 for (ext = sect->section; ext->extent != NULL; ++ext) {
4124                         if (sect->id == SECT_CONTEXT)
4125                                 count += 2 + ext->reg_count;
4126                         else
4127                                 return 0;
4128                 }
4129         }
4130         /* pa_sc_raster_config/pa_sc_raster_config1 */
4131         count += 4;
4132         /* end clear state */
4133         count += 2;
4134         /* clear state */
4135         count += 2;
4136
4137         return count;
4138 }
4139
/* Initialize the gfx CP and submit the clear-state packet stream on the
 * gfx ring: PREAMBLE begin, CONTEXT_CONTROL, one SET_CONTEXT_REG packet
 * per vi_cs_data extent, the raster-config pair, PREAMBLE end,
 * CLEAR_STATE, and finally the CE partition bases. The packet count
 * must match gfx_v8_0_get_csb_size() (+4 for the SET_BASE packet).
 *
 * Returns 0 on success or a negative errno if the ring could not be
 * allocated.
 */
static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v8_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v8_0_get_csb_size(adev) + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* clear state buffer */
	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);

	/* emit every SECT_CONTEXT extent as a SET_CONTEXT_REG packet */
	for (sect = vi_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
				       PACKET3(PACKET3_SET_CONTEXT_REG,
					       ext->reg_count));
				amdgpu_ring_write(ring,
				       ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	/* program the raster config from the first SE/SH pair */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	/* init the CE partitions */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	amdgpu_ring_commit(ring);

	return 0;
}
/* Configure the gfx ring doorbell: program offset/enable bits in
 * CP_RB_DOORBELL_CONTROL when the ring uses a doorbell, and on discrete
 * parts also program the doorbell address range aperture. Topaz has no
 * gfx doorbells at all, and APUs skip the range registers.
 */
static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	u32 tmp;
	/* no gfx doorbells on iceland */
	if (adev->asic_type == CHIP_TOPAZ)
		return;

	tmp = RREG32(mmCP_RB_DOORBELL_CONTROL);

	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
						DOORBELL_HIT, 0);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
					    DOORBELL_EN, 1);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
	}

	WREG32(mmCP_RB_DOORBELL_CONTROL, tmp);

	/* APUs do not have the range aperture registers */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
					DOORBELL_RANGE_LOWER,
					adev->doorbell_index.gfx_ring0);
	WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);

	WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
		CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
}
4236
/* Program and start gfx ring 0: size/block-size/MTYPE in CP_RB0_CNTL,
 * reset the read/write pointers (RPTR write-enable is held while the
 * pointers are cleared), set the rptr/wptr writeback addresses, the
 * ring base, and the doorbell, then kick off the clear-state submission
 * and mark the ring schedulable.
 *
 * Always returns 0 (gfx_v8_0_cp_gfx_start()'s result is not checked
 * here — NOTE(review): its error would be silently dropped).
 */
static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr, wptr_gpu_addr;

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3);
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = ring->rptr_gpu_addr;
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	wptr_gpu_addr = ring->wptr_gpu_addr;
	WREG32(mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
	WREG32(mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
	/* let the pointer writes land before dropping RPTR_WR_ENA */
	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

	/* ring base is programmed in units of 256 bytes */
	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	gfx_v8_0_set_cpg_door_bell(adev, ring);
	/* start the ring */
	amdgpu_ring_clear_ring(ring);
	gfx_v8_0_cp_gfx_start(adev);
	ring->sched.ready = true;

	return 0;
}
4290
4291 static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
4292 {
4293         if (enable) {
4294                 WREG32(mmCP_MEC_CNTL, 0);
4295         } else {
4296                 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
4297                 adev->gfx.kiq[0].ring.sched.ready = false;
4298         }
4299         udelay(50);
4300 }
4301
4302 /* KIQ functions */
/* Tell the RLC which me/pipe/queue is the KIQ. The queue id is written
 * first without bit 7, then rewritten with bit 7 set — presumably a
 * write-then-enable handshake required by the RLC (TODO confirm); keep
 * the two-step order.
 */
static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32(mmRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32(mmRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32(mmRLC_CP_SCHEDULERS, tmp);
}
4316
/* Map all kernel compute queues (KCQs) through the KIQ: build the
 * available-queue bitmask from the MEC queue bitmap, submit one
 * SET_RESOURCES packet, then one MAP_QUEUES packet per compute ring
 * with its MQD and wptr-poll addresses.
 *
 * Returns 0 on success or a negative errno if the KIQ ring could not
 * be allocated.
 */
static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
{
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	uint64_t queue_mask = 0;
	int r, i;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << i);
	}

	/* 8 dwords per MAP_QUEUES + 8 for the SET_RESOURCES packet */
	r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}
	/* set resources */
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0); /* oac mask */
	amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
		uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
		uint64_t wptr_addr = ring->wptr_gpu_addr;

		/* map queues */
		amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
		/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
		amdgpu_ring_write(kiq_ring,
				  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
		amdgpu_ring_write(kiq_ring,
				  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index) |
				  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
				  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
				  PACKET3_MAP_QUEUES_ME(ring->me == 1 ? 0 : 1)); /* doorbell */
		amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
		amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
	}

	amdgpu_ring_commit(kiq_ring);

	return 0;
}
4377
/* Ask the currently selected HQD to dequeue and wait up to
 * adev->usec_timeout microseconds for it to go inactive, then clear the
 * dequeue request and the queue read/write pointers unconditionally.
 *
 * @req: dequeue-request type written to CP_HQD_DEQUEUE_REQUEST
 *
 * Returns 0 on success, -ETIMEDOUT if the queue stayed active.
 * NOTE(review): appears to assume the caller has selected the target
 * queue via SRBM beforehand — confirm against callers.
 */
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
{
	int i, r = 0;

	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
				break;
			udelay(1);
		}
		if (i == adev->usec_timeout)
			r = -ETIMEDOUT;
	}
	WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
	WREG32(mmCP_HQD_PQ_RPTR, 0);
	WREG32(mmCP_HQD_PQ_WPTR, 0);

	return r;
}
4398
4399 static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
4400 {
4401         struct amdgpu_device *adev = ring->adev;
4402
4403         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
4404                 if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
4405                         mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
4406                         mqd->cp_hqd_queue_priority =
4407                                 AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
4408                 }
4409         }
4410 }
4411
/* Fill in the memory queue descriptor (MQD) for @ring: header and
 * static thread-management masks, EOP buffer address/size, MQD and HQD
 * base addresses, queue control (size, MTYPE, privileged state),
 * rptr/wptr writeback addresses, doorbell control, and the default
 * values read back from the current HQD registers. Only the KIQ marks
 * itself active here — KCQs are activated later via MAP_QUEUES.
 *
 * NOTE(review): appears to assume the caller has SRBM-selected this
 * queue (the RREG32 defaults are per-queue) — confirm against callers.
 *
 * Always returns 0.
 */
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;
	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
						     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
	/* EOP base is programmed in units of 256 bytes */
	eop_base_addr = ring->eop_gpu_addr >> 8;
	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32(mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			(order_base_2(GFX8_MEC_HPD_SIZE / 4) - 1));

	mqd->cp_hqd_eop_control = tmp;

	/* enable doorbell? */
	tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
			    CP_HQD_PQ_DOORBELL_CONTROL,
			    DOORBELL_EN,
			    ring->use_doorbell ? 1 : 0);

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = RREG32(mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32(mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			(order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = ring->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = ring->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	tmp = 0;
	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				DOORBELL_OFFSET, ring->doorbell_index);

		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
					 DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_wptr = ring->wptr;
	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;

	tmp = RREG32(mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	mqd->cp_hqd_persistent_state = tmp;

	/* set MTYPE */
	tmp = RREG32(mmCP_HQD_IB_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MTYPE, 3);
	mqd->cp_hqd_ib_control = tmp;

	tmp = RREG32(mmCP_HQD_IQ_TIMER);
	tmp = REG_SET_FIELD(tmp, CP_HQD_IQ_TIMER, MTYPE, 3);
	mqd->cp_hqd_iq_timer = tmp;

	tmp = RREG32(mmCP_HQD_CTX_SAVE_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_CTX_SAVE_CONTROL, MTYPE, 3);
	mqd->cp_hqd_ctx_save_control = tmp;

	/* defaults: snapshot the remaining fields from the live HQD */
	mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
	mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
	mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
	mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
	mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
	mqd->cp_hqd_cntl_stack_size = RREG32(mmCP_HQD_CNTL_STACK_SIZE);
	mqd->cp_hqd_wg_state_offset = RREG32(mmCP_HQD_WG_STATE_OFFSET);
	mqd->cp_hqd_ctx_save_size = RREG32(mmCP_HQD_CTX_SAVE_SIZE);
	mqd->cp_hqd_eop_done_events = RREG32(mmCP_HQD_EOP_EVENTS);
	mqd->cp_hqd_error = RREG32(mmCP_HQD_ERROR);
	mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
	mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);

	/* set static priority for a queue/ring */
	gfx_v8_0_mqd_set_priority(ring, mqd);
	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);

	/* the MAP_QUEUES packet activates KCQs itself, so only the KIQ
	 * needs this field set here.
	 */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd->cp_hqd_active = 1;

	return 0;
}
4558
/* Write the MQD image into the live HQD registers for the queue
 * currently selected via SRBM. Registers are programmed as contiguous
 * index ranges off mmCP_MQD_BASE_ADDR; the MQD struct layout from
 * cp_mqd_base_addr_lo onward mirrors the register order, which is why
 * the flat mqd_data[] indexing works. The CP_MQD_BASE..CP_HQD_ACTIVE
 * range is written last because CP_HQD_ACTIVE kicks off the queue.
 *
 * Always returns 0.
 */
static int gfx_v8_0_mqd_commit(struct amdgpu_device *adev,
			struct vi_mqd *mqd)
{
	uint32_t mqd_reg;
	uint32_t *mqd_data;

	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_HQD_ERROR */
	mqd_data = &mqd->cp_mqd_base_addr_lo;

	/* disable wptr polling */
	WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* program all HQD registers */
	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_HQD_EOP_CONTROL; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (adev->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, mqd->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, mqd->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, mqd->cp_hqd_eop_wptr_mem);
	}

	for (mqd_reg = mmCP_HQD_EOP_EVENTS; mqd_reg <= mmCP_HQD_ERROR; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* activate the HQD */
	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	return 0;
}
4595
/* Initialize the KIQ's MQD and commit it to the hardware. On a GPU
 * reset the MQD is restored from the backup copy instead of being
 * rebuilt; on first init it is built from scratch and then backed up.
 * In both paths the HQD registers are programmed under the SRBM mutex
 * with the KIQ's me/pipe/queue selected.
 *
 * Always returns 0.
 */
static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct vi_mqd *mqd = ring->mqd_ptr;

	gfx_v8_0_kiq_setting(ring);

	if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
		/* reset MQD to a clean status */
		if (adev->gfx.kiq[0].mqd_backup)
			memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));

		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
			amdgpu_ring_clear_ring(ring);
		mutex_lock(&adev->srbm_mutex);
		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
		gfx_v8_0_mqd_init(ring);
		gfx_v8_0_mqd_commit(adev, mqd);
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		/* keep a pristine copy for restore after GPU reset */
		if (adev->gfx.kiq[0].mqd_backup)
			memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
	}

	return 0;
}
4635
4636 static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
4637 {
4638         struct amdgpu_device *adev = ring->adev;
4639         struct vi_mqd *mqd = ring->mqd_ptr;
4640         int mqd_idx = ring - &adev->gfx.compute_ring[0];
4641
4642         if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4643                 memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
4644                 ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
4645                 ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
4646                 mutex_lock(&adev->srbm_mutex);
4647                 vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4648                 gfx_v8_0_mqd_init(ring);
4649                 vi_srbm_select(adev, 0, 0, 0, 0);
4650                 mutex_unlock(&adev->srbm_mutex);
4651
4652                 if (adev->gfx.mec.mqd_backup[mqd_idx])
4653                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4654         } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4655                 /* reset MQD to a clean status */
4656                 if (adev->gfx.mec.mqd_backup[mqd_idx])
4657                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4658                 /* reset ring buffer */
4659                 ring->wptr = 0;
4660                 amdgpu_ring_clear_ring(ring);
4661         } else {
4662                 amdgpu_ring_clear_ring(ring);
4663         }
4664         return 0;
4665 }
4666
/* Program the MEC doorbell aperture (KIQ through mec_ring7) and set the
 * doorbell-enable bit. Tonga-and-earlier parts skip the range registers
 * and only get the enable bit.
 */
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
	if (adev->asic_type > CHIP_TONGA) {
		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
	}
	/* enable doorbells */
	WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
}
4676
4677 static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
4678 {
4679         struct amdgpu_ring *ring;
4680         int r;
4681
4682         ring = &adev->gfx.kiq[0].ring;
4683
4684         r = amdgpu_bo_reserve(ring->mqd_obj, false);
4685         if (unlikely(r != 0))
4686                 return r;
4687
4688         r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4689         if (unlikely(r != 0))
4690                 return r;
4691
4692         gfx_v8_0_kiq_init_queue(ring);
4693         amdgpu_bo_kunmap(ring->mqd_obj);
4694         ring->mqd_ptr = NULL;
4695         amdgpu_bo_unreserve(ring->mqd_obj);
4696         ring->sched.ready = true;
4697         return 0;
4698 }
4699
4700 static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
4701 {
4702         struct amdgpu_ring *ring = NULL;
4703         int r = 0, i;
4704
4705         gfx_v8_0_cp_compute_enable(adev, true);
4706
4707         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4708                 ring = &adev->gfx.compute_ring[i];
4709
4710                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
4711                 if (unlikely(r != 0))
4712                         goto done;
4713                 r = amdgpu_bo_kmap(ring->mqd_obj, &ring->mqd_ptr);
4714                 if (!r) {
4715                         r = gfx_v8_0_kcq_init_queue(ring);
4716                         amdgpu_bo_kunmap(ring->mqd_obj);
4717                         ring->mqd_ptr = NULL;
4718                 }
4719                 amdgpu_bo_unreserve(ring->mqd_obj);
4720                 if (r)
4721                         goto done;
4722         }
4723
4724         gfx_v8_0_set_mec_doorbell_range(adev);
4725
4726         r = gfx_v8_0_kiq_kcq_enable(adev);
4727         if (r)
4728                 goto done;
4729
4730 done:
4731         return r;
4732 }
4733
/* Run ring tests on gfx ring 0, the KIQ ring and every compute ring.
 * A gfx or KIQ failure aborts and is returned to the caller. Compute
 * ring results are deliberately ignored — presumably
 * amdgpu_ring_test_helper() already marks a failing ring unready
 * (TODO confirm), so a bad compute queue does not fail the whole
 * resume.
 *
 * Returns 0 unless the gfx or KIQ ring test fails.
 */
static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	/* collect all the ring_tests here, gfx, kiq, compute */
	ring = &adev->gfx.gfx_ring[0];
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	ring = &adev->gfx.kiq[0].ring;
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		amdgpu_ring_test_helper(ring);
	}

	return 0;
}
4757
4758 static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
4759 {
4760         int r;
4761
4762         if (!(adev->flags & AMD_IS_APU))
4763                 gfx_v8_0_enable_gui_idle_interrupt(adev, false);
4764
4765         r = gfx_v8_0_kiq_resume(adev);
4766         if (r)
4767                 return r;
4768
4769         r = gfx_v8_0_cp_gfx_resume(adev);
4770         if (r)
4771                 return r;
4772
4773         r = gfx_v8_0_kcq_resume(adev);
4774         if (r)
4775                 return r;
4776
4777         r = gfx_v8_0_cp_test_all_rings(adev);
4778         if (r)
4779                 return r;
4780
4781         gfx_v8_0_enable_gui_idle_interrupt(adev, true);
4782
4783         return 0;
4784 }
4785
/* Enable or disable both CP front ends (GFX then compute) together. */
static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v8_0_cp_gfx_enable(adev, enable);
	gfx_v8_0_cp_compute_enable(adev, enable);
}
4791
4792 static int gfx_v8_0_hw_init(void *handle)
4793 {
4794         int r;
4795         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4796
4797         gfx_v8_0_init_golden_registers(adev);
4798         gfx_v8_0_constants_init(adev);
4799
4800         r = adev->gfx.rlc.funcs->resume(adev);
4801         if (r)
4802                 return r;
4803
4804         r = gfx_v8_0_cp_resume(adev);
4805
4806         return r;
4807 }
4808
4809 static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
4810 {
4811         int r, i;
4812         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
4813
4814         r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4815         if (r)
4816                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
4817
4818         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4819                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4820
4821                 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
4822                 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
4823                                                 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
4824                                                 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
4825                                                 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
4826                                                 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
4827                 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
4828                 amdgpu_ring_write(kiq_ring, 0);
4829                 amdgpu_ring_write(kiq_ring, 0);
4830                 amdgpu_ring_write(kiq_ring, 0);
4831         }
4832         r = amdgpu_ring_test_helper(kiq_ring);
4833         if (r)
4834                 DRM_ERROR("KCQ disable failed\n");
4835
4836         return r;
4837 }
4838
4839 static bool gfx_v8_0_is_idle(void *handle)
4840 {
4841         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4842
4843         if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)
4844                 || RREG32(mmGRBM_STATUS2) != 0x8)
4845                 return false;
4846         else
4847                 return true;
4848 }
4849
4850 static bool gfx_v8_0_rlc_is_idle(void *handle)
4851 {
4852         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4853
4854         if (RREG32(mmGRBM_STATUS2) != 0x8)
4855                 return false;
4856         else
4857                 return true;
4858 }
4859
4860 static int gfx_v8_0_wait_for_rlc_idle(void *handle)
4861 {
4862         unsigned int i;
4863         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4864
4865         for (i = 0; i < adev->usec_timeout; i++) {
4866                 if (gfx_v8_0_rlc_is_idle(handle))
4867                         return 0;
4868
4869                 udelay(1);
4870         }
4871         return -ETIMEDOUT;
4872 }
4873
4874 static int gfx_v8_0_wait_for_idle(void *handle)
4875 {
4876         unsigned int i;
4877         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4878
4879         for (i = 0; i < adev->usec_timeout; i++) {
4880                 if (gfx_v8_0_is_idle(handle))
4881                         return 0;
4882
4883                 udelay(1);
4884         }
4885         return -ETIMEDOUT;
4886 }
4887
/*
 * gfx_v8_0_hw_fini - IP-block hw_fini hook: quiesce GFX on teardown/suspend
 *
 * Drops the irq references taken in late_init, unmaps the compute
 * queues, then halts CP and RLC once each reports idle.  Under SR-IOV
 * the host owns the hardware, so nothing is halted after the KCQs are
 * disabled.
 */
static int gfx_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);

	amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);

	amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);

	/* disable KCQ to avoid CPC touch memory not valid anymore */
	gfx_v8_0_kcq_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}
	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	/* Only halt CP/RLC when idle; a busy engine is left running and
	 * just reported. */
	if (!gfx_v8_0_wait_for_idle(adev))
		gfx_v8_0_cp_enable(adev, false);
	else
		pr_err("cp is busy, skip halt cp\n");
	if (!gfx_v8_0_wait_for_rlc_idle(adev))
		adev->gfx.rlc.funcs->stop(adev);
	else
		pr_err("rlc is busy, skip halt rlc\n");
	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);

	return 0;
}
4919
/* Suspend is implemented as a full hw_fini: queues unmapped, CP/RLC halted. */
static int gfx_v8_0_suspend(void *handle)
{
	return gfx_v8_0_hw_fini(handle);
}
4924
/* Resume is implemented as a full hw_init: golden regs, RLC, then CP. */
static int gfx_v8_0_resume(void *handle)
{
	return gfx_v8_0_hw_init(handle);
}
4929
/*
 * gfx_v8_0_check_soft_reset - decide whether a GFX soft reset is needed
 *
 * Reads GRBM_STATUS, GRBM_STATUS2 and SRBM_STATUS and translates any
 * busy indications into GRBM/SRBM soft-reset bit masks.  The masks are
 * stashed in adev->gfx.{grbm,srbm}_soft_reset for the pre_/soft_/
 * post_soft_reset hooks to consume.
 *
 * Returns true when any reset bit was set (a reset is required).
 */
static bool gfx_v8_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	/* GRBM_STATUS: any busy pipeline stage -> reset CP + GFX + GRBM */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK |
		   GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	}

	/* GRBM_STATUS2: RLC busy -> reset RLC */
	tmp = RREG32(mmGRBM_STATUS2);
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

	/* any CP micro-engine busy -> reset all three CP MEs plus GRBM */
	if (REG_GET_FIELD(tmp, GRBM_STATUS2, CPF_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPC_BUSY) ||
	    REG_GET_FIELD(tmp, GRBM_STATUS2, CPG_BUSY)) {
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPF, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPC, 1);
		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
						SOFT_RESET_CPG, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET,
						SOFT_RESET_GRBM, 1);
	}

	/* SRBM_STATUS: pending GRBM requests or busy semaphore block */
	tmp = RREG32(mmSRBM_STATUS);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1);
	if (REG_GET_FIELD(tmp, SRBM_STATUS, SEM_BUSY))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);

	if (grbm_soft_reset || srbm_soft_reset) {
		adev->gfx.grbm_soft_reset = grbm_soft_reset;
		adev->gfx.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->gfx.grbm_soft_reset = 0;
		adev->gfx.srbm_soft_reset = 0;
		return false;
	}
}
4991
/*
 * gfx_v8_0_pre_soft_reset - quiesce the engines named in the pending reset
 *
 * Runs before gfx_v8_0_soft_reset.  Stops the RLC, halts the GFX CP if
 * its reset bits are pending, and for CP/compute resets deactivates
 * every compute ring's HQD before halting the compute CP.
 *
 * No-op (returns 0) when check_soft_reset recorded nothing to reset.
 */
static int gfx_v8_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;

	/* stop the rlc */
	adev->gfx.rlc.funcs->stop(adev);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		/* Disable GFX parsing/prefetching */
		gfx_v8_0_cp_gfx_enable(adev, false);

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			/* select the ring's me/pipe/queue, deactivate its HQD,
			 * then restore the default selection.
			 * NOTE(review): 2 is deactivate_hqd's request argument —
			 * confirm its semantics against the helper. */
			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		/* Disable MEC parsing/prefetching */
		gfx_v8_0_cp_compute_enable(adev, false);
	}

	return 0;
}
5032
/*
 * gfx_v8_0_soft_reset - pulse the GRBM/SRBM soft-reset bits
 *
 * Applies the reset masks recorded by check_soft_reset: stalls the
 * GFX-to-memory interface (GMCON), asserts then deasserts the reset
 * bits with 50us holds, and finally releases the GMCON stall.
 *
 * No-op (returns 0) when no reset bits are pending.
 */
static int gfx_v8_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;
	srbm_soft_reset = adev->gfx.srbm_soft_reset;

	/* stall the GFX memory client while the blocks are reset */
	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 1);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 1);
		WREG32(mmGMCON_DEBUG, tmp);
		udelay(50);
	}

	if (grbm_soft_reset) {
		/* assert, hold 50us, then deassert; the readbacks after each
		 * write presumably post the write to the bus — confirm */
		tmp = RREG32(mmGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(mmGRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmGRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		/* same assert/hold/deassert dance for the SRBM bits */
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);
	}

	/* release the GMCON stall */
	if (grbm_soft_reset || srbm_soft_reset) {
		tmp = RREG32(mmGMCON_DEBUG);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_STALL, 0);
		tmp = REG_SET_FIELD(tmp, GMCON_DEBUG, GFX_CLEAR, 0);
		WREG32(mmGMCON_DEBUG, tmp);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	return 0;
}
5094
/*
 * gfx_v8_0_post_soft_reset - bring the engines back after a soft reset
 *
 * For CP/compute resets: deactivates each compute ring's HQD again and
 * resumes KIQ and the KCQs.  For CP/GFX resets: resumes the GFX ring.
 * Finally re-runs all ring tests and restarts the RLC.
 *
 * No-op (returns 0) when no reset bits were pending.
 */
static int gfx_v8_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 grbm_soft_reset = 0;

	if ((!adev->gfx.grbm_soft_reset) &&
	    (!adev->gfx.srbm_soft_reset))
		return 0;

	grbm_soft_reset = adev->gfx.grbm_soft_reset;

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPC) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPG)) {
		int i;

		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

			mutex_lock(&adev->srbm_mutex);
			vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
			gfx_v8_0_deactivate_hqd(adev, 2);
			vi_srbm_select(adev, 0, 0, 0, 0);
			mutex_unlock(&adev->srbm_mutex);
		}
		gfx_v8_0_kiq_resume(adev);
		gfx_v8_0_kcq_resume(adev);
	}

	if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
	    REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
		gfx_v8_0_cp_gfx_resume(adev);

	gfx_v8_0_cp_test_all_rings(adev);

	adev->gfx.rlc.funcs->start(adev);

	return 0;
}
5135
5136 /**
5137  * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot
5138  *
5139  * @adev: amdgpu_device pointer
5140  *
5141  * Fetches a GPU clock counter snapshot.
5142  * Returns the 64 bit clock counter snapshot.
5143  */
5144 static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
5145 {
5146         uint64_t clock;
5147
5148         mutex_lock(&adev->gfx.gpu_clock_mutex);
5149         WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
5150         clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
5151                 ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
5152         mutex_unlock(&adev->gfx.gpu_clock_mutex);
5153         return clock;
5154 }
5155
/*
 * gfx_v8_0_ring_emit_gds_switch - emit the per-VMID GDS setup packets
 *
 * Emits four WRITE_DATA packets that program the GDS base/size, the GWS
 * allocation and the OA mask for @vmid into the corresponding
 * amdgpu_gds_reg_offset registers.
 */
static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS: size and base packed into one register value */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA: contiguous mask of oa_size bits starting at bit oa_base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
5194
5195 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
5196 {
5197         WREG32(mmSQ_IND_INDEX,
5198                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5199                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5200                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
5201                 (SQ_IND_INDEX__FORCE_READ_MASK));
5202         return RREG32(mmSQ_IND_DATA);
5203 }
5204
5205 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
5206                            uint32_t wave, uint32_t thread,
5207                            uint32_t regno, uint32_t num, uint32_t *out)
5208 {
5209         WREG32(mmSQ_IND_INDEX,
5210                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
5211                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
5212                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
5213                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
5214                 (SQ_IND_INDEX__FORCE_READ_MASK) |
5215                 (SQ_IND_INDEX__AUTO_INCR_MASK));
5216         while (num--)
5217                 *(out++) = RREG32(mmSQ_IND_DATA);
5218 }
5219
5220 static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
5221 {
5222         /* type 0 wave data */
5223         dst[(*no_fields)++] = 0;
5224         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
5225         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
5226         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
5227         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
5228         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
5229         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
5230         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
5231         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
5232         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
5233         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
5234         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
5235         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
5236         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
5237         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
5238         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
5239         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
5240         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
5241         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
5242         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
5243 }
5244
/* Read "size" SGPR values for the given simd/wave, starting at "start"
 * within the SGPR index space, into dst (thread id 0). */
static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
5253
5254
5255 static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5256         .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5257         .select_se_sh = &gfx_v8_0_select_se_sh,
5258         .read_wave_data = &gfx_v8_0_read_wave_data,
5259         .read_wave_sgprs = &gfx_v8_0_read_wave_sgprs,
5260         .select_me_pipe_q = &gfx_v8_0_select_me_pipe_q
5261 };
5262
5263 static int gfx_v8_0_early_init(void *handle)
5264 {
5265         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5266
5267         adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5268         adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5269                                           AMDGPU_MAX_COMPUTE_RINGS);
5270         adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5271         gfx_v8_0_set_ring_funcs(adev);
5272         gfx_v8_0_set_irq_funcs(adev);
5273         gfx_v8_0_set_gds_init(adev);
5274         gfx_v8_0_set_rlc_funcs(adev);
5275
5276         return 0;
5277 }
5278
/*
 * gfx_v8_0_late_init - IP-block late_init hook for GFX v8
 *
 * Takes references on the privileged-register, privileged-instruction,
 * CP ECC and SQ interrupts, and runs the EDC GPR workarounds (which
 * need the IB pool, hence "late").  Returns 0 or the first error.
 */
static int gfx_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	/* requires IBs so do in late init after IB pool is initialized */
	r = gfx_v8_0_do_edc_gpr_workarounds(adev);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
	if (r) {
		DRM_ERROR("amdgpu_irq_get() failed to get IRQ for EDC, r: %d.\n", r);
		return r;
	}

	r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
	if (r) {
		DRM_ERROR(
			"amdgpu_irq_get() failed to get IRQ for SQ, r: %d.\n",
			r);
		return r;
	}

	return 0;
}
5313
5314 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5315                                                        bool enable)
5316 {
5317         if ((adev->asic_type == CHIP_POLARIS11) ||
5318             (adev->asic_type == CHIP_POLARIS12) ||
5319             (adev->asic_type == CHIP_VEGAM))
5320                 /* Send msg to SMU via Powerplay */
5321                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, enable);
5322
5323         WREG32_FIELD(RLC_PG_CNTL, STATIC_PER_CU_PG_ENABLE, enable ? 1 : 0);
5324 }
5325
/* Toggle dynamic per-CU medium-grain power gating via RLC_PG_CNTL. */
static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
							bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, DYN_PER_CU_PG_ENABLE, enable ? 1 : 0);
}
5331
/* Toggle quick power gating (Polaris11-family feature) via RLC_PG_CNTL. */
static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
		bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, QUICK_PG_ENABLE, enable ? 1 : 0);
}
5337
/* Toggle coarse-grain GFX power gating (Carrizo/Stoney) via RLC_PG_CNTL. */
static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
					  bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, enable ? 1 : 0);
}
5343
/* Toggle GFX pipeline power gating (Carrizo/Stoney) via RLC_PG_CNTL. */
static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD(RLC_PG_CNTL, GFX_PIPELINE_PG_ENABLE, enable ? 1 : 0);

	/* Read any GFX register to wake up GFX. */
	if (!enable)
		RREG32(mmDB_RENDER_CONTROL);
}
5353
5354 static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5355                                           bool enable)
5356 {
5357         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5358                 cz_enable_gfx_cg_power_gating(adev, true);
5359                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5360                         cz_enable_gfx_pipeline_power_gating(adev, true);
5361         } else {
5362                 cz_enable_gfx_cg_power_gating(adev, false);
5363                 cz_enable_gfx_pipeline_power_gating(adev, false);
5364         }
5365 }
5366
/*
 * gfx_v8_0_set_powergating_state - IP-block powergating hook for GFX v8
 *
 * Applies the requested powergating state per ASIC family, honoring the
 * individual AMD_PG_SUPPORT_* capability flags.  The RLC safe mode is
 * entered around register programming when any relevant PG feature is
 * supported.  No-op under SR-IOV.  Always returns 0.
 */
static int gfx_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_RLC_SMU_HS |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_GFX_DMG))
		amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:

		/* SCK slow-down and CP PG follow their capability flags
		 * regardless of the requested gate/ungate state */
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			cz_enable_sck_slow_down_on_power_up(adev, true);
			cz_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			cz_enable_sck_slow_down_on_power_up(adev, false);
			cz_enable_sck_slow_down_on_power_down(adev, false);
		}
		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			cz_enable_cp_power_gating(adev, true);
		else
			cz_enable_cp_power_gating(adev, false);

		cz_update_gfx_cg_power_gating(adev, enable);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		/* Polaris-family: static, dynamic and quick MG PG, each
		 * enabled only when requested AND supported */
		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
		else
			gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);

		if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
			polaris11_enable_gfx_quick_mg_power_gating(adev, true);
		else
			polaris11_enable_gfx_quick_mg_power_gating(adev, false);
		break;
	default:
		break;
	}
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_RLC_SMU_HS |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_GFX_DMG))
		amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	return 0;
}
5437
/*
 * gfx_v8_0_get_clockgating_state - report active GFX clockgating features
 *
 * Reads the RLC/CGTS/CP clockgating control registers and ORs the
 * corresponding AMD_CG_SUPPORT_* bits into *flags.
 */
static void gfx_v8_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	/* NOTE(review): under SR-IOV the flags are zeroed but the register
	 * reads below still execute — confirm that is intended */
	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGLG */
	data = RREG32(mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_CGTS */
	data = RREG32(mmCGTS_SM_CTRL_REG);
	if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS;

	/* AMD_CG_SUPPORT_GFX_CGTS_LS */
	if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_CGTS_LS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32(mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32(mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
}
5479
/*
 * gfx_v8_0_send_serdes_cmd - broadcast a BPM serdes command
 *
 * Selects all SEs/SHs, targets every CU and non-CU master, then writes
 * @cmd and @reg_addr into RLC_SERDES_WR_CTRL with the broadcast BPM
 * address.  Stoney's clear mask omits the BPM_DATA/REG_ADDR fields
 * (they are still OR'd in below, same as on other ASICs).
 */
static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
				     uint32_t reg_addr, uint32_t cmd)
{
	uint32_t data;

	gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);

	WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
	WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);

	/* clear the command/select fields, keeping other control bits */
	data = RREG32(mmRLC_SERDES_WR_CTRL);
	if (adev->asic_type == CHIP_STONEY)
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	else
		data &= ~(RLC_SERDES_WR_CTRL__WRITE_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__READ_COMMAND_MASK |
			  RLC_SERDES_WR_CTRL__P1_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__P2_SELECT_MASK |
			  RLC_SERDES_WR_CTRL__RDDATA_RESET_MASK |
			  RLC_SERDES_WR_CTRL__POWER_DOWN_MASK |
			  RLC_SERDES_WR_CTRL__POWER_UP_MASK |
			  RLC_SERDES_WR_CTRL__SHORT_FORMAT_MASK |
			  RLC_SERDES_WR_CTRL__BPM_DATA_MASK |
			  RLC_SERDES_WR_CTRL__REG_ADDR_MASK |
			  RLC_SERDES_WR_CTRL__SRBM_OVERRIDE_MASK);
	/* 0xff = broadcast BPM address */
	data |= (RLC_SERDES_WR_CTRL__RSVD_BPM_ADDR_MASK |
		 (cmd << RLC_SERDES_WR_CTRL__BPM_DATA__SHIFT) |
		 (reg_addr << RLC_SERDES_WR_CTRL__REG_ADDR__SHIFT) |
		 (0xff << RLC_SERDES_WR_CTRL__BPM_ADDR__SHIFT));

	WREG32(mmRLC_SERDES_WR_CTRL, data);
}
5520
/* RLC safe-mode handshake message IDs plus the RLC_GPR_REG2 REQ/MESSAGE
 * field layout used for that handshake.
 */
#define MSG_ENTER_RLC_SAFE_MODE     1
#define MSG_EXIT_RLC_SAFE_MODE      0
#define RLC_GPR_REG2__REQ_MASK 0x00000001
#define RLC_GPR_REG2__REQ__SHIFT 0
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
5527
5528 static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
5529 {
5530         uint32_t rlc_setting;
5531
5532         rlc_setting = RREG32(mmRLC_CNTL);
5533         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
5534                 return false;
5535
5536         return true;
5537 }
5538
/*
 * Request RLC safe mode: write CMD + MESSAGE=1 to mmRLC_SAFE_MODE, wait
 * for GFX clock and power status to report up, then wait for the RLC to
 * acknowledge by auto-clearing CMD.  @xcc_id is unused on gfx v8 (single
 * compute complex); the parameter exists for the shared rlc_funcs API.
 *
 * NOTE(review): the write value is seeded from mmRLC_CNTL rather than
 * mmRLC_SAFE_MODE — this matches long-standing driver behavior; confirm
 * against the RLC programming docs before "fixing".
 */
static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;
	data = RREG32(mmRLC_CNTL);
	data |= RLC_SAFE_MODE__CMD_MASK;
	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32(mmRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE: both GFX clock and power status on */
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) &
		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
			break;
		udelay(1);
	}
	/* then wait for the RLC to acknowledge by clearing CMD */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}
5565
/*
 * Leave RLC safe mode: write CMD with MESSAGE cleared (exit request) to
 * mmRLC_SAFE_MODE and wait for the RLC to acknowledge by clearing CMD.
 * @xcc_id is unused on gfx v8; it exists for the shared rlc_funcs API.
 *
 * NOTE(review): as in gfx_v8_0_set_safe_mode(), the value is seeded
 * from mmRLC_CNTL — long-standing behavior, kept as-is.
 */
static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RREG32(mmRLC_CNTL);
	data |= RLC_SAFE_MODE__CMD_MASK;
	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
	WREG32(mmRLC_SAFE_MODE, data);

	/* wait for the RLC to acknowledge by clearing CMD */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}
5582
/*
 * Program the VMID field of mmRLC_SPM_VMID, selecting which VMID the
 * Streaming Performance Monitor attributes to.  GFXOFF is disallowed
 * around the RMW — presumably because the RLC register cannot be
 * reached while GFX is powered off (TODO confirm).  Under one-VF
 * SR-IOV the access bypasses the KIQ and hits the register directly.
 */
static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
	u32 data;

	amdgpu_gfx_off_ctrl(adev, false);

	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
	else
		data = RREG32(mmRLC_SPM_VMID);

	/* replace only the VMID field, preserving the rest of the register */
	data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
	else
		WREG32(mmRLC_SPM_VMID, data);

	amdgpu_gfx_off_ctrl(adev, true);
}
5604
/* gfx v8 RLC control callbacks (safe mode, CSB, firmware start/stop). */
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
	.set_safe_mode = gfx_v8_0_set_safe_mode,
	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
	.init = gfx_v8_0_rlc_init,
	.get_csb_size = gfx_v8_0_get_csb_size,
	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
	.resume = gfx_v8_0_rlc_resume,
	.stop = gfx_v8_0_rlc_stop,
	.reset = gfx_v8_0_rlc_reset,
	.start = gfx_v8_0_rlc_start,
	.update_spm_vmid = gfx_v8_0_update_spm_vmid
};
5619
/*
 * Enable/disable medium-grain clock gating (MGCG) together with the
 * memory light-sleep features (RLC_LS/CP_LS) and CGTS tree-shade
 * gating.  The numbered steps follow a hardware-required ordering and
 * run entirely inside RLC safe mode; do not reorder the register
 * accesses or serdes waits.
 */
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, data;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	/* It is disabled by HW by default */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS)
				/* 1 - RLC memory Light sleep */
				WREG32_FIELD(RLC_MEM_SLP_CNTL, RLC_MEM_LS_EN, 1);

			/* 2 - CP memory Light sleep */
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS)
				WREG32_FIELD(CP_MEM_SLP_CNTL, CP_MEM_LS_EN, 1);
		}

		/* 3 - RLC_CGTT_MGCG_OVERRIDE: drop the overrides so MGCG can engage */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		if (adev->flags & AMD_IS_APU)
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK);
		else
			/* dGPUs additionally release the GRBM override */
			data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);

		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 4 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 5 - clear mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			/* 6 - Enable CGTS(Tree Shade) MGCG /MGLS */
			temp = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~(CGTS_SM_CTRL_REG__SM_MODE_MASK);
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			/* LS override only released when both MGLS and CGTS_LS are on */
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (temp != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
		udelay(50);

		/* 7 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	} else {
		/* 1 - MGCG_OVERRIDE[0] for CP and MGCG_OVERRIDE[1] for RLC */
		temp = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_MASK |
				RLC_CGTT_MGCG_OVERRIDE__RLC_MASK |
				RLC_CGTT_MGCG_OVERRIDE__MGCG_MASK |
				RLC_CGTT_MGCG_OVERRIDE__GRBM_MASK);
		if (temp != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* 2 - disable MGLS in RLC */
		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		/* 3 - disable MGLS in CP */
		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		/* 4 - Disable CGTS(Tree Shade) MGCG and MGLS */
		temp = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= (CGTS_SM_CTRL_REG__OVERRIDE_MASK |
				CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK);
		if (temp != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		/* 5 - wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 6 - set mgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_MGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		udelay(50);

		/* 7- wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
5723
/*
 * Enable/disable coarse-grain clock gating (CGCG) and coarse-grain
 * light sleep (CGLS).  The override releases, serdes commands and
 * waits follow a hardware-required ordering inside RLC safe mode; do
 * not reorder them.
 */
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t temp, temp1, data, data1;

	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		/* 1 - release the CGCG override */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK;
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* : wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 2 - clear cgcg override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, CLE_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* 3 - write cmd to set CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, SET_BPM_SERDES_CMD);

		/* 4 - enable cgcg */
		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
			/* enable cgls*/
			data |= RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;

			/* release the CGLS override as well */
			temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
			data1 &= ~RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK;

			if (temp1 != data1)
				WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);
		} else {
			data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		/* 5 enable cntx_empty_int_enable/cntx_busy_int_enable/
		 * Cmp_busy/GFX_Idle interrupts
		 */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	} else {
		/* disable cntx_empty_int_enable & GFX Idle interrupt */
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);

		/* TEST CGCG: re-assert both CGCG and CGLS overrides */
		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data1 |= (RLC_CGTT_MGCG_OVERRIDE__CGCG_MASK |
				RLC_CGTT_MGCG_OVERRIDE__CGLS_MASK);
		if (temp1 != data1)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data1);

		/* read gfx register to wake up cgcg */
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to Set CGCG Override */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGCG_OVERRIDE, SET_BPM_SERDES_CMD);

		/* wait for RLC_SERDES_CU_MASTER & RLC_SERDES_NONCU_MASTER idle */
		gfx_v8_0_wait_for_rlc_serdes(adev);

		/* write cmd to Clear CGLS */
		gfx_v8_0_send_serdes_cmd(adev, BPM_REG_CGLS_EN, CLE_BPM_SERDES_CMD);

		/* disable cgcg, cgls should be disabled too. */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (temp != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);
		/* enable interrupts again for PG */
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	}

	gfx_v8_0_wait_for_rlc_serdes(adev);

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
5816 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
5817                                             bool enable)
5818 {
5819         if (enable) {
5820                 /* CGCG/CGLS should be enabled after MGCG/MGLS/TS(CG/LS)
5821                  * ===  MGCG + MGLS + TS(CG/LS) ===
5822                  */
5823                 gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5824                 gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5825         } else {
5826                 /* CGCG/CGLS should be disabled before MGCG/MGLS/TS(CG/LS)
5827                  * ===  CGCG + CGLS ===
5828                  */
5829                 gfx_v8_0_update_coarse_grain_clock_gating(adev, enable);
5830                 gfx_v8_0_update_medium_grain_clock_gating(adev, enable);
5831         }
5832         return 0;
5833 }
5834
5835 static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
5836                                           enum amd_clockgating_state state)
5837 {
5838         uint32_t msg_id, pp_state = 0;
5839         uint32_t pp_support_state = 0;
5840
5841         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5842                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5843                         pp_support_state = PP_STATE_SUPPORT_LS;
5844                         pp_state = PP_STATE_LS;
5845                 }
5846                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5847                         pp_support_state |= PP_STATE_SUPPORT_CG;
5848                         pp_state |= PP_STATE_CG;
5849                 }
5850                 if (state == AMD_CG_STATE_UNGATE)
5851                         pp_state = 0;
5852
5853                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5854                                 PP_BLOCK_GFX_CG,
5855                                 pp_support_state,
5856                                 pp_state);
5857                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5858         }
5859
5860         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5861                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5862                         pp_support_state = PP_STATE_SUPPORT_LS;
5863                         pp_state = PP_STATE_LS;
5864                 }
5865
5866                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5867                         pp_support_state |= PP_STATE_SUPPORT_CG;
5868                         pp_state |= PP_STATE_CG;
5869                 }
5870
5871                 if (state == AMD_CG_STATE_UNGATE)
5872                         pp_state = 0;
5873
5874                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5875                                 PP_BLOCK_GFX_MG,
5876                                 pp_support_state,
5877                                 pp_state);
5878                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5879         }
5880
5881         return 0;
5882 }
5883
5884 static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
5885                                           enum amd_clockgating_state state)
5886 {
5887
5888         uint32_t msg_id, pp_state = 0;
5889         uint32_t pp_support_state = 0;
5890
5891         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
5892                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5893                         pp_support_state = PP_STATE_SUPPORT_LS;
5894                         pp_state = PP_STATE_LS;
5895                 }
5896                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5897                         pp_support_state |= PP_STATE_SUPPORT_CG;
5898                         pp_state |= PP_STATE_CG;
5899                 }
5900                 if (state == AMD_CG_STATE_UNGATE)
5901                         pp_state = 0;
5902
5903                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5904                                 PP_BLOCK_GFX_CG,
5905                                 pp_support_state,
5906                                 pp_state);
5907                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5908         }
5909
5910         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
5911                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5912                         pp_support_state = PP_STATE_SUPPORT_LS;
5913                         pp_state = PP_STATE_LS;
5914                 }
5915                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5916                         pp_support_state |= PP_STATE_SUPPORT_CG;
5917                         pp_state |= PP_STATE_CG;
5918                 }
5919                 if (state == AMD_CG_STATE_UNGATE)
5920                         pp_state = 0;
5921
5922                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5923                                 PP_BLOCK_GFX_3D,
5924                                 pp_support_state,
5925                                 pp_state);
5926                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5927         }
5928
5929         if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
5930                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
5931                         pp_support_state = PP_STATE_SUPPORT_LS;
5932                         pp_state = PP_STATE_LS;
5933                 }
5934
5935                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
5936                         pp_support_state |= PP_STATE_SUPPORT_CG;
5937                         pp_state |= PP_STATE_CG;
5938                 }
5939
5940                 if (state == AMD_CG_STATE_UNGATE)
5941                         pp_state = 0;
5942
5943                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5944                                 PP_BLOCK_GFX_MG,
5945                                 pp_support_state,
5946                                 pp_state);
5947                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5948         }
5949
5950         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
5951                 pp_support_state = PP_STATE_SUPPORT_LS;
5952
5953                 if (state == AMD_CG_STATE_UNGATE)
5954                         pp_state = 0;
5955                 else
5956                         pp_state = PP_STATE_LS;
5957
5958                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5959                                 PP_BLOCK_GFX_RLC,
5960                                 pp_support_state,
5961                                 pp_state);
5962                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5963         }
5964
5965         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
5966                 pp_support_state = PP_STATE_SUPPORT_LS;
5967
5968                 if (state == AMD_CG_STATE_UNGATE)
5969                         pp_state = 0;
5970                 else
5971                         pp_state = PP_STATE_LS;
5972                 msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
5973                         PP_BLOCK_GFX_CP,
5974                         pp_support_state,
5975                         pp_state);
5976                 amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
5977         }
5978
5979         return 0;
5980 }
5981
5982 static int gfx_v8_0_set_clockgating_state(void *handle,
5983                                           enum amd_clockgating_state state)
5984 {
5985         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5986
5987         if (amdgpu_sriov_vf(adev))
5988                 return 0;
5989
5990         switch (adev->asic_type) {
5991         case CHIP_FIJI:
5992         case CHIP_CARRIZO:
5993         case CHIP_STONEY:
5994                 gfx_v8_0_update_gfx_clock_gating(adev,
5995                                                  state == AMD_CG_STATE_GATE);
5996                 break;
5997         case CHIP_TONGA:
5998                 gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
5999                 break;
6000         case CHIP_POLARIS10:
6001         case CHIP_POLARIS11:
6002         case CHIP_POLARIS12:
6003         case CHIP_VEGAM:
6004                 gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
6005                 break;
6006         default:
6007                 break;
6008         }
6009         return 0;
6010 }
6011
6012 static u64 gfx_v8_0_ring_get_rptr(struct amdgpu_ring *ring)
6013 {
6014         return *ring->rptr_cpu_addr;
6015 }
6016
6017 static u64 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
6018 {
6019         struct amdgpu_device *adev = ring->adev;
6020
6021         if (ring->use_doorbell)
6022                 /* XXX check if swapping is necessary on BE */
6023                 return *ring->wptr_cpu_addr;
6024         else
6025                 return RREG32(mmCP_RB0_WPTR);
6026 }
6027
6028 static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
6029 {
6030         struct amdgpu_device *adev = ring->adev;
6031
6032         if (ring->use_doorbell) {
6033                 /* XXX check if swapping is necessary on BE */
6034                 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
6035                 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
6036         } else {
6037                 WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
6038                 (void)RREG32(mmCP_RB0_WPTR);
6039         }
6040 }
6041
6042 static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
6043 {
6044         u32 ref_and_mask, reg_mem_engine;
6045
6046         if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) ||
6047             (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) {
6048                 switch (ring->me) {
6049                 case 1:
6050                         ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
6051                         break;
6052                 case 2:
6053                         ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
6054                         break;
6055                 default:
6056                         return;
6057                 }
6058                 reg_mem_engine = 0;
6059         } else {
6060                 ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
6061                 reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */
6062         }
6063
6064         amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
6065         amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
6066                                  WAIT_REG_MEM_FUNCTION(3) |  /* == */
6067                                  reg_mem_engine));
6068         amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
6069         amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
6070         amdgpu_ring_write(ring, ref_and_mask);
6071         amdgpu_ring_write(ring, ref_and_mask);
6072         amdgpu_ring_write(ring, 0x20); /* poll interval */
6073 }
6074
/*
 * Emit a VGT flush: drain outstanding VS work (VS_PARTIAL_FLUSH)
 * before issuing the VGT_FLUSH event itself.
 */
static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
		EVENT_INDEX(4));

	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
		EVENT_INDEX(0));
}
6085
/*
 * Emit an indirect buffer on a gfx ring.  CE IBs use
 * INDIRECT_BUFFER_CONST, DE IBs plain INDIRECT_BUFFER.  Under SR-IOV
 * with preemptible IBs the packet is marked preemption-enabled and the
 * DE metadata is emitted first.  @flags is unused here.
 */
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib,
					uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	/* IB length in dwords, VMID in bits 24.. */
	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		/* DE metadata only for DE IBs with a real VMID */
		if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
			gfx_v8_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
6117
/*
 * Emit an indirect buffer on a compute ring, optionally preceded by a
 * GDS_COMPUTE_MAX_WAVE_ID write that resets the ME/GDS wave-ID
 * counters (see workaround comment below).  @flags is unused here.
 */
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_job *job,
					  struct amdgpu_ib *ib,
					  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	/* Currently, there is a high possibility to get wave ID mismatch
	 * between ME and GDS, leading to a hw deadlock, because ME generates
	 * different wave IDs than the GDS expects. This situation happens
	 * randomly when at least 5 compute pipes use GDS ordered append.
	 * The wave IDs generated by ME are also wrong after suspend/resume.
	 * Those are probably bugs somewhere else in the kernel driver.
	 *
	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
	 * GDS to 0 for this ring (me/pipe).
	 */
	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		amdgpu_ring_write(ring, mmGDS_COMPUTE_MAX_WAVE_ID - PACKET3_SET_CONFIG_REG_START);
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
				(2 << 0) |
#endif
				(ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
6151
/*
 * Emit a gfx fence at @addr with value @seq.  A dummy EOP carrying
 * seq-1 goes down the pipe first as a cache-flush workaround; the real
 * EOP then writes the fence (32- or 64-bit per AMDGPU_FENCE_FLAG_64BIT)
 * and optionally raises an interrupt (AMDGPU_FENCE_FLAG_INT).
 */
static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with seq one below.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	/* 32-bit data write, no interrupt for the dummy EOP */
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe:
	 * EVENT_WRITE_EOP - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
			  DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));

}
6188
static void gfx_v8_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	/* GFX rings wait on the PFP engine so the prefetcher stalls too;
	 * compute rings have no PFP and wait on the ME instead.
	 */
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* WAIT_REG_MEM: poll the fence write-back location until it equals
	 * the most recently emitted sync sequence number.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference value */
	amdgpu_ring_write(ring, 0xffffffff); /* compare mask */
	amdgpu_ring_write(ring, 4); /* poll interval */
}
6205
static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	/* Program the page-directory base for vmid and request the TLB
	 * invalidate (shared GMC helper emits the register writes).
	 */
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}
6231
/* Read the compute ring write pointer from its CPU-visible mirror. */
static u64 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	return *ring->wptr_cpu_addr;
}
6236
/* Publish the new compute ring write pointer: mirror it to memory, then
 * notify the CP through the ring's doorbell.
 */
static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
6245
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	/* DATA_SEL: 2 = write 64-bit seq, 1 = write 32-bit seq;
	 * INT_SEL: 2 = raise interrupt after the write, 0 = none.
	 */
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
6266
static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, mmCPC_INT_STATUS);
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}
6291
/* Emit a SWITCH_BUFFER packet on the ring. */
static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}
6297
/* Emit a CONTEXT_CONTROL packet whose load bits depend on whether this
 * submission performs a context switch and/or carries a preamble IB.
 */
static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	/* under SR-IOV, CE metadata must be emitted before this packet */
	if (amdgpu_sriov_vf(ring->adev))
		gfx_v8_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		gfx_v8_0_ring_emit_vgt_flush(ring);
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
6330
/* Emit a COND_EXEC packet that skips the following DWs when
 * *cond_exe_gpu_addr == 0.  The skip count is not known yet, so a
 * placeholder is written; the returned ring offset is later fixed up by
 * gfx_v8_0_ring_emit_patch_cond_exec().
 */
static unsigned gfx_v8_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}
6343
/* Patch the COND_EXEC placeholder at @offset with the number of DWs
 * emitted since it, accounting for ring buffer wrap-around.
 */
static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa); /* must be the init placeholder */

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		/* wptr wrapped past the end of the ring since init */
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
6357
/* Emit a COPY_DATA packet that copies register @reg into the device
 * write-back slot at @reg_val_offs so the CPU can read the value back.
 */
static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}
6374
/* Emit a WRITE_DATA packet that writes @val to register @reg; the
 * control word varies with the ring type.
 */
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				  uint32_t val)
{
	uint32_t cmd;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = 1 << 16; /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
6398
/* Attempt soft recovery of a hung ring by issuing an SQ_CMD targeted at
 * the offending VMID.  CMD=0x03/MODE=0x01 presumably kills the waves of
 * that VMID -- confirm against the SQ_CMD register spec.
 */
static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32(mmSQ_CMD, value);
}
6410
/* Enable/disable the EOP timestamp interrupt for the GFX ring. */
static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	WREG32_FIELD(CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE,
		     state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
}
6417
6418 static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
6419                                                      int me, int pipe,
6420                                                      enum amdgpu_interrupt_state state)
6421 {
6422         u32 mec_int_cntl, mec_int_cntl_reg;
6423
6424         /*
6425          * amdgpu controls only the first MEC. That's why this function only
6426          * handles the setting of interrupts for this specific MEC. All other
6427          * pipes' interrupts are set by amdkfd.
6428          */
6429
6430         if (me == 1) {
6431                 switch (pipe) {
6432                 case 0:
6433                         mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
6434                         break;
6435                 case 1:
6436                         mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
6437                         break;
6438                 case 2:
6439                         mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
6440                         break;
6441                 case 3:
6442                         mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
6443                         break;
6444                 default:
6445                         DRM_DEBUG("invalid pipe %d\n", pipe);
6446                         return;
6447                 }
6448         } else {
6449                 DRM_DEBUG("invalid me %d\n", me);
6450                 return;
6451         }
6452
6453         switch (state) {
6454         case AMDGPU_IRQ_STATE_DISABLE:
6455                 mec_int_cntl = RREG32(mec_int_cntl_reg);
6456                 mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6457                 WREG32(mec_int_cntl_reg, mec_int_cntl);
6458                 break;
6459         case AMDGPU_IRQ_STATE_ENABLE:
6460                 mec_int_cntl = RREG32(mec_int_cntl_reg);
6461                 mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
6462                 WREG32(mec_int_cntl_reg, mec_int_cntl);
6463                 break;
6464         default:
6465                 break;
6466         }
6467 }
6468
6469 static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
6470                                              struct amdgpu_irq_src *source,
6471                                              unsigned type,
6472                                              enum amdgpu_interrupt_state state)
6473 {
6474         WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_REG_INT_ENABLE,
6475                      state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6476
6477         return 0;
6478 }
6479
6480 static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
6481                                               struct amdgpu_irq_src *source,
6482                                               unsigned type,
6483                                               enum amdgpu_interrupt_state state)
6484 {
6485         WREG32_FIELD(CP_INT_CNTL_RING0, PRIV_INSTR_INT_ENABLE,
6486                      state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
6487
6488         return 0;
6489 }
6490
/* Route an EOP interrupt enable/disable request to the matching engine:
 * the single GFX ring or one of the eight MEC1/MEC2 compute pipes.
 * Unknown types are silently ignored.
 */
static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v8_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
6529
6530 static int gfx_v8_0_set_cp_ecc_int_state(struct amdgpu_device *adev,
6531                                          struct amdgpu_irq_src *source,
6532                                          unsigned int type,
6533                                          enum amdgpu_interrupt_state state)
6534 {
6535         int enable_flag;
6536
6537         switch (state) {
6538         case AMDGPU_IRQ_STATE_DISABLE:
6539                 enable_flag = 0;
6540                 break;
6541
6542         case AMDGPU_IRQ_STATE_ENABLE:
6543                 enable_flag = 1;
6544                 break;
6545
6546         default:
6547                 return -EINVAL;
6548         }
6549
6550         WREG32_FIELD(CP_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6551         WREG32_FIELD(CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6552         WREG32_FIELD(CP_INT_CNTL_RING1, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6553         WREG32_FIELD(CP_INT_CNTL_RING2, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6554         WREG32_FIELD(CPC_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, enable_flag);
6555         WREG32_FIELD(CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6556                      enable_flag);
6557         WREG32_FIELD(CP_ME1_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6558                      enable_flag);
6559         WREG32_FIELD(CP_ME1_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6560                      enable_flag);
6561         WREG32_FIELD(CP_ME1_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6562                      enable_flag);
6563         WREG32_FIELD(CP_ME2_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6564                      enable_flag);
6565         WREG32_FIELD(CP_ME2_PIPE1_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6566                      enable_flag);
6567         WREG32_FIELD(CP_ME2_PIPE2_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6568                      enable_flag);
6569         WREG32_FIELD(CP_ME2_PIPE3_INT_CNTL, CP_ECC_ERROR_INT_ENABLE,
6570                      enable_flag);
6571
6572         return 0;
6573 }
6574
6575 static int gfx_v8_0_set_sq_int_state(struct amdgpu_device *adev,
6576                                      struct amdgpu_irq_src *source,
6577                                      unsigned int type,
6578                                      enum amdgpu_interrupt_state state)
6579 {
6580         int enable_flag;
6581
6582         switch (state) {
6583         case AMDGPU_IRQ_STATE_DISABLE:
6584                 enable_flag = 1;
6585                 break;
6586
6587         case AMDGPU_IRQ_STATE_ENABLE:
6588                 enable_flag = 0;
6589                 break;
6590
6591         default:
6592                 return -EINVAL;
6593         }
6594
6595         WREG32_FIELD(SQ_INTERRUPT_MSG_CTRL, STALL,
6596                      enable_flag);
6597
6598         return 0;
6599 }
6600
/* CP EOP interrupt handler: signal fence completion on the ring that
 * generated the interrupt.
 */
static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	/* decode the IV ring_id: bits [3:2] = ME, [1:0] = pipe, [6:4] = queue */
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		/* ME0 is the GFX engine; there is a single GFX ring */
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead
			 * of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
6632
/* Common fault handler: report a scheduler fault on the ring that the IV
 * entry identifies, so the GPU scheduler can start job recovery.
 */
static void gfx_v8_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	/* decode the IV ring_id: bits [3:2] = ME, [1:0] = pipe, [6:4] = queue */
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	}
}
6659
/* IRQ handler for privileged register access violations in the CS. */
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}
6668
/* IRQ handler for illegal instructions in the command stream. */
static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v8_0_fault(adev, entry);
	return 0;
}
6677
/* IRQ handler for CP EDC/ECC errors.  No recovery is attempted; the
 * event is only reported.
 */
static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	/* DRM_ERROR does not append a newline; without one this message
	 * would be fused with the next log line.
	 */
	DRM_ERROR("CP EDC/ECC error detected.\n");
	return 0;
}
6685
6686 static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
6687                                   bool from_wq)
6688 {
6689         u32 enc, se_id, sh_id, cu_id;
6690         char type[20];
6691         int sq_edc_source = -1;
6692
6693         enc = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, ENCODING);
6694         se_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_CMN, SE_ID);
6695
6696         switch (enc) {
6697                 case 0:
6698                         DRM_INFO("SQ general purpose intr detected:"
6699                                         "se_id %d, immed_overflow %d, host_reg_overflow %d,"
6700                                         "host_cmd_overflow %d, cmd_timestamp %d,"
6701                                         "reg_timestamp %d, thread_trace_buff_full %d,"
6702                                         "wlt %d, thread_trace %d.\n",
6703                                         se_id,
6704                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, IMMED_OVERFLOW),
6705                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_REG_OVERFLOW),
6706                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, HOST_CMD_OVERFLOW),
6707                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, CMD_TIMESTAMP),
6708                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, REG_TIMESTAMP),
6709                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE_BUF_FULL),
6710                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, WLT),
6711                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_AUTO, THREAD_TRACE)
6712                                         );
6713                         break;
6714                 case 1:
6715                 case 2:
6716
6717                         cu_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, CU_ID);
6718                         sh_id = REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SH_ID);
6719
6720                         /*
6721                          * This function can be called either directly from ISR
6722                          * or from BH in which case we can access SQ_EDC_INFO
6723                          * instance
6724                          */
6725                         if (from_wq) {
6726                                 mutex_lock(&adev->grbm_idx_mutex);
6727                                 gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id, 0);
6728
6729                                 sq_edc_source = REG_GET_FIELD(RREG32(mmSQ_EDC_INFO), SQ_EDC_INFO, SOURCE);
6730
6731                                 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
6732                                 mutex_unlock(&adev->grbm_idx_mutex);
6733                         }
6734
6735                         if (enc == 1)
6736                                 sprintf(type, "instruction intr");
6737                         else
6738                                 sprintf(type, "EDC/ECC error");
6739
6740                         DRM_INFO(
6741                                 "SQ %s detected: "
6742                                         "se_id %d, sh_id %d, cu_id %d, simd_id %d, wave_id %d, vm_id %d "
6743                                         "trap %s, sq_ed_info.source %s.\n",
6744                                         type, se_id, sh_id, cu_id,
6745                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, SIMD_ID),
6746                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, WAVE_ID),
6747                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, VM_ID),
6748                                         REG_GET_FIELD(ih_data, SQ_INTERRUPT_WORD_WAVE, PRIV) ? "true" : "false",
6749                                         (sq_edc_source != -1) ? sq_edc_source_names[sq_edc_source] : "unavailable"
6750                                 );
6751                         break;
6752                 default:
6753                         DRM_ERROR("SQ invalid encoding type\n.");
6754         }
6755 }
6756
6757 static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
6758 {
6759
6760         struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
6761         struct sq_work *sq_work = container_of(work, struct sq_work, work);
6762
6763         gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
6764 }
6765
6766 static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
6767                            struct amdgpu_irq_src *source,
6768                            struct amdgpu_iv_entry *entry)
6769 {
6770         unsigned ih_data = entry->src_data[0];
6771
6772         /*
6773          * Try to submit work so SQ_EDC_INFO can be accessed from
6774          * BH. If previous work submission hasn't finished yet
6775          * just print whatever info is possible directly from the ISR.
6776          */
6777         if (work_pending(&adev->gfx.sq_work.work)) {
6778                 gfx_v8_0_parse_sq_irq(adev, ih_data, false);
6779         } else {
6780                 adev->gfx.sq_work.ih_data = ih_data;
6781                 schedule_work(&adev->gfx.sq_work.work);
6782         }
6783
6784         return 0;
6785 }
6786
/* Emit a SURFACE_SYNC that flushes/invalidates the TCL1, TC, and SH
 * I/K caches over the full address range (GFX ring variant).
 */
static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA |
			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
}
6799
/* Compute-ring counterpart of gfx_v8_0_emit_mem_sync(): ACQUIRE_MEM
 * carries 64-bit size/base fields in addition to the coherency controls.
 */
static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA |
			  PACKET3_TC_WB_ACTION_ENA);  /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);	/* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xff);		/* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);		/* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A);	/* poll interval */
}
6814
6815
6816 /* mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
6817 #define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT       0x0000007f
6818 static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring,
6819                                         uint32_t pipe, bool enable)
6820 {
6821         uint32_t val;
6822         uint32_t wcl_cs_reg;
6823
6824         val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT;
6825
6826         switch (pipe) {
6827         case 0:
6828                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0;
6829                 break;
6830         case 1:
6831                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1;
6832                 break;
6833         case 2:
6834                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2;
6835                 break;
6836         case 3:
6837                 wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3;
6838                 break;
6839         default:
6840                 DRM_DEBUG("invalid pipe %d\n", pipe);
6841                 return;
6842         }
6843
6844         amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
6845
6846 }
6847
#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT      0x07ffffff
/* Restrict (enable=true) or restore (enable=false) wave limits for the GFX
 * pipe and the sibling compute pipes, to improve QoS for a high priority
 * compute queue on this ring's pipe.
 */
static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t val;
	int i;

	/* mmSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
	 * around 25% of gpu resources.
	 */
	val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT;
	amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val);

	/* Restrict waves for normal/low priority compute queues as well
	 * to get best QoS for high priority compute jobs.
	 *
	 * amdgpu controls only 1st ME(0-3 CS pipes).
	 */
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		/* skip this ring's own pipe; it keeps full throughput */
		if (i != ring->pipe)
			gfx_v8_0_emit_wave_limit_cs(ring, i, enable);

	}

}
6874
/* IP-block level hooks for GFX v8: init/teardown, suspend/resume,
 * soft reset, and clock/power gating control.
 */
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
	.name = "gfx_v8_0",
	.early_init = gfx_v8_0_early_init,
	.late_init = gfx_v8_0_late_init,
	.sw_init = gfx_v8_0_sw_init,
	.sw_fini = gfx_v8_0_sw_fini,
	.hw_init = gfx_v8_0_hw_init,
	.hw_fini = gfx_v8_0_hw_fini,
	.suspend = gfx_v8_0_suspend,
	.resume = gfx_v8_0_resume,
	.is_idle = gfx_v8_0_is_idle,
	.wait_for_idle = gfx_v8_0_wait_for_idle,
	.check_soft_reset = gfx_v8_0_check_soft_reset,
	.pre_soft_reset = gfx_v8_0_pre_soft_reset,
	.soft_reset = gfx_v8_0_soft_reset,
	.post_soft_reset = gfx_v8_0_post_soft_reset,
	.set_clockgating_state = gfx_v8_0_set_clockgating_state,
	.set_powergating_state = gfx_v8_0_set_powergating_state,
	.get_clockgating_state = gfx_v8_0_get_clockgating_state,
};
6895
/* Ring callbacks for the GFX ring.  emit_frame_size is the worst-case
 * DW count reserved per submission; keep it in sync with the emit
 * helpers listed below.
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v8_0_ring_get_rptr,
	.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
	.emit_frame_size = /* maximum 215dw if count 16 IBs in */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
		12 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		       the first COND_EXEC jump to the place just
			   prior to this double SWITCH_BUFFER  */
		5 + /* COND_EXEC */
		7 +	 /*	HDP_flush */
		4 +	 /*	VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		12 + 12 + /* FENCE x2 */
		2 + /* SWITCH_BUFFER */
		5, /* SURFACE_SYNC */
	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v8_ring_emit_sb,
	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
	.emit_wreg = gfx_v8_0_ring_emit_wreg,
	.soft_recovery = gfx_v8_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
};
6942
/*
 * Ring callbacks for the compute (MEC) rings.  Differs from the gfx
 * ring mainly in the wptr/fence helpers and in supporting the
 * per-pipe wave-limit tweak (emit_wave_limit).
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
        .type = AMDGPU_RING_TYPE_COMPUTE,
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = false,
        .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v8_0_ring_emit_gds_switch */
                7 + /* gfx_v8_0_ring_emit_hdp_flush */
                5 + /* hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
                VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
                7 + /* gfx_v8_0_emit_mem_sync_compute */
                5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
                15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */
        .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_ib = gfx_v8_0_ring_emit_ib_compute,
        .emit_fence = gfx_v8_0_ring_emit_fence_compute,
        .emit_pipeline_sync = gfx_v8_0_ring_emit_pipeline_sync,
        .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
        .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
        .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
        .test_ring = gfx_v8_0_ring_test_ring,
        .test_ib = gfx_v8_0_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
        .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
        .emit_wave_limit = gfx_v8_0_emit_wave_limit,
};
6976
/*
 * Ring callbacks for the KIQ (kernel interface queue) ring, used by the
 * driver for privileged register access (emit_rreg/emit_wreg) rather
 * than user command submission — hence no emit_ib/test_ib hooks.
 */
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
        .type = AMDGPU_RING_TYPE_KIQ,
        .align_mask = 0xff,
        .nop = PACKET3(PACKET3_NOP, 0x3FFF),
        .support_64bit_ptrs = false,
        .get_rptr = gfx_v8_0_ring_get_rptr,
        .get_wptr = gfx_v8_0_ring_get_wptr_compute,
        .set_wptr = gfx_v8_0_ring_set_wptr_compute,
        .emit_frame_size =
                20 + /* gfx_v8_0_ring_emit_gds_switch */
                7 + /* gfx_v8_0_ring_emit_hdp_flush */
                5 + /* hdp_invalidate */
                7 + /* gfx_v8_0_ring_emit_pipeline_sync */
                17 + /* gfx_v8_0_ring_emit_vm_flush */
                7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
        .emit_fence = gfx_v8_0_ring_emit_fence_kiq,
        .test_ring = gfx_v8_0_ring_test_ring,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .emit_rreg = gfx_v8_0_ring_emit_rreg,
        .emit_wreg = gfx_v8_0_ring_emit_wreg,
};
7000
7001 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
7002 {
7003         int i;
7004
7005         adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
7006
7007         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7008                 adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
7009
7010         for (i = 0; i < adev->gfx.num_compute_rings; i++)
7011                 adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
7012 }
7013
/* End-of-pipe interrupt: enable/disable control and handler. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = {
        .set = gfx_v8_0_set_eop_interrupt_state,
        .process = gfx_v8_0_eop_irq,
};
7018
/* Privileged-register access fault interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = {
        .set = gfx_v8_0_set_priv_reg_fault_state,
        .process = gfx_v8_0_priv_reg_irq,
};
7023
/* Privileged-instruction fault interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = {
        .set = gfx_v8_0_set_priv_inst_fault_state,
        .process = gfx_v8_0_priv_inst_irq,
};
7028
/* Command-processor ECC error interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = {
        .set = gfx_v8_0_set_cp_ecc_int_state,
        .process = gfx_v8_0_cp_ecc_error_irq,
};
7033
/* SQ (shader sequencer) interrupt. */
static const struct amdgpu_irq_src_funcs gfx_v8_0_sq_irq_funcs = {
        .set = gfx_v8_0_set_sq_int_state,
        .process = gfx_v8_0_sq_irq,
};
7038
7039 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev)
7040 {
7041         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7042         adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
7043
7044         adev->gfx.priv_reg_irq.num_types = 1;
7045         adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
7046
7047         adev->gfx.priv_inst_irq.num_types = 1;
7048         adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
7049
7050         adev->gfx.cp_ecc_error_irq.num_types = 1;
7051         adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
7052
7053         adev->gfx.sq_irq.num_types = 1;
7054         adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
7055 }
7056
/* Install the RLC (run-list controller) helpers shared by VI parts. */
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
{
        adev->gfx.rlc.funcs = &iceland_rlc_funcs;
}
7061
/*
 * Read back the ASIC's GDS (global data share) configuration:
 * total GDS size comes from the VMID0 allocation register, while the
 * GWS/OA counts are fixed for this generation.
 */
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
{
        /* init ASIC gds info */
        adev->gds.gds_size = RREG32(mmGDS_VMID0_SIZE);
        adev->gds.gws_size = 64;
        adev->gds.oa_size = 16;
        adev->gds.gds_compute_max_wave_id = RREG32(mmGDS_COMPUTE_MAX_WAVE_ID);
}
7070
7071 static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
7072                                                  u32 bitmap)
7073 {
7074         u32 data;
7075
7076         if (!bitmap)
7077                 return;
7078
7079         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
7080         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
7081
7082         WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
7083 }
7084
/*
 * Return the bitmap of active CUs in the currently selected shader
 * array (GRBM index must be set by the caller).  A CU is active when
 * it is disabled in neither the fuse register (CC_...) nor the user
 * override register (GC_USER_...); the result is clipped to the
 * number of CUs this part can have per SH.
 */
static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
        u32 data, mask;

        /* OR the two disable sources together: fused-off plus user-disabled. */
        data =  RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
                RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

        mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

        /* Invert "inactive" bits to get the active set, then clip. */
        return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
7096
/*
 * Populate adev->gfx.cu_info by walking every shader engine / shader
 * array, reading each array's active-CU bitmap and deriving the
 * "always on" (AO) CU set.  Must select each SE/SH via the GRBM index,
 * so the whole walk is serialized under grbm_idx_mutex and the index
 * is restored to broadcast (0xffffffff) before returning.
 */
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
{
        int i, j, k, counter, active_cu_number = 0;
        u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
        struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
        unsigned disable_masks[4 * 2];
        u32 ao_cu_num;

        memset(cu_info, 0, sizeof(*cu_info));

        /* APUs cap the always-on set at 2 CUs per array; dGPUs allow all. */
        if (adev->flags & AMD_IS_APU)
                ao_cu_num = 2;
        else
                ao_cu_num = adev->gfx.config.max_cu_per_sh;

        /* Parse the amdgpu.disable_cu= module option into per-SE/SH masks. */
        amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
                for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
                        mask = 1;
                        ao_bitmap = 0;
                        counter = 0;
                        /* Target this SE/SH; subsequent register reads hit it. */
                        gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff, 0);
                        /* disable_masks only covers a 4x2 SE/SH grid. */
                        if (i < 4 && j < 2)
                                gfx_v8_0_set_user_cu_inactive_bitmap(
                                        adev, disable_masks[i * 2 + j]);
                        bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
                        cu_info->bitmap[i][j] = bitmap;

                        /* Count active CUs; the first ao_cu_num become AO CUs. */
                        for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
                                if (bitmap & mask) {
                                        if (counter < ao_cu_num)
                                                ao_bitmap |= mask;
                                        counter ++;
                                }
                                mask <<= 1;
                        }
                        active_cu_number += counter;
                        /* Packed AO mask only has room for a 2x2 SE/SH grid. */
                        if (i < 2 && j < 2)
                                ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
                        cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
                }
        }
        /* Restore broadcast so later register accesses hit all SE/SH. */
        gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
        mutex_unlock(&adev->grbm_idx_mutex);

        cu_info->number = active_cu_number;
        cu_info->ao_cu_mask = ao_cu_mask;
        cu_info->simd_per_cu = NUM_SIMD_PER_CU;
        cu_info->max_waves_per_simd = 10;
        cu_info->max_scratch_slots_per_cu = 32;
        cu_info->wave_front_size = 64;
        cu_info->lds_size = 64;
}
7152
/* IP block descriptor registered for GFX v8.0 ASICs. */
const struct amdgpu_ip_block_version gfx_v8_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GFX,
        .major = 8,
        .minor = 0,
        .rev = 0,
        .funcs = &gfx_v8_0_ip_funcs,
};
7161
/* IP block descriptor for GFX v8.1 ASICs; shares the v8.0 callbacks. */
const struct amdgpu_ip_block_version gfx_v8_1_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_GFX,
        .major = 8,
        .minor = 1,
        .rev = 0,
        .funcs = &gfx_v8_0_ip_funcs,
};
7170
/*
 * Emit a WRITE_DATA packet that initializes the CE (constant engine)
 * metadata in the ring's CSA (clear state area).  The payload layout
 * and destination offset differ depending on whether the virtualized
 * environment supports chained IBs.
 */
static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
        uint64_t ce_payload_addr;
        int cnt_ce;
        union {
                struct vi_ce_ib_state regular;
                struct vi_ce_ib_state_chained_ib chained;
        } ce_payload = {};

        /* cnt_ce = payload dwords + 4 header dwords - 2 (PACKET3 count bias). */
        if (ring->adev->virt.chained_ib_support) {
                ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
                        offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
                cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
        } else {
                ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
                        offsetof(struct vi_gfx_meta_data, ce_payload);
                cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
        }

        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce));
        /* ENGINE_SEL(2) targets the CE; DST_SEL(8) writes to GPU memory. */
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
                                WRITE_DATA_DST_SEL(8) |
                                WR_CONFIRM) |
                                WRITE_DATA_CACHE_POLICY(0));
        amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr));
        amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr));
        /* Payload dwords follow the 4-dword packet header. */
        amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2);
}
7199
/*
 * Emit a WRITE_DATA packet that initializes the DE (draw engine)
 * metadata in the ring's CSA, pointing its GDS backup at a scratch
 * area placed one page after the CSA base.  Layout again depends on
 * chained-IB support under virtualization.
 */
static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
        uint64_t de_payload_addr, gds_addr, csa_addr;
        int cnt_de;
        union {
                struct vi_de_ib_state regular;
                struct vi_de_ib_state_chained_ib chained;
        } de_payload = {};

        csa_addr = amdgpu_csa_vaddr(ring->adev);
        /* GDS backup lives one 4K page past the CSA base. */
        gds_addr = csa_addr + 4096;
        /* cnt_de = payload dwords + 4 header dwords - 2 (PACKET3 count bias). */
        if (ring->adev->virt.chained_ib_support) {
                de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
                de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr);
                de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data_chained_ib, de_payload);
                cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2;
        } else {
                de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr);
                de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr);
                de_payload_addr = csa_addr + offsetof(struct vi_gfx_meta_data, de_payload);
                cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2;
        }

        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de));
        /* ENGINE_SEL(1) targets the DE; DST_SEL(8) writes to GPU memory. */
        amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
                                WRITE_DATA_DST_SEL(8) |
                                WR_CONFIRM) |
                                WRITE_DATA_CACHE_POLICY(0));
        amdgpu_ring_write(ring, lower_32_bits(de_payload_addr));
        amdgpu_ring_write(ring, upper_32_bits(de_payload_addr));
        /* Payload dwords follow the 4-dword packet header. */
        amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2);
}