drm/amdgpu: no touch for the reserved bit of RLC_CGTT_MGCG_OVERRIDE
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c (platform/kernel/linux-rpi.git)
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS                                  0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX                         0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT        0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT          0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK          0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK            0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
        mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
        mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

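/*
 * Program the per-ASIC "golden" register settings: the chip-specific
 * sequence first, then the settings common to all GC 9.x parts.
 */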
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
        switch (adev->asic_type) {
        case CHIP_VEGA10:
                soc15_program_register_sequence(adev,
                                                 golden_settings_gc_9_0,
                                                 ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                 golden_settings_gc_9_0_vg10,
                                                 ARRAY_SIZE(golden_settings_gc_9_0_vg10));
                break;
        case CHIP_VEGA12:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_2_1_vg12,
                                                ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
                break;
        case CHIP_VEGA20:
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0,
                                                ARRAY_SIZE(golden_settings_gc_9_0));
                soc15_program_register_sequence(adev,
                                                golden_settings_gc_9_0_vg20,
                                                ARRAY_SIZE(golden_settings_gc_9_0_vg20));
                break;
        case CHIP_RAVEN:
                soc15_program_register_sequence(adev,
                                                 golden_settings_gc_9_1,
                                                 ARRAY_SIZE(golden_settings_gc_9_1));
                soc15_program_register_sequence(adev,
                                                 golden_settings_gc_9_1_rv1,
                                                 ARRAY_SIZE(golden_settings_gc_9_1_rv1));
                break;
        default:
                break;
        }

        soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
                                        (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
        adev->gfx.scratch.num_reg = 8;
        adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
        adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
                                       bool wc, uint32_t reg, uint32_t val)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
        amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
                                WRITE_DATA_DST_SEL(0) |
                                (wc ? WR_CONFIRM : 0));
        amdgpu_ring_write(ring, reg);
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
                                  int mem_space, int opt, uint32_t addr0,
                                  uint32_t addr1, uint32_t ref, uint32_t mask,
                                  uint32_t inv)
{
        amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
        amdgpu_ring_write(ring,
                                 /* memory (1) or register (0) */
                                 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
                                 WAIT_REG_MEM_OPERATION(opt) | /* wait */
                                 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
                                 WAIT_REG_MEM_ENGINE(eng_sel)));

        if (mem_space)
                BUG_ON(addr0 & 0x3); /* Dword align */
        amdgpu_ring_write(ring, addr0);
        amdgpu_ring_write(ring, addr1);
        amdgpu_ring_write(ring, ref);
        amdgpu_ring_write(ring, mask);
        amdgpu_ring_write(ring, inv); /* poll interval */
}

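/*
 * Basic ring sanity check: emit a SET_UCONFIG_REG packet that writes
 * 0xDEADBEEF to a scratch register, then poll that register until the
 * value shows up or adev->usec_timeout expires.
 */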
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t scratch;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        r = amdgpu_gfx_scratch_get(adev, &scratch);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
                return r;
        }
        WREG32(scratch, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                amdgpu_gfx_scratch_free(adev, scratch);
                return r;
        }
        amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
        amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);

        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < adev->usec_timeout) {
                DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
                          ring->idx, scratch, tmp);
                r = -EINVAL;
        }
        amdgpu_gfx_scratch_free(adev, scratch);
        return r;
}

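/*
 * Indirect-buffer test: build a small IB whose WRITE_DATA packet stores
 * 0xDEADBEEF to a writeback slot in system memory, schedule it, and wait
 * on the resulting fence before checking the slot.
 */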
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib ib;
        struct dma_fence *f = NULL;

        unsigned index;
        uint64_t gpu_addr;
        uint32_t tmp;
        long r;

        r = amdgpu_device_wb_get(adev, &index);
        if (r) {
                dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
                return r;
        }

        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
        r = amdgpu_ib_get(adev, NULL, 16, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err1;
        }
        ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
        ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
        ib.ptr[2] = lower_32_bits(gpu_addr);
        ib.ptr[3] = upper_32_bits(gpu_addr);
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
        if (r)
                goto err2;

        r = dma_fence_wait_timeout(f, false, timeout);
        if (r == 0) {
                DRM_ERROR("amdgpu: IB test timed out.\n");
                r = -ETIMEDOUT;
                goto err2;
        } else if (r < 0) {
                DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
                goto err2;
        }

        tmp = adev->wb.wb[index];
        if (tmp == 0xDEADBEEF) {
                DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
                r = 0;
        } else {
                DRM_ERROR("ib test on ring %d failed\n", ring->idx);
                r = -EINVAL;
        }

err2:
        amdgpu_ib_free(adev, &ib, NULL);
        dma_fence_put(f);
err1:
        amdgpu_device_wb_free(adev, index);
        return r;
}


static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
        release_firmware(adev->gfx.pfp_fw);
        adev->gfx.pfp_fw = NULL;
        release_firmware(adev->gfx.me_fw);
        adev->gfx.me_fw = NULL;
        release_firmware(adev->gfx.ce_fw);
        adev->gfx.ce_fw = NULL;
        release_firmware(adev->gfx.rlc_fw);
        adev->gfx.rlc_fw = NULL;
        release_firmware(adev->gfx.mec_fw);
        adev->gfx.mec_fw = NULL;
        release_firmware(adev->gfx.mec2_fw);
        adev->gfx.mec2_fw = NULL;

        kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
        const struct rlc_firmware_header_v2_1 *rlc_hdr;

        rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
        adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
        adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
        adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
        adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
        adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
        adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
        adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
        adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
        adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
        adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
        adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
        adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
        adev->gfx.rlc.reg_list_format_direct_reg_list_length =
                        le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

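/*
 * Fetch and validate the CP (PFP/ME/CE/MEC/MEC2) and RLC firmware images
 * for this ASIC, parse their headers, and, when the PSP loads firmware,
 * register each image in adev->firmware.ucode.  MEC2 is optional.
 */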
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
        const struct gfx_firmware_header_v1_0 *cp_hdr;
        const struct rlc_firmware_header_v2_0 *rlc_hdr;
        unsigned int *tmp = NULL;
        unsigned int i = 0;
        uint16_t version_major;
        uint16_t version_minor;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_VEGA10:
                chip_name = "vega10";
                break;
        case CHIP_VEGA12:
                chip_name = "vega12";
                break;
        case CHIP_VEGA20:
                chip_name = "vega20";
                break;
        case CHIP_RAVEN:
                chip_name = "raven";
                break;
        default:
                BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
        err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
        adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
        adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
        adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
        /* check the validation result before dereferencing the header */
        if (err)
                goto out;
        rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

        version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
        version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
        if (version_major == 2 && version_minor == 1)
                adev->gfx.rlc.is_rlc_v2_1 = true;

        adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
        adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
        adev->gfx.rlc.save_and_restore_offset =
                        le32_to_cpu(rlc_hdr->save_and_restore_offset);
        adev->gfx.rlc.clear_state_descriptor_offset =
                        le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
        adev->gfx.rlc.avail_scratch_ram_locations =
                        le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
        adev->gfx.rlc.reg_restore_list_size =
                        le32_to_cpu(rlc_hdr->reg_restore_list_size);
        adev->gfx.rlc.reg_list_format_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_start);
        adev->gfx.rlc.reg_list_format_separate_start =
                        le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
        adev->gfx.rlc.starting_offsets_start =
                        le32_to_cpu(rlc_hdr->starting_offsets_start);
        adev->gfx.rlc.reg_list_format_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
        adev->gfx.rlc.reg_list_size_bytes =
                        le32_to_cpu(rlc_hdr->reg_list_size_bytes);
        adev->gfx.rlc.register_list_format =
                        kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
                                adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
        if (!adev->gfx.rlc.register_list_format) {
                err = -ENOMEM;
                goto out;
        }

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
        for (i = 0; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
                adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

        adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

        tmp = (unsigned int *)((uintptr_t)rlc_hdr +
                        le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
        for (i = 0; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
                adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

        if (adev->gfx.rlc.is_rlc_v2_1)
                gfx_v9_0_init_rlc_ext_microcode(adev);

        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;
        cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);


        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
        err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
        if (!err) {
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)
                        adev->gfx.mec2_fw->data;
                adev->gfx.mec2_fw_version =
                        le32_to_cpu(cp_hdr->header.ucode_version);
                adev->gfx.mec2_feature_version =
                        le32_to_cpu(cp_hdr->ucode_feature_version);
        } else {
                err = 0;
                adev->gfx.mec2_fw = NULL;
        }

        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
                info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
                info->fw = adev->gfx.pfp_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
                info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
                info->fw = adev->gfx.me_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
                info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
                info->fw = adev->gfx.ce_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
                info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
                info->fw = adev->gfx.rlc_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

                if (adev->gfx.rlc.is_rlc_v2_1 &&
                    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
                    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
                    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
                        info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
                        info->fw = adev->gfx.rlc_fw;
                        adev->firmware.fw_size +=
                                ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
                }

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
                info->fw = adev->gfx.mec_fw;
                header = (const struct common_firmware_header *)info->fw->data;
                cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
                info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
                info->fw = adev->gfx.mec_fw;
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

                if (adev->gfx.mec2_fw) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
                        info->fw = adev->gfx.mec2_fw;
                        header = (const struct common_firmware_header *)info->fw->data;
                        cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
                        info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
                        info->fw = adev->gfx.mec2_fw;
                        adev->firmware.fw_size +=
                                ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
                }

        }

out:
        if (err) {
                dev_err(adev->dev,
                        "gfx9: Failed to load firmware \"%s\"\n",
                        fw_name);
                release_firmware(adev->gfx.pfp_fw);
                adev->gfx.pfp_fw = NULL;
                release_firmware(adev->gfx.me_fw);
                adev->gfx.me_fw = NULL;
                release_firmware(adev->gfx.ce_fw);
                adev->gfx.ce_fw = NULL;
                release_firmware(adev->gfx.rlc_fw);
                adev->gfx.rlc_fw = NULL;
                release_firmware(adev->gfx.mec_fw);
                adev->gfx.mec_fw = NULL;
                release_firmware(adev->gfx.mec2_fw);
                adev->gfx.mec2_fw = NULL;
        }
        return err;
}

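/*
 * Size of the clear-state buffer in dwords: the preamble begin/end pairs,
 * the CONTEXT_CONTROL packet, one SET_CONTEXT_REG packet per extent, and
 * the trailing CLEAR_STATE packet.
 */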
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
        u32 count = 0;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        /* begin clear state */
        count += 2;
        /* context control state */
        count += 3;

        for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT)
                                count += 2 + ext->reg_count;
                        else
                                return 0;
                }
        }

        /* end clear state */
        count += 2;
        /* clear state */
        count += 2;

        return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
                                    volatile u32 *buffer)
{
        u32 count = 0, i;
        const struct cs_section_def *sect = NULL;
        const struct cs_extent_def *ext = NULL;

        if (adev->gfx.rlc.cs_data == NULL)
                return;
        if (buffer == NULL)
                return;

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
        buffer[count++] = cpu_to_le32(0x80000000);
        buffer[count++] = cpu_to_le32(0x80000000);

        for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
                for (ext = sect->section; ext->extent != NULL; ++ext) {
                        if (sect->id == SECT_CONTEXT) {
                                buffer[count++] =
                                        cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
                                buffer[count++] = cpu_to_le32(ext->reg_index -
                                                PACKET3_SET_CONTEXT_REG_START);
                                for (i = 0; i < ext->reg_count; i++)
                                        buffer[count++] = cpu_to_le32(ext->extent[i]);
                        } else {
                                return;
                        }
                }
        }

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
        buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

        buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
        buffer[count++] = cpu_to_le32(0);
}

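/*
 * Program the RLC load-balancing (lbpw) thresholds, sample counters and
 * CU masks; broadcast mode selects all SEs/SHs while the masks are set.
 */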
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
        uint32_t data;

        /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
        WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

        /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

        /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

        mutex_lock(&adev->grbm_idx_mutex);
        /* set mmRLC_LB_INIT_CU_MASK through broadcast mode to enable all SE/SH */
        gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

        /* set mmRLC_LB_PARAMS = 0x003F_1006 */
        data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
        data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
        WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

        /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
        data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
        data &= 0x0000FFFF;
        data |= 0x00C00000;
        WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

        /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
        WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

        /* set RLC_LB_CNTL = 0x8000_0095; bit 31 is reserved,
         * but used for RLC_LB_CNTL configuration */
        data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
        data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
        WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
        mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
        WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

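/*
 * Copy the jump tables of the five CP microcode engines (CE, PFP, ME,
 * MEC, MEC2) back to back into the RLC cp_table buffer.
 */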
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
        const __le32 *fw_data;
        volatile u32 *dst_ptr;
        int me, i, max_me = 5;
        u32 bo_offset = 0;
        u32 table_offset, table_size;

        /* write the cp table buffer */
        dst_ptr = adev->gfx.rlc.cp_table_ptr;
        for (me = 0; me < max_me; me++) {
                if (me == 0) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.ce_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 1) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.pfp_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 2) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.me_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 3) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                } else if (me == 4) {
                        const struct gfx_firmware_header_v1_0 *hdr =
                                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                        fw_data = (const __le32 *)
                                (adev->gfx.mec2_fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                        table_offset = le32_to_cpu(hdr->jt_offset);
                        table_size = le32_to_cpu(hdr->jt_size);
                }

                for (i = 0; i < table_size; i++) {
                        dst_ptr[bo_offset + i] =
                                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
                }

                bo_offset += table_size;
        }
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
        /* clear state block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
                        &adev->gfx.rlc.clear_state_gpu_addr,
                        (void **)&adev->gfx.rlc.cs_ptr);

        /* jump table block */
        amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
                        &adev->gfx.rlc.cp_table_gpu_addr,
                        (void **)&adev->gfx.rlc.cp_table_ptr);
}

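/*
 * Allocate and fill the RLC clear-state buffer in VRAM; on Raven also
 * allocate the CP jump table BO and program load balancing.
 */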
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
        volatile u32 *dst_ptr;
        u32 dws;
        const struct cs_section_def *cs_data;
        int r;

        adev->gfx.rlc.cs_data = gfx9_cs_data;

        cs_data = adev->gfx.rlc.cs_data;

        if (cs_data) {
                /* clear state block */
                adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
                r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                              AMDGPU_GEM_DOMAIN_VRAM,
                                              &adev->gfx.rlc.clear_state_obj,
                                              &adev->gfx.rlc.clear_state_gpu_addr,
                                              (void **)&adev->gfx.rlc.cs_ptr);
                if (r) {
                        dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
                                r);
                        gfx_v9_0_rlc_fini(adev);
                        return r;
                }
                /* set up the cs buffer */
                dst_ptr = adev->gfx.rlc.cs_ptr;
                gfx_v9_0_get_csb_buffer(adev, dst_ptr);
                amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }

        if (adev->asic_type == CHIP_RAVEN) {
                /* TODO: double check the cp_table_size for RV */
                adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
                r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
                                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                              &adev->gfx.rlc.cp_table_obj,
                                              &adev->gfx.rlc.cp_table_gpu_addr,
                                              (void **)&adev->gfx.rlc.cp_table_ptr);
                if (r) {
                        dev_err(adev->dev,
                                "(%d) failed to create cp table bo\n", r);
                        gfx_v9_0_rlc_fini(adev);
                        return r;
                }

                rv_init_cp_jump_table(adev);
                amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

                gfx_v9_0_init_lbpw(adev);
        }

        return 0;
}

static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
        if (unlikely(r != 0))
                return r;

        r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
                        AMDGPU_GEM_DOMAIN_VRAM);
        if (!r)
                adev->gfx.rlc.clear_state_gpu_addr =
                        amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

        return r;
}

static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
        int r;

        if (!adev->gfx.rlc.clear_state_obj)
                return;

        r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
        if (likely(r == 0)) {
                amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
                amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
        }
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
        amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
        amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

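/*
 * Allocate the MEC HPD EOP buffer (GFX9_MEC_HPD_SIZE bytes per acquired
 * compute ring) and a GTT BO holding a copy of the MEC firmware image.
 */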
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
        int r;
        u32 *hpd;
        const __le32 *fw_data;
        unsigned fw_size;
        u32 *fw;
        size_t mec_hpd_size;

        const struct gfx_firmware_header_v1_0 *mec_hdr;

        bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

        /* take ownership of the relevant compute queues */
        amdgpu_gfx_compute_queue_acquire(adev);
        mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

        r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.hpd_eop_obj,
                                      &adev->gfx.mec.hpd_eop_gpu_addr,
                                      (void **)&hpd);
        if (r) {
                dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

        amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
                 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
        fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

        r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
                                      &adev->gfx.mec.mec_fw_obj,
                                      &adev->gfx.mec.mec_fw_gpu_addr,
                                      (void **)&fw);
        if (r) {
                dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
                gfx_v9_0_mec_fini(adev);
                return r;
        }

        memcpy(fw, fw_data, fw_size);

        amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
        amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

        return 0;
}

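/* Read one wave-state register through the SQ_IND_INDEX/SQ_IND_DATA pair. */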
1076 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
1077 {
1078         WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1079                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1080                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1081                 (address << SQ_IND_INDEX__INDEX__SHIFT) |
1082                 (SQ_IND_INDEX__FORCE_READ_MASK));
1083         return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1084 }
1085
1086 static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
1087                            uint32_t wave, uint32_t thread,
1088                            uint32_t regno, uint32_t num, uint32_t *out)
1089 {
1090         WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
1091                 (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
1092                 (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
1093                 (regno << SQ_IND_INDEX__INDEX__SHIFT) |
1094                 (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
1095                 (SQ_IND_INDEX__FORCE_READ_MASK) |
1096                 (SQ_IND_INDEX__AUTO_INCR_MASK));
1097         while (num--)
1098                 *(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
1099 }
1100
1101 static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
1102 {
1103         /* type 1 wave data */
1104         dst[(*no_fields)++] = 1;
1105         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
1106         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
1107         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
1108         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
1109         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
1110         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
1111         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
1112         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
1113         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
1114         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
1115         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
1116         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
1117         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
1118         dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
1119 }
1120
1121 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
1122                                      uint32_t wave, uint32_t start,
1123                                      uint32_t size, uint32_t *dst)
1124 {
1125         wave_read_regs(
1126                 adev, simd, wave, 0,
1127                 start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
1128 }
1129
1130 static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
1131                                      uint32_t wave, uint32_t thread,
1132                                      uint32_t start, uint32_t size,
1133                                      uint32_t *dst)
1134 {
1135         wave_read_regs(
1136                 adev, simd, wave, thread,
1137                 start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
1138 }
1139
1140 static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
1141                                   u32 me, u32 pipe, u32 q)
1142 {
1143         soc15_grbm_select(adev, me, pipe, q, 0);
1144 }
1145
1146 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
1147         .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
1148         .select_se_sh = &gfx_v9_0_select_se_sh,
1149         .read_wave_data = &gfx_v9_0_read_wave_data,
1150         .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
1151         .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
1152         .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
1153 };
1154
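/*
 * Establish the per-ASIC gfx configuration: fixed screen-coverage FIFO
 * sizes, a golden GB_ADDR_CONFIG value (for Vega20, the hardware value
 * with selected fields overridden), and the individual address-config
 * fields (pipes, banks, RBs, SEs, pipe interleave) decoded from it.
 */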
1155 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
1156 {
1157         u32 gb_addr_config;
1158         int err;
1159
1160         adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
1161
1162         switch (adev->asic_type) {
1163         case CHIP_VEGA10:
1164                 adev->gfx.config.max_hw_contexts = 8;
1165                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1166                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1167                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1168                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1169                 gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
1170                 break;
1171         case CHIP_VEGA12:
1172                 adev->gfx.config.max_hw_contexts = 8;
1173                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1174                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1175                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1176                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1177                 gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
1178                 DRM_INFO("fix gfx.config for vega12\n");
1179                 break;
1180         case CHIP_VEGA20:
1181                 adev->gfx.config.max_hw_contexts = 8;
1182                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1183                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1184                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1185                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1186                 gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
1187                 gb_addr_config &= ~0xf3e777ff;
1188                 gb_addr_config |= 0x22014042;
1189                 /* check vbios table if gpu info is not available */
1190                 err = amdgpu_atomfirmware_get_gfx_info(adev);
1191                 if (err)
1192                         return err;
1193                 break;
1194         case CHIP_RAVEN:
1195                 adev->gfx.config.max_hw_contexts = 8;
1196                 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1197                 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1198                 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1199                 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
1200                 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
1201                 break;
1202         default:
1203                 BUG();
1204                 break;
1205         }
1206
1207         adev->gfx.config.gb_addr_config = gb_addr_config;
1208
1209         adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
1210                         REG_GET_FIELD(
1211                                         adev->gfx.config.gb_addr_config,
1212                                         GB_ADDR_CONFIG,
1213                                         NUM_PIPES);
1214
1215         adev->gfx.config.max_tile_pipes =
1216                 adev->gfx.config.gb_addr_config_fields.num_pipes;
1217
1218         adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
1219                         REG_GET_FIELD(
1220                                         adev->gfx.config.gb_addr_config,
1221                                         GB_ADDR_CONFIG,
1222                                         NUM_BANKS);
1223         adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
1224                         REG_GET_FIELD(
1225                                         adev->gfx.config.gb_addr_config,
1226                                         GB_ADDR_CONFIG,
1227                                         MAX_COMPRESSED_FRAGS);
1228         adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
1229                         REG_GET_FIELD(
1230                                         adev->gfx.config.gb_addr_config,
1231                                         GB_ADDR_CONFIG,
1232                                         NUM_RB_PER_SE);
1233         adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
1234                         REG_GET_FIELD(
1235                                         adev->gfx.config.gb_addr_config,
1236                                         GB_ADDR_CONFIG,
1237                                         NUM_SHADER_ENGINES);
1238         adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
1239                         REG_GET_FIELD(
1240                                         adev->gfx.config.gb_addr_config,
1241                                         GB_ADDR_CONFIG,
1242                                         PIPE_INTERLEAVE_SIZE));
1243
1244         return 0;
1245 }
1246
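/*
 * Allocate one NGG (next-generation geometry) buffer in VRAM.  size_se
 * is the requested per-shader-engine size: negative is rejected, zero
 * falls back to default_size_se, and the total allocation is scaled by
 * the number of shader engines.
 */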
1247 static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
1248                                    struct amdgpu_ngg_buf *ngg_buf,
1249                                    int size_se,
1250                                    int default_size_se)
1251 {
1252         int r;
1253
1254         if (size_se < 0) {
1255                 dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
1256                 return -EINVAL;
1257         }
1258         size_se = size_se ? size_se : default_size_se;
1259
1260         ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
1261         r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
1262                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
1263                                     &ngg_buf->bo,
1264                                     &ngg_buf->gpu_addr,
1265                                     NULL);
1266         if (r) {
1267                 dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
1268                 return r;
1269         }
1270         ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);
1271
1272         return r;
1273 }
1274
1275 static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
1276 {
1277         int i;
1278
1279         for (i = 0; i < NGG_BUF_MAX; i++)
1280                 amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
1281                                       &adev->gfx.ngg.buf[i].gpu_addr,
1282                                       NULL);
1283
1284         memset(&adev->gfx.ngg.buf[0], 0,
1285                         sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);
1286
1287         adev->gfx.ngg.init = false;
1288
1289         return 0;
1290 }
1291
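/*
 * Create the NGG buffers (primitive, position, control sideband and,
 * optionally, the parameter cache) and carve the NGG reservation out of
 * the GDS pool, directly behind the VMID0 allocation.
 */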
1292 static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
1293 {
1294         int r;
1295
1296         if (!amdgpu_ngg || adev->gfx.ngg.init)
1297                 return 0;
1298
1299         /* reserve GDS memory: size aligned to 64 bytes */
1300         adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
1301         adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
1302         adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
1303         adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
1304         adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
1305
1306         /* Primitive Buffer */
1307         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
1308                                     amdgpu_prim_buf_per_se,
1309                                     64 * 1024);
1310         if (r) {
1311                 dev_err(adev->dev, "Failed to create Primitive Buffer\n");
1312                 goto err;
1313         }
1314
1315         /* Position Buffer */
1316         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
1317                                     amdgpu_pos_buf_per_se,
1318                                     256 * 1024);
1319         if (r) {
1320                 dev_err(adev->dev, "Failed to create Position Buffer\n");
1321                 goto err;
1322         }
1323
1324         /* Control Sideband */
1325         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
1326                                     amdgpu_cntl_sb_buf_per_se,
1327                                     256);
1328         if (r) {
1329                 dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
1330                 goto err;
1331         }
1332
1333         /* Parameter Cache, not created by default */
1334         if (amdgpu_param_buf_per_se <= 0)
1335                 goto out;
1336
1337         r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
1338                                     amdgpu_param_buf_per_se,
1339                                     512 * 1024);
1340         if (r) {
1341                 dev_err(adev->dev, "Failed to create Parameter Cache\n");
1342                 goto err;
1343         }
1344
1345 out:
1346         adev->gfx.ngg.init = true;
1347         return 0;
1348 err:
1349         gfx_v9_0_ngg_fini(adev);
1350         return r;
1351 }
1352
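/*
 * Enable NGG: program the WD (work distributor) buffer sizes and base
 * addresses, then clear the reserved GDS range on the gfx ring by
 * temporarily extending GDS_VMID0_SIZE over it, zero-filling it with a
 * DMA_DATA packet, and finally writing GDS_VMID0_SIZE to 0.
 */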
1353 static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
1354 {
1355         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
1356         int r;
1357         u32 data, base;
1358
1359         if (!amdgpu_ngg)
1360                 return 0;
1361
1362         /* Program buffer size */
1363         data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
1364                              adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
1365         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
1366                              adev->gfx.ngg.buf[NGG_POS].size >> 8);
1367         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
1368
1369         data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
1370                              adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
1371         data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
1372                              adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
1373         WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
1374
1375         /* Program buffer base address */
1376         base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1377         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
1378         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);
1379
1380         base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
1381         data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
1382         WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);
1383
1384         base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1385         data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
1386         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);
1387
1388         base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
1389         data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
1390         WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);
1391
1392         base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1393         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
1394         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);
1395
1396         base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
1397         data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
1398         WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);
1399
1400         /* Clear GDS reserved memory */
1401         r = amdgpu_ring_alloc(ring, 17);
1402         if (r) {
1403                 DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
1404                           ring->idx, r);
1405                 return r;
1406         }
1407
1408         gfx_v9_0_write_data_to_reg(ring, 0, false,
1409                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
1410                                    (adev->gds.mem.total_size +
1411                                     adev->gfx.ngg.gds_reserve_size) >>
1412                                    AMDGPU_GDS_SHIFT);
1413
1414         amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
1415         amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
1416                                 PACKET3_DMA_DATA_DST_SEL(1) |
1417                                 PACKET3_DMA_DATA_SRC_SEL(2)));
1418         amdgpu_ring_write(ring, 0);
1419         amdgpu_ring_write(ring, 0);
1420         amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
1421         amdgpu_ring_write(ring, 0);
1422         amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
1423                                 adev->gfx.ngg.gds_reserve_size);
1424
1425         gfx_v9_0_write_data_to_reg(ring, 0, false,
1426                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);
1427
1428         amdgpu_ring_commit(ring);
1429
1430         return 0;
1431 }
1432
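/*
 * Initialize one compute ring: map the (mec, pipe, queue) triple onto the
 * ring, assign its doorbell and its slice of the MEC EOP buffer, and
 * route completion to the matching per-pipe EOP interrupt source.
 */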
1433 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
1434                                       int mec, int pipe, int queue)
1435 {
1436         int r;
1437         unsigned irq_type;
1438         struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1439
1442         /* mec0 is me1 */
1443         ring->me = mec + 1;
1444         ring->pipe = pipe;
1445         ring->queue = queue;
1446
1447         ring->ring_obj = NULL;
1448         ring->use_doorbell = true;
1449         ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
1450         ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1451                                 + (ring_id * GFX9_MEC_HPD_SIZE);
1452         sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
1453
1454         irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
1455                 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1456                 + ring->pipe;
1457
1458         /* type-2 packets are deprecated on MEC, use type-3 instead */
1459         r = amdgpu_ring_init(adev, ring, 1024,
1460                              &adev->gfx.eop_irq, irq_type);
1461         if (r)
1462                 return r;
1463
1465         return 0;
1466 }
1467
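/*
 * sw_init: register the gfx interrupt sources, load the microcode,
 * allocate the RLC/MEC/KIQ objects, create the gfx and compute rings,
 * and reserve the GDS, GWS and OA partitions used by the gfx block.
 */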
1468 static int gfx_v9_0_sw_init(void *handle)
1469 {
1470         int i, j, k, r, ring_id;
1471         struct amdgpu_ring *ring;
1472         struct amdgpu_kiq *kiq;
1473         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1474
1475         switch (adev->asic_type) {
1476         case CHIP_VEGA10:
1477         case CHIP_VEGA12:
1478         case CHIP_VEGA20:
1479         case CHIP_RAVEN:
1480                 adev->gfx.mec.num_mec = 2;
1481                 break;
1482         default:
1483                 adev->gfx.mec.num_mec = 1;
1484                 break;
1485         }
1486
1487         adev->gfx.mec.num_pipe_per_mec = 4;
1488         adev->gfx.mec.num_queue_per_pipe = 8;
1489
1490         /* KIQ event */
1491         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
1492         if (r)
1493                 return r;
1494
1495         /* EOP Event */
1496         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
1497         if (r)
1498                 return r;
1499
1500         /* Privileged reg */
1501         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
1502                               &adev->gfx.priv_reg_irq);
1503         if (r)
1504                 return r;
1505
1506         /* Privileged inst */
1507         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
1508                               &adev->gfx.priv_inst_irq);
1509         if (r)
1510                 return r;
1511
1512         adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1513
1514         gfx_v9_0_scratch_init(adev);
1515
1516         r = gfx_v9_0_init_microcode(adev);
1517         if (r) {
1518                 DRM_ERROR("Failed to load gfx firmware!\n");
1519                 return r;
1520         }
1521
1522         r = gfx_v9_0_rlc_init(adev);
1523         if (r) {
1524                 DRM_ERROR("Failed to init rlc BOs!\n");
1525                 return r;
1526         }
1527
1528         r = gfx_v9_0_mec_init(adev);
1529         if (r) {
1530                 DRM_ERROR("Failed to init MEC BOs!\n");
1531                 return r;
1532         }
1533
1534         /* set up the gfx ring */
1535         for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1536                 ring = &adev->gfx.gfx_ring[i];
1537                 ring->ring_obj = NULL;
1538                 if (!i)
1539                         sprintf(ring->name, "gfx");
1540                 else
1541                         sprintf(ring->name, "gfx_%d", i);
1542                 ring->use_doorbell = true;
1543                 ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
1544                 r = amdgpu_ring_init(adev, ring, 1024,
1545                                      &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
1546                 if (r)
1547                         return r;
1548         }
1549
1550         /* set up the compute queues - allocate horizontally across pipes */
1551         ring_id = 0;
1552         for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1553                 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1554                         for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
1555                                 if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
1556                                         continue;
1557
1558                                 r = gfx_v9_0_compute_ring_init(adev,
1559                                                                ring_id,
1560                                                                i, k, j);
1561                                 if (r)
1562                                         return r;
1563
1564                                 ring_id++;
1565                         }
1566                 }
1567         }
1568
1569         r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
1570         if (r) {
1571                 DRM_ERROR("Failed to init KIQ BOs!\n");
1572                 return r;
1573         }
1574
1575         kiq = &adev->gfx.kiq;
1576         r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1577         if (r)
1578                 return r;
1579
1580         /* create MQD for all compute queues as well as KIQ for SRIOV case */
1581         r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
1582         if (r)
1583                 return r;
1584
1585         /* reserve GDS, GWS and OA resource for gfx */
1586         r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
1587                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
1588                                     &adev->gds.gds_gfx_bo, NULL, NULL);
1589         if (r)
1590                 return r;
1591
1592         r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
1593                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
1594                                     &adev->gds.gws_gfx_bo, NULL, NULL);
1595         if (r)
1596                 return r;
1597
1598         r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
1599                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
1600                                     &adev->gds.oa_gfx_bo, NULL, NULL);
1601         if (r)
1602                 return r;
1603
1604         adev->gfx.ce_ram_size = 0x8000;
1605
1606         r = gfx_v9_0_gpu_early_init(adev);
1607         if (r)
1608                 return r;
1609
1610         r = gfx_v9_0_ngg_init(adev);
1611         if (r)
1612                 return r;
1613
1614         return 0;
1615 }
1616
1618 static int gfx_v9_0_sw_fini(void *handle)
1619 {
1620         int i;
1621         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1622
1623         amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
1624         amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
1625         amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
1626
1627         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1628                 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1629         for (i = 0; i < adev->gfx.num_compute_rings; i++)
1630                 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1631
1632         amdgpu_gfx_compute_mqd_sw_fini(adev);
1633         amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
1634         amdgpu_gfx_kiq_fini(adev);
1635
1636         gfx_v9_0_mec_fini(adev);
1637         gfx_v9_0_ngg_fini(adev);
1638         amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
1639                                 &adev->gfx.rlc.clear_state_gpu_addr,
1640                                 (void **)&adev->gfx.rlc.cs_ptr);
1641         if (adev->asic_type == CHIP_RAVEN) {
1642                 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
1643                                 &adev->gfx.rlc.cp_table_gpu_addr,
1644                                 (void **)&adev->gfx.rlc.cp_table_ptr);
1645         }
1646         gfx_v9_0_free_microcode(adev);
1647
1648         return 0;
1649 }
1650
1652 static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
1653 {
1654         /* TODO */
1655 }
1656
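/*
 * Steer subsequent register accesses to a specific shader engine /
 * shader array / instance through GRBM_GFX_INDEX.  Passing 0xffffffff
 * for a field selects broadcast mode, so writes reach every unit at
 * that level.
 */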
1657 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
1658 {
1659         u32 data;
1660
1661         if (instance == 0xffffffff)
1662                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1663         else
1664                 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1665
1666         if (se_num == 0xffffffff)
1667                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
1668         else
1669                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1670
1671         if (sh_num == 0xffffffff)
1672                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
1673         else
1674                 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
1675
1676         WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
1677 }
1678
1679 static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1680 {
1681         u32 data, mask;
1682
1683         data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
1684         data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);
1685
1686         data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1687         data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1688
1689         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1690                                          adev->gfx.config.max_sh_per_se);
1691
1692         return (~data) & mask;
1693 }
1694
1695 static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
1696 {
1697         int i, j;
1698         u32 data;
1699         u32 active_rbs = 0;
1700         u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1701                                         adev->gfx.config.max_sh_per_se;
1702
1703         mutex_lock(&adev->grbm_idx_mutex);
1704         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1705                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1706                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1707                         data = gfx_v9_0_get_rb_active_bitmap(adev);
1708                         active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1709                                                rb_bitmap_width_per_sh);
1710                 }
1711         }
1712         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1713         mutex_unlock(&adev->grbm_idx_mutex);
1714
1715         adev->gfx.config.backend_enable_mask = active_rbs;
1716         adev->gfx.config.num_rbs = hweight32(active_rbs);
1717 }
1718
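/*
 * VMIDs 8..15 are reserved for compute; program each of them with the
 * same fixed SH_MEM_CONFIG/SH_MEM_BASES pair while it is selected via
 * soc15_grbm_select().  The resulting aperture layout is documented in
 * the comment inside the function.
 */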
1719 #define DEFAULT_SH_MEM_BASES    (0x6000)
1720 #define FIRST_COMPUTE_VMID      (8)
1721 #define LAST_COMPUTE_VMID       (16)
1722 static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
1723 {
1724         int i;
1725         uint32_t sh_mem_config;
1726         uint32_t sh_mem_bases;
1727
1728         /*
1729          * Configure apertures:
1730          * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1731          * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1732          * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1733          */
1734         sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1735
1736         sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1737                         SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1738                         SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1739
1740         mutex_lock(&adev->srbm_mutex);
1741         for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
1742                 soc15_grbm_select(adev, 0, 0, 0, i);
1743                 /* CP and shaders */
1744                 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
1745                 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
1746         }
1747         soc15_grbm_select(adev, 0, 0, 0, 0);
1748         mutex_unlock(&adev->srbm_mutex);
1749 }
1750
1751 static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
1752 {
1753         u32 tmp;
1754         int i;
1755
1756         WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1757
1758         gfx_v9_0_tiling_mode_table_init(adev);
1759
1760         gfx_v9_0_setup_rb(adev);
1761         gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1762         adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1763
1764         /* XXX SH_MEM regs */
1765         /* where to put LDS, scratch, GPUVM in FSA64 space */
1766         mutex_lock(&adev->srbm_mutex);
1767         for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1768                 soc15_grbm_select(adev, 0, 0, 0, i);
1769                 /* CP and shaders */
1770                 if (i == 0) {
1771                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1772                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1773                         WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1774                         WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1775                 } else {
1776                         tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1777                                             SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1778                         WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1779                         tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1780                                 (adev->gmc.private_aperture_start >> 48));
1781                         tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1782                                 (adev->gmc.shared_aperture_start >> 48));
1783                         WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1784                 }
1785         }
1786         soc15_grbm_select(adev, 0, 0, 0, 0);
1787
1788         mutex_unlock(&adev->srbm_mutex);
1789
1790         gfx_v9_0_init_compute_vmid(adev);
1791
1792         mutex_lock(&adev->grbm_idx_mutex);
1793         /*
1794          * making sure that the following register writes will be broadcasted
1795          * to all the shaders
1796          */
1797         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1798
1799         WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1800                    (adev->gfx.config.sc_prim_fifo_size_frontend <<
1801                         PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1802                    (adev->gfx.config.sc_prim_fifo_size_backend <<
1803                         PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1804                    (adev->gfx.config.sc_hiz_tile_fifo_size <<
1805                         PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1806                    (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1807                         PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1808         mutex_unlock(&adev->grbm_idx_mutex);
1810 }
1811
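/*
 * Poll (up to adev->usec_timeout) for the RLC serdes masters to go idle:
 * first the per-CU masters on every SE/SH combination, then the non-CU
 * (SE/GC/TC) masters via RLC_SERDES_NONCU_MASTER_BUSY.
 */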
1812 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1813 {
1814         u32 i, j, k;
1815         u32 mask;
1816
1817         mutex_lock(&adev->grbm_idx_mutex);
1818         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1819                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1820                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1821                         for (k = 0; k < adev->usec_timeout; k++) {
1822                                 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1823                                         break;
1824                                 udelay(1);
1825                         }
1826                         if (k == adev->usec_timeout) {
1827                                 gfx_v9_0_select_se_sh(adev, 0xffffffff,
1828                                                       0xffffffff, 0xffffffff);
1829                                 mutex_unlock(&adev->grbm_idx_mutex);
1830                                 DRM_INFO("Timeout waiting for RLC serdes %u,%u\n",
1831                                          i, j);
1832                                 return;
1833                         }
1834                 }
1835         }
1836         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1837         mutex_unlock(&adev->grbm_idx_mutex);
1838
1839         mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1840                 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1841                 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1842                 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1843         for (k = 0; k < adev->usec_timeout; k++) {
1844                 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1845                         break;
1846                 udelay(1);
1847         }
1848 }
1849
1850 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1851                                                bool enable)
1852 {
1853         u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1854
1855         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1856         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1857         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1858         tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
1859
1860         WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1861 }
1862
1863 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
1864 {
1865         /* csib */
1866         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
1867                         adev->gfx.rlc.clear_state_gpu_addr >> 32);
1868         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
1869                         adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1870         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
1871                         adev->gfx.rlc.clear_state_size);
1872 }
1873
1874 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
1875                                 int indirect_offset,
1876                                 int list_size,
1877                                 int *unique_indirect_regs,
1878                                 int unique_indirect_reg_count,
1879                                 int *indirect_start_offsets,
1880                                 int *indirect_start_offsets_count,
1881                                 int max_start_offsets_count)
1882 {
1883         int idx;
1884
1885         for (; indirect_offset < list_size; indirect_offset++) {
1886                 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
1887                 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
1888                 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
1889
1890                 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
1891                         indirect_offset += 2;
1892
1893                         /* look for the matching index */
1894                         for (idx = 0; idx < unique_indirect_reg_count; idx++) {
1895                                 if (unique_indirect_regs[idx] ==
1896                                         register_list_format[indirect_offset] ||
1897                                         !unique_indirect_regs[idx])
1898                                         break;
1899                         }
1900
1901                         BUG_ON(idx >= unique_indirect_reg_count);
1902
1903                         if (!unique_indirect_regs[idx])
1904                                 unique_indirect_regs[idx] = register_list_format[indirect_offset];
1905
1906                         indirect_offset++;
1907                 }
1908         }
1909 }
1910
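/*
 * Program the RLC save/restore list used by the save/restore machine
 * (and hence gfxoff).  The register_restore table goes into ARAM, the
 * firmware-provided list format into RLC scratch RAM; indirect entries
 * are rewritten to reference a small table of unique indirect registers,
 * which is then loaded into the RLC_SRM_INDEX_CNTL_ADDR/DATA pairs.
 */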
1911 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
1912 {
1913         int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1914         int unique_indirect_reg_count = 0;
1915
1916         int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1917         int indirect_start_offsets_count = 0;
1918
1919         int list_size = 0;
1920         int i = 0, j = 0;
1921         u32 tmp = 0;
1922
1923         u32 *register_list_format =
1924                 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1925         if (!register_list_format)
1926                 return -ENOMEM;
1927         memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1928                 adev->gfx.rlc.reg_list_format_size_bytes);
1929
1930         /* setup unique_indirect_regs array and indirect_start_offsets array */
1931         unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
1932         gfx_v9_1_parse_ind_reg_list(register_list_format,
1933                                     adev->gfx.rlc.reg_list_format_direct_reg_list_length,
1934                                     adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1935                                     unique_indirect_regs,
1936                                     unique_indirect_reg_count,
1937                                     indirect_start_offsets,
1938                                     &indirect_start_offsets_count,
1939                                     ARRAY_SIZE(indirect_start_offsets));
1940
1941         /* enable auto inc in case it is disabled */
1942         tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1943         tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1944         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1945
1946         /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1947         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1948                 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1949         for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1950                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1951                         adev->gfx.rlc.register_restore[i]);
1952
1953         /* load indirect register */
1954         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1955                 adev->gfx.rlc.reg_list_format_start);
1956
1957         /* direct register portion */
1958         for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
1959                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1960                         register_list_format[i]);
1961
1962         /* indirect register portion */
1963         while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
1964                 if (register_list_format[i] == 0xFFFFFFFF) {
1965                         WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
1966                         continue;
1967                 }
1968
1969                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
1970                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
1971
1972                 for (j = 0; j < unique_indirect_reg_count; j++) {
1973                         if (register_list_format[i] == unique_indirect_regs[j]) {
1974                                 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
1975                                 break;
1976                         }
1977                 }
1978
1979                 BUG_ON(j >= unique_indirect_reg_count);
1980
1981                 i++;
1982         }
1983
1984         /* set save/restore list size */
1985         list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
1986         list_size = list_size >> 1;
1987         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1988                 adev->gfx.rlc.reg_restore_list_size);
1989         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1990
1991         /* write the starting offsets to RLC scratch ram */
1992         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1993                 adev->gfx.rlc.starting_offsets_start);
1994         for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1995                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1996                        indirect_start_offsets[i]);
1997
1998         /* load unique indirect regs */
1999         for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2000                 if (unique_indirect_regs[i] != 0) {
2001                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2002                                + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2003                                unique_indirect_regs[i] & 0x3FFFF);
2004
2005                         WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2006                                + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2007                                unique_indirect_regs[i] >> 20);
2008                 }
2009         }
2010
2011         kfree(register_list_format);
2012         return 0;
2013 }
2014
2015 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2016 {
2017         WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2018 }
2019
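/*
 * Route control of CGPG to GFXIP (or take it back) through
 * PWR_MISC_CNTL_STATUS, updating the gfxoff status field when enabling.
 * Like the RLC_PG_CNTL helpers below, the register is only written when
 * the value actually changes.
 */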
2020 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2021                                              bool enable)
2022 {
2023         uint32_t data = 0;
2024         uint32_t default_data = 0;
2025
2026         default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2027         if (enable) {
2028                 /* enable GFXIP control over CGPG */
2029                 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2030                 if (default_data != data)
2031                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2032
2033                 /* update status */
2034                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2035                 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2036                 if (default_data != data)
2037                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2038         } else {
2039                 /* restore GFXIP control over CGPG */
2040                 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2041                 if (default_data != data)
2042                         WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2043         }
2044 }
2045
2046 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2047 {
2048         uint32_t data = 0;
2049
2050         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2051                               AMD_PG_SUPPORT_GFX_SMG |
2052                               AMD_PG_SUPPORT_GFX_DMG)) {
2053                 /* init IDLE_POLL_COUNT = 60 */
2054                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2055                 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2056                 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2057                 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2058
2059                 /* init RLC PG Delay */
2060                 data = 0;
2061                 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2062                 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2063                 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2064                 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2065                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2066
2067                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2068                 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2069                 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2070                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2071
2072                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2073                 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2074                 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2075                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2076
2077                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2078                 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2079
2080                 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2081                 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2082                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2083
2084                 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2085         }
2086 }
2087
2088 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2089                                                 bool enable)
2090 {
2091         uint32_t data = 0;
2092         uint32_t default_data = 0;
2093
2094         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2095         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2096                              SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2097                              enable ? 1 : 0);
2098         if (default_data != data)
2099                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2100 }
2101
2102 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2103                                                 bool enable)
2104 {
2105         uint32_t data = 0;
2106         uint32_t default_data = 0;
2107
2108         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2109         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2110                              SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2111                              enable ? 1 : 0);
2112         if (default_data != data)
2113                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2114 }
2115
2116 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2117                                         bool enable)
2118 {
2119         uint32_t data = 0;
2120         uint32_t default_data = 0;
2121
2122         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2123         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2124                              CP_PG_DISABLE,
2125                              enable ? 0 : 1);
2126         if (default_data != data)
2127                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2128 }
2129
2130 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2131                                                 bool enable)
2132 {
2133         uint32_t data, default_data;
2134
2135         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2136         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2137                              GFX_POWER_GATING_ENABLE,
2138                              enable ? 1 : 0);
2139         if (default_data != data)
2140                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2141 }
2142
2143 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2144                                                 bool enable)
2145 {
2146         uint32_t data, default_data;
2147
2148         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2149         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2150                              GFX_PIPELINE_PG_ENABLE,
2151                              enable ? 1 : 0);
2152         if (default_data != data)
2153                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2154
2155         if (!enable)
2156                 /* read any GFX register to wake up GFX */
2157                 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2158 }
2159
2160 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2161                                                        bool enable)
2162 {
2163         uint32_t data, default_data;
2164
2165         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2166         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2167                              STATIC_PER_CU_PG_ENABLE,
2168                              enable ? 1 : 0);
2169         if (default_data != data)
2170                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2171 }
2172
2173 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2174                                                 bool enable)
2175 {
2176         uint32_t data, default_data;
2177
2178         default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2179         data = REG_SET_FIELD(data, RLC_PG_CNTL,
2180                              DYN_PER_CU_PG_ENABLE,
2181                              enable ? 1 : 0);
2182         if (default_data != data)
2183                 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2184 }
2185
2186 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2187 {
2188         gfx_v9_0_init_csb(adev);
2189
2190         /*
2191          * The RLC save/restore list is only supported from RLC v2_1
2192          * onwards, and it is required by the gfxoff feature.
2193          */
2194         if (adev->gfx.rlc.is_rlc_v2_1) {
2195                 gfx_v9_1_init_rlc_save_restore_list(adev);
2196                 gfx_v9_0_enable_save_restore_machine(adev);
2197         }
2198
2199         if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2200                               AMD_PG_SUPPORT_GFX_SMG |
2201                               AMD_PG_SUPPORT_GFX_DMG |
2202                               AMD_PG_SUPPORT_CP |
2203                               AMD_PG_SUPPORT_GDS |
2204                               AMD_PG_SUPPORT_RLC_SMU_HS)) {
2205                 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2206                        adev->gfx.rlc.cp_table_gpu_addr >> 8);
2207                 gfx_v9_0_init_gfx_power_gating(adev);
2208         }
2209 }
2210
2211 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2212 {
2213         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2214         gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2215         gfx_v9_0_wait_for_rlc_serdes(adev);
2216 }
2217
2218 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2219 {
2220         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2221         udelay(50);
2222         WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2223         udelay(50);
2224 }
2225
2226 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2227 {
2228 #ifdef AMDGPU_RLC_DEBUG_RETRY
2229         u32 rlc_ucode_ver;
2230 #endif
2231
2232         WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2233
2234         /* APUs (e.g. carrizo) enable the cp interrupt only after the cp is initialized */
2235         if (!(adev->flags & AMD_IS_APU))
2236                 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2237
2238         udelay(50);
2239
2240 #ifdef AMDGPU_RLC_DEBUG_RETRY
2241         /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2242         rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2243         if (rlc_ucode_ver == 0x108) {
2244                 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2245                                 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2246                 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2247                  * default is 0x9C4 to create a 100us interval */
2248                 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2249                 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2250                  * to disable the page fault retry interrupts, default is
2251                  * 0x100 (256) */
2252                 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2253         }
2254 #endif
2255 }
2256
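/*
 * Legacy (non-PSP) RLC microcode load: stream the ucode words into the
 * RLC GPM ucode RAM through the ADDR/DATA register pair, then write the
 * firmware version to the ADDR register to finish the load.
 */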
2257 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2258 {
2259         const struct rlc_firmware_header_v2_0 *hdr;
2260         const __le32 *fw_data;
2261         unsigned i, fw_size;
2262
2263         if (!adev->gfx.rlc_fw)
2264                 return -EINVAL;
2265
2266         hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2267         amdgpu_ucode_print_rlc_hdr(&hdr->header);
2268
2269         fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2270                            le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2271         fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2272
2273         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2274                         RLCG_UCODE_LOADING_START_ADDRESS);
2275         for (i = 0; i < fw_size; i++)
2276                 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2277         WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2278
2279         return 0;
2280 }
2281
2282 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2283 {
2284         int r;
2285
2286         if (amdgpu_sriov_vf(adev)) {
2287                 gfx_v9_0_init_csb(adev);
2288                 return 0;
2289         }
2290
2291         gfx_v9_0_rlc_stop(adev);
2292
2293         /* disable CG */
2294         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2295
2296         gfx_v9_0_rlc_reset(adev);
2297
2298         gfx_v9_0_init_pg(adev);
2299
2300         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2301                 /* legacy rlc firmware loading */
2302                 r = gfx_v9_0_rlc_load_microcode(adev);
2303                 if (r)
2304                         return r;
2305         }
2306
2307         if (adev->asic_type == CHIP_RAVEN) {
2308                 if (amdgpu_lbpw != 0)
2309                         gfx_v9_0_enable_lbpw(adev, true);
2310                 else
2311                         gfx_v9_0_enable_lbpw(adev, false);
2312         }
2313
2314         gfx_v9_0_rlc_start(adev);
2315
2316         return 0;
2317 }
2318
2319 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2320 {
2321         int i;
2322         u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2323
2324         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2325         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2326         tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2327         if (!enable) {
2328                 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2329                         adev->gfx.gfx_ring[i].ready = false;
2330         }
2331         WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2332         udelay(50);
2333 }
2334
2335 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2336 {
2337         const struct gfx_firmware_header_v1_0 *pfp_hdr;
2338         const struct gfx_firmware_header_v1_0 *ce_hdr;
2339         const struct gfx_firmware_header_v1_0 *me_hdr;
2340         const __le32 *fw_data;
2341         unsigned i, fw_size;
2342
2343         if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2344                 return -EINVAL;
2345
2346         pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2347                 adev->gfx.pfp_fw->data;
2348         ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2349                 adev->gfx.ce_fw->data;
2350         me_hdr = (const struct gfx_firmware_header_v1_0 *)
2351                 adev->gfx.me_fw->data;
2352
2353         amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2354         amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2355         amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2356
2357         gfx_v9_0_cp_gfx_enable(adev, false);
2358
2359         /* PFP */
2360         fw_data = (const __le32 *)
2361                 (adev->gfx.pfp_fw->data +
2362                  le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2363         fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2364         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2365         for (i = 0; i < fw_size; i++)
2366                 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2367         WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2368
2369         /* CE */
2370         fw_data = (const __le32 *)
2371                 (adev->gfx.ce_fw->data +
2372                  le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2373         fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2374         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2375         for (i = 0; i < fw_size; i++)
2376                 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2377         WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2378
2379         /* ME */
2380         fw_data = (const __le32 *)
2381                 (adev->gfx.me_fw->data +
2382                  le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2383         fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2384         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2385         for (i = 0; i < fw_size; i++)
2386                 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2387         WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2388
2389         return 0;
2390 }
2391
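/*
 * Start the CP gfx engine and emit the initial state on ring 0: the
 * clear-state preamble generated from gfx9_cs_data, a CLEAR_STATE
 * packet, and the CE partition bases.
 */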
2392 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2393 {
2394         struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2395         const struct cs_section_def *sect = NULL;
2396         const struct cs_extent_def *ext = NULL;
2397         int r, i, tmp;
2398
2399         /* init the CP */
2400         WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2401         WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2402
2403         gfx_v9_0_cp_gfx_enable(adev, true);
2404
2405         r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2406         if (r) {
2407                 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2408                 return r;
2409         }
2410
2411         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2412         amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2413
2414         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2415         amdgpu_ring_write(ring, 0x80000000);
2416         amdgpu_ring_write(ring, 0x80000000);
2417
2418         for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2419                 for (ext = sect->section; ext->extent != NULL; ++ext) {
2420                         if (sect->id == SECT_CONTEXT) {
2421                                 amdgpu_ring_write(ring,
2422                                        PACKET3(PACKET3_SET_CONTEXT_REG,
2423                                                ext->reg_count));
2424                                 amdgpu_ring_write(ring,
2425                                        ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2426                                 for (i = 0; i < ext->reg_count; i++)
2427                                         amdgpu_ring_write(ring, ext->extent[i]);
2428                         }
2429                 }
2430         }
2431
2432         amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2433         amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2434
2435         amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2436         amdgpu_ring_write(ring, 0);
2437
2438         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2439         amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2440         amdgpu_ring_write(ring, 0x8000);
2441         amdgpu_ring_write(ring, 0x8000);
2442
2443         amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2444         tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2445                 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2446         amdgpu_ring_write(ring, tmp);
2447         amdgpu_ring_write(ring, 0);
2448
2449         amdgpu_ring_commit(ring);
2450
2451         return 0;
2452 }
2453
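/*
 * Program ring buffer 0.  Note the encodings used below: RB_BUFSZ is
 * log2 of the ring size in qwords, the ring base is written in 256-byte
 * units (gpu_addr >> 8), and the rptr/wptr writeback locations are
 * 32-bit slots in the shared adev->wb page (hence the "* 4").
 */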
2454 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2455 {
2456         struct amdgpu_ring *ring;
2457         u32 tmp;
2458         u32 rb_bufsz;
2459         u64 rb_addr, rptr_addr, wptr_gpu_addr;
2460
2461         /* Set the write pointer delay */
2462         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2463
2464         /* set the RB to use vmid 0 */
2465         WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2466
2467         /* Set ring buffer size */
2468         ring = &adev->gfx.gfx_ring[0];
2469         rb_bufsz = order_base_2(ring->ring_size / 8);
2470         tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2471         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2472 #ifdef __BIG_ENDIAN
2473         tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2474 #endif
2475         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2476
2477         /* Initialize the ring buffer's write pointers */
2478         ring->wptr = 0;
2479         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2480         WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2481
2482         /* set the wb address whether it's enabled or not */
2483         rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2484         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2485         WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2486
2487         wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2488         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2489         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2490
2491         mdelay(1);
2492         WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2493
2494         rb_addr = ring->gpu_addr >> 8;
2495         WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2496         WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2497
2498         tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2499         if (ring->use_doorbell) {
2500                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2501                                     DOORBELL_OFFSET, ring->doorbell_index);
2502                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2503                                     DOORBELL_EN, 1);
2504         } else {
2505                 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2506         }
2507         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2508
2509         tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2510                         DOORBELL_RANGE_LOWER, ring->doorbell_index);
2511         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2512
2513         WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2514                        CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2515
2517         /* start the ring */
2518         gfx_v9_0_cp_gfx_start(adev);
2519         ring->ready = true;
2520
2521         return 0;
2522 }
2523
2524 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2525 {
2526         int i;
2527
2528         if (enable) {
2529                 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2530         } else {
2531                 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2532                         (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2533                 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2534                         adev->gfx.compute_ring[i].ready = false;
2535                 adev->gfx.kiq.ring.ready = false;
2536         }
2537         udelay(50);
2538 }
2539
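/*
 * Load MEC firmware for the legacy (non-PSP) path.  Unlike PFP/CE/ME,
 * the MEC instruction cache is backed by a GPU buffer (mec_fw_gpu_addr,
 * programmed into CP_CPC_IC_BASE), so only the jump table is streamed
 * through the UCODE_ADDR/DATA register pair.
 */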
2540 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2541 {
2542         const struct gfx_firmware_header_v1_0 *mec_hdr;
2543         const __le32 *fw_data;
2544         unsigned i;
2545         u32 tmp;
2546
2547         if (!adev->gfx.mec_fw)
2548                 return -EINVAL;
2549
2550         gfx_v9_0_cp_compute_enable(adev, false);
2551
2552         mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2553         amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2554
2555         fw_data = (const __le32 *)
2556                 (adev->gfx.mec_fw->data +
2557                  le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2558         tmp = 0;
2559         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2560         tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2561         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2562
2563         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2564                 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2565         WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2566                 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2567
2568         /* MEC1 */
2569         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2570                          mec_hdr->jt_offset);
2571         for (i = 0; i < mec_hdr->jt_size; i++)
2572                 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2573                         le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2574
2575         WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2576                         adev->gfx.mec_fw_version);
2577         /* TODO: loading MEC2 firmware is only necessary if MEC2 should run microcode different from MEC1's. */
2578
2579         return 0;
2580 }
2581
2582 /* KIQ functions */
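/*
 * Tell the RLC scheduler which queue is the KIQ: the low byte of
 * RLC_CP_SCHEDULERS encodes me/pipe/queue, and bit 7 is set by a second
 * write, presumably so the queue id is latched before the enable bit.
 */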
2583 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2584 {
2585         uint32_t tmp;
2586         struct amdgpu_device *adev = ring->adev;
2587
2588         /* tell the RLC which queue is the KIQ */
2589         tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2590         tmp &= 0xffffff00;
2591         tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2592         WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2593         tmp |= 0x80;
2594         WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2595 }
2596
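/*
 * Map all KCQs through the KIQ: one SET_RESOURCES packet advertises the
 * queue_mask, then one MAP_QUEUES packet per compute ring hands the MQD
 * and wptr-poll addresses to the CPC.  Completion is confirmed by having
 * the KIQ write a magic value to a scratch register and polling for it.
 */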
2597 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2598 {
2599         struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2600         uint32_t scratch, tmp = 0;
2601         uint64_t queue_mask = 0;
2602         int r, i;
2603
2604         for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2605                 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2606                         continue;
2607
2608                 /* This situation may be hit in the future if a new HW
2609                  * generation exposes more than 64 queues. If so, the
2610                  * definition of queue_mask needs updating */
2611                 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2612                         DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2613                         break;
2614                 }
2615
2616                 queue_mask |= (1ull << i);
2617         }
2618
2619         r = amdgpu_gfx_scratch_get(adev, &scratch);
2620         if (r) {
2621                 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2622                 return r;
2623         }
2624         WREG32(scratch, 0xCAFEDEAD);
2625
2626         r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2627         if (r) {
2628                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2629                 amdgpu_gfx_scratch_free(adev, scratch);
2630                 return r;
2631         }
2632
2633         /* set resources */
2634         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2635         amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2636                           PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2637         amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2638         amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2639         amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2640         amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2641         amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2642         amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2643         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2644                 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2645                 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2646                 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2647
2648                 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2649                 /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2650                 amdgpu_ring_write(kiq_ring,
2651                                   PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2652                                   PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2653                                   PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2654                                   PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2655                                   PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2656                                   PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2657                                   PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2658                                   PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2659                                   PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2660                 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2661                 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2662                 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2663                 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2664                 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2665         }
2666         /* write to scratch for completion */
2667         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2668         amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2669         amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2670         amdgpu_ring_commit(kiq_ring);
2671
2672         for (i = 0; i < adev->usec_timeout; i++) {
2673                 tmp = RREG32(scratch);
2674                 if (tmp == 0xDEADBEEF)
2675                         break;
2676                 DRM_UDELAY(1);
2677         }
2678         if (i >= adev->usec_timeout) {
2679                 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2680                           scratch, tmp);
2681                 r = -EINVAL;
2682         }
2683         amdgpu_gfx_scratch_free(adev, scratch);
2684
2685         return r;
2686 }
2687
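/*
 * Fill the Memory Queue Descriptor (MQD) for a compute/KIQ ring.  The
 * fields mirror the CP_HQD_* registers; they are either written back to
 * the hardware by gfx_v9_0_kiq_init_register() or consumed by the KIQ
 * when the queue is mapped via MAP_QUEUES.
 */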
2688 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2689 {
2690         struct amdgpu_device *adev = ring->adev;
2691         struct v9_mqd *mqd = ring->mqd_ptr;
2692         uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2693         uint32_t tmp;
2694
2695         mqd->header = 0xC0310800;
2696         mqd->compute_pipelinestat_enable = 0x00000001;
2697         mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2698         mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2699         mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2700         mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2701         mqd->compute_misc_reserved = 0x00000003;
2702
2703         mqd->dynamic_cu_mask_addr_lo =
2704                 lower_32_bits(ring->mqd_gpu_addr
2705                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2706         mqd->dynamic_cu_mask_addr_hi =
2707                 upper_32_bits(ring->mqd_gpu_addr
2708                               + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2709
2710         eop_base_addr = ring->eop_gpu_addr >> 8;
2711         mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2712         mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2713
2714         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2715         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2716         tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2717                         (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2718
2719         mqd->cp_hqd_eop_control = tmp;
2720
2721         /* enable doorbell? */
2722         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2723
2724         if (ring->use_doorbell) {
2725                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2726                                     DOORBELL_OFFSET, ring->doorbell_index);
2727                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2728                                     DOORBELL_EN, 1);
2729                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2730                                     DOORBELL_SOURCE, 0);
2731                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2732                                     DOORBELL_HIT, 0);
2733         } else {
2734                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2735                                          DOORBELL_EN, 0);
2736         }
2737
2738         mqd->cp_hqd_pq_doorbell_control = tmp;
2739
2740         /* disable the queue if it's active */
2741         ring->wptr = 0;
2742         mqd->cp_hqd_dequeue_request = 0;
2743         mqd->cp_hqd_pq_rptr = 0;
2744         mqd->cp_hqd_pq_wptr_lo = 0;
2745         mqd->cp_hqd_pq_wptr_hi = 0;
2746
2747         /* set the pointer to the MQD */
2748         mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2749         mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2750
2751         /* set MQD vmid to 0 */
2752         tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2753         tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2754         mqd->cp_mqd_control = tmp;
2755
2756         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2757         hqd_gpu_addr = ring->gpu_addr >> 8;
2758         mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2759         mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2760
2761         /* set up the HQD, this is similar to CP_RB0_CNTL */
2762         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2763         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2764                             (order_base_2(ring->ring_size / 4) - 1));
2765         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2766                         ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2767 #ifdef __BIG_ENDIAN
2768         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2769 #endif
2770         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2771         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2772         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2773         tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2774         mqd->cp_hqd_pq_control = tmp;
2775
2776         /* set the wb address whether it's enabled or not */
2777         wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2778         mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2779         mqd->cp_hqd_pq_rptr_report_addr_hi =
2780                 upper_32_bits(wb_gpu_addr) & 0xffff;
2781
2782         /* only used if CP_PQ_WPTR_POLL_CNTL.EN=1 */
2783         wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2784         mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2785         mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2786
2787         tmp = 0;
2788         /* enable the doorbell if requested */
2789         if (ring->use_doorbell) {
2790                 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2791                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2792                                 DOORBELL_OFFSET, ring->doorbell_index);
2793
2794                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2795                                          DOORBELL_EN, 1);
2796                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2797                                          DOORBELL_SOURCE, 0);
2798                 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2799                                          DOORBELL_HIT, 0);
2800         }
2801
2802         mqd->cp_hqd_pq_doorbell_control = tmp;
2803
2804         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2805         ring->wptr = 0;
2806         mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2807
2808         /* set the vmid for the queue */
2809         mqd->cp_hqd_vmid = 0;
2810
2811         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2812         tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2813         mqd->cp_hqd_persistent_state = tmp;
2814
2815         /* set MIN_IB_AVAIL_SIZE */
2816         tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2817         tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2818         mqd->cp_hqd_ib_control = tmp;
2819
2820         /* activate the queue */
2821         mqd->cp_hqd_active = 1;
2822
2823         return 0;
2824 }
2825
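/*
 * Push the cached MQD fields into the CP_HQD_* registers of the queue
 * currently selected through soc15_grbm_select().  Callers hold
 * adev->srbm_mutex around the select/program/deselect sequence.
 */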
2826 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2827 {
2828         struct amdgpu_device *adev = ring->adev;
2829         struct v9_mqd *mqd = ring->mqd_ptr;
2830         int j;
2831
2832         /* disable wptr polling */
2833         WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2834
2835         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2836                mqd->cp_hqd_eop_base_addr_lo);
2837         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2838                mqd->cp_hqd_eop_base_addr_hi);
2839
2840         /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2841         WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2842                mqd->cp_hqd_eop_control);
2843
2844         /* enable doorbell? */
2845         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2846                mqd->cp_hqd_pq_doorbell_control);
2847
2848         /* disable the queue if it's active */
2849         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2850                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2851                 for (j = 0; j < adev->usec_timeout; j++) {
2852                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2853                                 break;
2854                         udelay(1);
2855                 }
2856                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2857                        mqd->cp_hqd_dequeue_request);
2858                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2859                        mqd->cp_hqd_pq_rptr);
2860                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2861                        mqd->cp_hqd_pq_wptr_lo);
2862                 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2863                        mqd->cp_hqd_pq_wptr_hi);
2864         }
2865
2866         /* set the pointer to the MQD */
2867         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2868                mqd->cp_mqd_base_addr_lo);
2869         WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2870                mqd->cp_mqd_base_addr_hi);
2871
2872         /* set MQD vmid to 0 */
2873         WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2874                mqd->cp_mqd_control);
2875
2876         /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2877         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2878                mqd->cp_hqd_pq_base_lo);
2879         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2880                mqd->cp_hqd_pq_base_hi);
2881
2882         /* set up the HQD, this is similar to CP_RB0_CNTL */
2883         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2884                mqd->cp_hqd_pq_control);
2885
2886         /* set the wb address whether it's enabled or not */
2887         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2888                                 mqd->cp_hqd_pq_rptr_report_addr_lo);
2889         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2890                                 mqd->cp_hqd_pq_rptr_report_addr_hi);
2891
2892         /* only used if CP_PQ_WPTR_POLL_CNTL.EN=1 */
2893         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2894                mqd->cp_hqd_pq_wptr_poll_addr_lo);
2895         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2896                mqd->cp_hqd_pq_wptr_poll_addr_hi);
2897
2898         /* enable the doorbell if requested */
2899         if (ring->use_doorbell) {
2900                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2901                                         (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2902                 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2903                                         (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2904         }
2905
2906         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2907                mqd->cp_hqd_pq_doorbell_control);
2908
2909         /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2910         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2911                mqd->cp_hqd_pq_wptr_lo);
2912         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2913                mqd->cp_hqd_pq_wptr_hi);
2914
2915         /* set the vmid for the queue */
2916         WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2917
2918         WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2919                mqd->cp_hqd_persistent_state);
2920
2921         /* activate the queue */
2922         WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2923                mqd->cp_hqd_active);
2924
2925         if (ring->use_doorbell)
2926                 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2927
2928         return 0;
2929 }
2930
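/*
 * Quiesce the currently selected HQD.  If the dequeue request times out,
 * the queue is forced inactive by clearing CP_HQD_ACTIVE directly; the
 * doorbell/rptr/wptr state is then scrubbed so a later re-init starts
 * from a clean slate.
 */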
2931 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
2932 {
2933         struct amdgpu_device *adev = ring->adev;
2934         int j;
2935
2936         /* disable the queue if it's active */
2937         if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2938
2939                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2940
2941                 for (j = 0; j < adev->usec_timeout; j++) {
2942                         if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2943                                 break;
2944                         udelay(1);
2945                 }
2946
2947                 if (j == adev->usec_timeout) {
2948                         DRM_DEBUG("KIQ dequeue request failed.\n");
2949
2950                         /* Manual disable if dequeue request times out */
2951                         WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
2952                 }
2953
2954                 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 0);
2956         }
2957
2958         WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
2959         WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
2960         WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
2961         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2962         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
2963         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
2964         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
2965         WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
2966
2967         return 0;
2968 }
2969
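/*
 * KIQ bring-up.  On first init the MQD is generated and a copy is
 * stashed in mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]; on GPU reset the
 * backup is restored instead, so the queue resumes from a known-good
 * descriptor without regenerating it.
 */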
2970 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2971 {
2972         struct amdgpu_device *adev = ring->adev;
2973         struct v9_mqd *mqd = ring->mqd_ptr;
2974         int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2975
2976         gfx_v9_0_kiq_setting(ring);
2977
2978         if (adev->in_gpu_reset) { /* for GPU_RESET case */
2979                 /* reset MQD to a clean status */
2980                 if (adev->gfx.mec.mqd_backup[mqd_idx])
2981                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2982
2983                 /* reset ring buffer */
2984                 ring->wptr = 0;
2985                 amdgpu_ring_clear_ring(ring);
2986
2987                 mutex_lock(&adev->srbm_mutex);
2988                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2989                 gfx_v9_0_kiq_init_register(ring);
2990                 soc15_grbm_select(adev, 0, 0, 0, 0);
2991                 mutex_unlock(&adev->srbm_mutex);
2992         } else {
2993                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2994                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2995                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2996                 mutex_lock(&adev->srbm_mutex);
2997                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2998                 gfx_v9_0_mqd_init(ring);
2999                 gfx_v9_0_kiq_init_register(ring);
3000                 soc15_grbm_select(adev, 0, 0, 0, 0);
3001                 mutex_unlock(&adev->srbm_mutex);
3002
3003                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3004                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3005         }
3006
3007         return 0;
3008 }
3009
3010 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3011 {
3012         struct amdgpu_device *adev = ring->adev;
3013         struct v9_mqd *mqd = ring->mqd_ptr;
3014         int mqd_idx = ring - &adev->gfx.compute_ring[0];
3015
3016         if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3017                 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3018                 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3019                 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3020                 mutex_lock(&adev->srbm_mutex);
3021                 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3022                 gfx_v9_0_mqd_init(ring);
3023                 soc15_grbm_select(adev, 0, 0, 0, 0);
3024                 mutex_unlock(&adev->srbm_mutex);
3025
3026                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3027                         memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3028         } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3029                 /* reset MQD to a clean status */
3030                 if (adev->gfx.mec.mqd_backup[mqd_idx])
3031                         memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3032
3033                 /* reset ring buffer */
3034                 ring->wptr = 0;
3035                 amdgpu_ring_clear_ring(ring);
3036         } else {
3037                 amdgpu_ring_clear_ring(ring);
3038         }
3039
3040         return 0;
3041 }
3042
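/*
 * Resume path: map each ring's MQD BO, (re)initialize the descriptor,
 * then let the KIQ map the KCQs.  ring->mqd_ptr is only valid while the
 * BO is kmapped, so it is cleared again before the BO is unreserved.
 */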
3043 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3044 {
3045         struct amdgpu_ring *ring = NULL;
3046         int r = 0, i;
3047
3048         gfx_v9_0_cp_compute_enable(adev, true);
3049
3050         ring = &adev->gfx.kiq.ring;
3051
3052         r = amdgpu_bo_reserve(ring->mqd_obj, false);
3053         if (unlikely(r != 0))
3054                 goto done;
3055
3056         r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3057         if (!r) {
3058                 r = gfx_v9_0_kiq_init_queue(ring);
3059                 amdgpu_bo_kunmap(ring->mqd_obj);
3060                 ring->mqd_ptr = NULL;
3061         }
3062         amdgpu_bo_unreserve(ring->mqd_obj);
3063         if (r)
3064                 goto done;
3065
3066         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3067                 ring = &adev->gfx.compute_ring[i];
3068
3069                 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3070                 if (unlikely(r != 0))
3071                         goto done;
3072                 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3073                 if (!r) {
3074                         r = gfx_v9_0_kcq_init_queue(ring);
3075                         amdgpu_bo_kunmap(ring->mqd_obj);
3076                         ring->mqd_ptr = NULL;
3077                 }
3078                 amdgpu_bo_unreserve(ring->mqd_obj);
3079                 if (r)
3080                         goto done;
3081         }
3082
3083         r = gfx_v9_0_kiq_kcq_enable(adev);
3084 done:
3085         return r;
3086 }
3087
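/*
 * Full CP bring-up: microcode is loaded by the driver only for the
 * legacy (non-PSP) path, then the gfx ring, KIQ and KCQs are resumed and
 * ring-tested.  A failed ring test leaves ring->ready false, so later
 * submissions to that ring are rejected.
 */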
3088 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3089 {
3090         int r, i;
3091         struct amdgpu_ring *ring;
3092
3093         if (!(adev->flags & AMD_IS_APU))
3094                 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3095
3096         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3097                 /* legacy firmware loading */
3098                 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3099                 if (r)
3100                         return r;
3101
3102                 r = gfx_v9_0_cp_compute_load_microcode(adev);
3103                 if (r)
3104                         return r;
3105         }
3106
3107         r = gfx_v9_0_cp_gfx_resume(adev);
3108         if (r)
3109                 return r;
3110
3111         r = gfx_v9_0_kiq_resume(adev);
3112         if (r)
3113                 return r;
3114
3115         ring = &adev->gfx.gfx_ring[0];
3116         r = amdgpu_ring_test_ring(ring);
3117         if (r) {
3118                 ring->ready = false;
3119                 return r;
3120         }
3121
3122         ring = &adev->gfx.kiq.ring;
3123         ring->ready = true;
3124         r = amdgpu_ring_test_ring(ring);
3125         if (r)
3126                 ring->ready = false;
3127
3128         for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3129                 ring = &adev->gfx.compute_ring[i];
3130
3131                 ring->ready = true;
3132                 r = amdgpu_ring_test_ring(ring);
3133                 if (r)
3134                         ring->ready = false;
3135         }
3136
3137         gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3138
3139         return 0;
3140 }
3141
3142 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3143 {
3144         gfx_v9_0_cp_gfx_enable(adev, enable);
3145         gfx_v9_0_cp_compute_enable(adev, enable);
3146 }
3147
3148 static int gfx_v9_0_hw_init(void *handle)
3149 {
3150         int r;
3151         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3152
3153         gfx_v9_0_init_golden_registers(adev);
3154
3155         gfx_v9_0_gpu_init(adev);
3156
3157         r = gfx_v9_0_csb_vram_pin(adev);
3158         if (r)
3159                 return r;
3160
3161         r = gfx_v9_0_rlc_resume(adev);
3162         if (r)
3163                 return r;
3164
3165         r = gfx_v9_0_cp_resume(adev);
3166         if (r)
3167                 return r;
3168
3169         r = gfx_v9_0_ngg_en(adev);
3170         if (r)
3171                 return r;
3172
3173         return r;
3174 }
3175
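/*
 * Tear down one KCQ through the KIQ with an UNMAP_QUEUES(RESET_QUEUES)
 * packet, using the same scratch-register handshake as
 * gfx_v9_0_kiq_kcq_enable() to confirm the KIQ consumed the request
 * before the MQD backing memory goes away.
 */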
3176 static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
3177 {
3178         struct amdgpu_device *adev = kiq_ring->adev;
3179         uint32_t scratch, tmp = 0;
3180         int r, i;
3181
3182         r = amdgpu_gfx_scratch_get(adev, &scratch);
3183         if (r) {
3184                 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
3185                 return r;
3186         }
3187         WREG32(scratch, 0xCAFEDEAD);
3188
3189         r = amdgpu_ring_alloc(kiq_ring, 10);
3190         if (r) {
3191                 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3192                 amdgpu_gfx_scratch_free(adev, scratch);
3193                 return r;
3194         }
3195
3196         /* unmap queues */
3197         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3198         amdgpu_ring_write(kiq_ring, /* action: RESET_QUEUES, queue_sel: 0, engine_sel: 0, num_Q: 1 */
3199                                                 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3200                                                 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3201                                                 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3202                                                 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3203         amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3204         amdgpu_ring_write(kiq_ring, 0);
3205         amdgpu_ring_write(kiq_ring, 0);
3206         amdgpu_ring_write(kiq_ring, 0);
3207         /* write to scratch for completion */
3208         amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3209         amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
3210         amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
3211         amdgpu_ring_commit(kiq_ring);
3212
3213         for (i = 0; i < adev->usec_timeout; i++) {
3214                 tmp = RREG32(scratch);
3215                 if (tmp == 0xDEADBEEF)
3216                         break;
3217                 DRM_UDELAY(1);
3218         }
3219         if (i >= adev->usec_timeout) {
3220                 DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
3221                 r = -EINVAL;
3222         }
3223         amdgpu_gfx_scratch_free(adev, scratch);
3224         return r;
3225 }
3226
3227 static int gfx_v9_0_hw_fini(void *handle)
3228 {
3229         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3230         int i;
3231
3232         amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
3233                                                AMD_PG_STATE_UNGATE);
3234
3235         amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3236         amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3237
3238         /* disable KCQs so the CPC stops touching memory that will no longer be valid */
3239         for (i = 0; i < adev->gfx.num_compute_rings; i++)
3240                 gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
3241
3242         if (amdgpu_sriov_vf(adev)) {
3243                 gfx_v9_0_cp_gfx_enable(adev, false);
3244                 /* Polling must be disabled for SRIOV when the hw is
3245                  * finished; otherwise the CPC engine may keep fetching a
3246                  * WB address that is no longer valid after sw teardown and
3247                  * trigger DMAR read errors on the hypervisor side.
3248                  */
3249                 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3250                 return 0;
3251         }
3252
3253         /* Use the deinitialize sequence from CAIL when unbinding the
3254          * device from the driver, otherwise the KIQ hangs when binding back.
3255          */
3256         if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3257                 mutex_lock(&adev->srbm_mutex);
3258                 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3259                                 adev->gfx.kiq.ring.pipe,
3260                                 adev->gfx.kiq.ring.queue, 0);
3261                 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3262                 soc15_grbm_select(adev, 0, 0, 0, 0);
3263                 mutex_unlock(&adev->srbm_mutex);
3264         }
3265
3266         gfx_v9_0_cp_enable(adev, false);
3267         gfx_v9_0_rlc_stop(adev);
3268
3269         gfx_v9_0_csb_vram_unpin(adev);
3270
3271         return 0;
3272 }
3273
3274 static int gfx_v9_0_suspend(void *handle)
3275 {
3276         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3277
3278         adev->gfx.in_suspend = true;
3279         return gfx_v9_0_hw_fini(adev);
3280 }
3281
3282 static int gfx_v9_0_resume(void *handle)
3283 {
3284         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3285         int r;
3286
3287         r = gfx_v9_0_hw_init(adev);
3288         adev->gfx.in_suspend = false;
3289         return r;
3290 }
3291
3292 static bool gfx_v9_0_is_idle(void *handle)
3293 {
3294         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3295
3296         return !REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3297                               GRBM_STATUS, GUI_ACTIVE);
3301 }
3302
3303 static int gfx_v9_0_wait_for_idle(void *handle)
3304 {
3305         unsigned i;
3306         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3307
3308         for (i = 0; i < adev->usec_timeout; i++) {
3309                 if (gfx_v9_0_is_idle(handle))
3310                         return 0;
3311                 udelay(1);
3312         }
3313         return -ETIMEDOUT;
3314 }
3315
3316 static int gfx_v9_0_soft_reset(void *handle)
3317 {
3318         u32 grbm_soft_reset = 0;
3319         u32 tmp;
3320         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3321
3322         /* GRBM_STATUS */
3323         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3324         if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3325                    GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3326                    GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3327                    GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3328                    GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3329                    GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3330                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3331                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3332                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3333                                                 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3334         }
3335
3336         if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3337                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3338                                                 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3339         }
3340
3341         /* GRBM_STATUS2 */
3342         tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3343         if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3344                 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3345                                                 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3346
3348         if (grbm_soft_reset) {
3349                 /* stop the rlc */
3350                 gfx_v9_0_rlc_stop(adev);
3351
3352                 /* Disable GFX parsing/prefetching */
3353                 gfx_v9_0_cp_gfx_enable(adev, false);
3354
3355                 /* Disable MEC parsing/prefetching */
3356                 gfx_v9_0_cp_compute_enable(adev, false);
3357
3358                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3359                 tmp |= grbm_soft_reset;
3360                 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3361                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3362                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3363
3364                 udelay(50);
3365
3366                 tmp &= ~grbm_soft_reset;
3367                 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3368                 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3371
3372                 /* Wait a little for things to settle down */
3373                 udelay(50);
3374         }
3375         return 0;
3376 }
3377
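/*
 * Sample the free-running GPU clock: writing RLC_CAPTURE_GPU_CLOCK_COUNT
 * latches the 64-bit counter so the LSB/MSB reads below are coherent,
 * and gpu_clock_mutex serializes concurrent captures.
 */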
3378 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3379 {
3380         uint64_t clock;
3381
3382         mutex_lock(&adev->gfx.gpu_clock_mutex);
3383         WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3384         clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3385                 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3386         mutex_unlock(&adev->gfx.gpu_clock_mutex);
3387         return clock;
3388 }
3389
3390 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3391                                           uint32_t vmid,
3392                                           uint32_t gds_base, uint32_t gds_size,
3393                                           uint32_t gws_base, uint32_t gws_size,
3394                                           uint32_t oa_base, uint32_t oa_size)
3395 {
3396         struct amdgpu_device *adev = ring->adev;
3397
3398         gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3399         gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3400
3401         gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3402         gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3403
3404         oa_base = oa_base >> AMDGPU_OA_SHIFT;
3405         oa_size = oa_size >> AMDGPU_OA_SHIFT;
3406
3407         /* GDS Base */
3408         gfx_v9_0_write_data_to_reg(ring, 0, false,
3409                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3410                                    gds_base);
3411
3412         /* GDS Size */
3413         gfx_v9_0_write_data_to_reg(ring, 0, false,
3414                                    SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3415                                    gds_size);
3416
3417         /* GWS */
3418         gfx_v9_0_write_data_to_reg(ring, 0, false,
3419                                    SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3420                                    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3421
3422         /* OA */
3423         gfx_v9_0_write_data_to_reg(ring, 0, false,
3424                                    SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3425                                    (1 << (oa_size + oa_base)) - (1 << oa_base));
3426 }
3427
3428 static int gfx_v9_0_early_init(void *handle)
3429 {
3430         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3431
3432         adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3433         adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3434         gfx_v9_0_set_ring_funcs(adev);
3435         gfx_v9_0_set_irq_funcs(adev);
3436         gfx_v9_0_set_gds_init(adev);
3437         gfx_v9_0_set_rlc_funcs(adev);
3438
3439         return 0;
3440 }
3441
3442 static int gfx_v9_0_late_init(void *handle)
3443 {
3444         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3445         int r;
3446
3447         r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3448         if (r)
3449                 return r;
3450
3451         r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3452         if (r)
3453                 return r;
3454
3455         return 0;
3456 }
3457
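/*
 * RLC safe mode handshake: write CMD with MESSAGE=1 into RLC_SAFE_MODE
 * and wait for the RLC firmware to clear the CMD bit, indicating the
 * request was consumed and clock/power state changes may proceed safely.
 */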
3458 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3459 {
3460         uint32_t rlc_setting, data;
3461         unsigned i;
3462
3463         if (adev->gfx.rlc.in_safe_mode)
3464                 return;
3465
3466         /* if RLC is not enabled, do nothing */
3467         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3468         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3469                 return;
3470
3471         if (adev->cg_flags &
3472             (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3473              AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3474                 data = RLC_SAFE_MODE__CMD_MASK;
3475                 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3476                 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3477
3478                 /* wait for RLC_SAFE_MODE */
3479                 for (i = 0; i < adev->usec_timeout; i++) {
3480                         if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3481                                 break;
3482                         udelay(1);
3483                 }
3484                 adev->gfx.rlc.in_safe_mode = true;
3485         }
3486 }
3487
3488 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3489 {
3490         uint32_t rlc_setting, data;
3491
3492         if (!adev->gfx.rlc.in_safe_mode)
3493                 return;
3494
3495         /* if RLC is not enabled, do nothing */
3496         rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3497         if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3498                 return;
3499
3500         if (adev->cg_flags &
3501             (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3502                 /*
3503                  * Try to exit safe mode only if it is already in safe
3504                  * mode.
3505                  */
3506                 data = RLC_SAFE_MODE__CMD_MASK;
3507                 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3508                 adev->gfx.rlc.in_safe_mode = false;
3509         }
3510 }
3511
3512 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3513                                                 bool enable)
3514 {
3515         gfx_v9_0_enter_rlc_safe_mode(adev);
3516
3517         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3518                 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3519                 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3520                         gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3521         } else {
3522                 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3523                 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3524         }
3525
3526         gfx_v9_0_exit_rlc_safe_mode(adev);
3527 }
3528
3529 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3530                                                 bool enable)
3531 {
3532         /* TODO: double check whether this needs to run under RLC safe mode */
3533         /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3534
3535         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3536                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3537         else
3538                 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3539
3540         if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3541                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3542         else
3543                 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3544
3545         /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3546 }
3547
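/*
 * The clockgating updates below all follow the same read-modify-write
 * pattern: snapshot the register into def, adjust the override/enable
 * bits in data, and write back only when something actually changed,
 * e.g.:
 *
 *	def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
 *	data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK;
 *	if (def != data)
 *		WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
 *
 * The "if (def != data)" guard avoids redundant MMIO writes.
 */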
3548 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3549                                                       bool enable)
3550 {
3551         uint32_t data, def;
3552
3553         /* It is disabled by HW by default */
3554         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3555                 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3556                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3557
3558                 if (adev->asic_type != CHIP_VEGA12)
3559                         data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3560
3561                 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3562                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3563                           RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3564
3565                 /* only for Vega10 & Raven1 */
3566                 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3567
3568                 if (def != data)
3569                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3570
3571                 /* MGLS is a global flag to control all MGLS in GFX */
3572                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3573                         /* 2 - RLC memory Light sleep */
3574                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3575                                 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3576                                 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3577                                 if (def != data)
3578                                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3579                         }
3580                         /* 3 - CP memory Light sleep */
3581                         if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3582                                 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3583                                 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3584                                 if (def != data)
3585                                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3586                         }
3587                 }
3588         } else {
3589                 /* 1 - MGCG_OVERRIDE */
3590                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3591
3592                 if (adev->asic_type != CHIP_VEGA12)
3593                         data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
3594
3595                 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3596                          RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3597                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3598                          RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3599
3600                 if (def != data)
3601                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3602
3603                 /* 2 - disable MGLS in RLC */
3604                 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3605                 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3606                         data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3607                         WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3608                 }
3609
3610                 /* 3 - disable MGLS in CP */
3611                 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3612                 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3613                         data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3614                         WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3615                 }
3616         }
3617 }
3618
3619 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3620                                            bool enable)
3621 {
3622         uint32_t data, def;
3623
3624         adev->gfx.rlc.funcs->enter_safe_mode(adev);
3625
3626         /* Enable 3D CGCG/CGLS */
3627         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3628                 /* write cmd to clear cgcg/cgls ov */
3629                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3630                 /* unset CGCG override */
3631                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3632                 /* update CGCG and CGLS override bits */
3633                 if (def != data)
3634                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3635                 /* enable 3Dcgcg FSM(0x0020003f) */
3636                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3637                 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3638                         RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3639                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3640                         data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3641                                 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3642                 if (def != data)
3643                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3644
3645                 /* set IDLE_POLL_COUNT(0x00900100) */
3646                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3647                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3648                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3649                 if (def != data)
3650                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3651         } else {
3652                 /* Disable CGCG/CGLS */
3653                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3654                 /* disable cgcg, cgls should be disabled */
3655                 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3656                           RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3657                 /* disable cgcg and cgls in FSM */
3658                 if (def != data)
3659                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3660         }
3661
3662         adev->gfx.rlc.funcs->exit_safe_mode(adev);
3663 }
3664
3665 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3666                                                       bool enable)
3667 {
3668         uint32_t def, data;
3669
3670         adev->gfx.rlc.funcs->enter_safe_mode(adev);
3671
3672         if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3673                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3674                 /* unset CGCG override */
3675                 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3676                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3677                         data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3678                 else
3679                         data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3680                 /* update CGCG and CGLS override bits */
3681                 if (def != data)
3682                         WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3683
3684                 /* enable cgcg FSM(0x0020003F) */
3685                 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3686                 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3687                         RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3688                 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3689                         data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3690                                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3691                 if (def != data)
3692                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3693
3694                 /* set IDLE_POLL_COUNT(0x00900100) */
3695                 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3696                 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3697                         (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3698                 if (def != data)
3699                         WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3700         } else {
3701                 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3702                 /* reset CGCG/CGLS bits */
3703                 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3704                 /* disable cgcg and cgls in FSM */
3705                 if (def != data)
3706                         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3707         }
3708
3709         adev->gfx.rlc.funcs->exit_safe_mode(adev);
3710 }
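
/*
 * All of the clock-gating helpers above follow the same
 * read-modify-conditional-write idiom so that a register is only
 * written when its value actually changes.  A minimal sketch of the
 * pattern (mmSOME_REG and SOME_FIELD_* are placeholders, not real
 * registers):
 *
 *	u32 def, data;
 *
 *	def = data = RREG32_SOC15(GC, 0, mmSOME_REG);
 *	data &= ~SOME_FIELD_MASK;
 *	data |= val << SOME_FIELD__SHIFT;
 *	if (def != data)
 *		WREG32_SOC15(GC, 0, mmSOME_REG, data);
 */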
3711
3712 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3713                                             bool enable)
3714 {
3715         if (enable) {
3716                 /* CGCG/CGLS should be enabled after MGCG/MGLS
3717                  * ===  MGCG + MGLS ===
3718                  */
3719                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3720                 /* ===  CGCG/CGLS for GFX 3D Only === */
3721                 gfx_v9_0_update_3d_clock_gating(adev, enable);
3722                 /* ===  CGCG + CGLS === */
3723                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3724         } else {
3725                 /* CGCG/CGLS should be disabled before MGCG/MGLS
3726                  * ===  CGCG + CGLS ===
3727                  */
3728                 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3729                 /* ===  CGCG/CGLS for GFX 3D Only === */
3730                 gfx_v9_0_update_3d_clock_gating(adev, enable);
3731                 /* ===  MGCG + MGLS === */
3732                 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3733         }
3734         return 0;
3735 }
3736
3737 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3738         .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3739         .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3740 };
3741
3742 static int gfx_v9_0_set_powergating_state(void *handle,
3743                                           enum amd_powergating_state state)
3744 {
3745         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3746         bool enable = (state == AMD_PG_STATE_GATE);
3747
3748         switch (adev->asic_type) {
3749         case CHIP_RAVEN:
3750                 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3751                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3752                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3753                 } else {
3754                         gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3755                         gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3756                 }
3757
3758                 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3759                         gfx_v9_0_enable_cp_power_gating(adev, true);
3760                 else
3761                         gfx_v9_0_enable_cp_power_gating(adev, false);
3762
3763                 /* update gfx cgpg state */
3764                 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3765
3766                 /* update mgcg state */
3767                 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3768
3769                 /* set gfx off through smu */
3770                 if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
3771                         amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
3772                 break;
3773         default:
3774                 break;
3775         }
3776
3777         return 0;
3778 }
3779
3780 static int gfx_v9_0_set_clockgating_state(void *handle,
3781                                           enum amd_clockgating_state state)
3782 {
3783         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3784
3785         if (amdgpu_sriov_vf(adev))
3786                 return 0;
3787
3788         switch (adev->asic_type) {
3789         case CHIP_VEGA10:
3790         case CHIP_VEGA12:
3791         case CHIP_VEGA20:
3792         case CHIP_RAVEN:
3793                 gfx_v9_0_update_gfx_clock_gating(adev,
3794                                                  state == AMD_CG_STATE_GATE);
3795                 break;
3796         default:
3797                 break;
3798         }
3799         return 0;
3800 }
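
/*
 * This handler is not called directly; the core reaches it through the
 * registered amd_ip_funcs.  Roughly (simplified sketch of the generic
 * IP-block dispatch, not a verbatim copy of it):
 *
 *	if (ip_block->version->funcs->set_clockgating_state)
 *		ip_block->version->funcs->set_clockgating_state(
 *			(void *)adev, AMD_CG_STATE_GATE);
 */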
3801
3802 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3803 {
3804         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3805         int data;
3806
3807         if (amdgpu_sriov_vf(adev))
3808                 *flags = 0;
3809
3810         /* AMD_CG_SUPPORT_GFX_MGCG */
3811         data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3812         if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3813                 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3814
3815         /* AMD_CG_SUPPORT_GFX_CGCG */
3816         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3817         if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3818                 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3819
3820         /* AMD_CG_SUPPORT_GFX_CGLS */
3821         if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3822                 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3823
3824         /* AMD_CG_SUPPORT_GFX_RLC_LS */
3825         data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3826         if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3827                 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3828
3829         /* AMD_CG_SUPPORT_GFX_CP_LS */
3830         data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3831         if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3832                 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3833
3834         /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3835         data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3836         if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3837                 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3838
3839         /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3840         if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3841                 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3842 }
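
/*
 * Callers OR the AMD_CG_SUPPORT_* bits into *flags; a consumer then
 * decodes them again, e.g. (hypothetical sketch, not driver code):
 *
 *	u32 flags = 0;
 *
 *	funcs->get_clockgating_state(adev, &flags);
 *	if (flags & AMD_CG_SUPPORT_GFX_MGCG)
 *		pr_info("GFX MGCG active\n");
 */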
3843
3844 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3845 {
3846         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
3847 }
3848
3849 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3850 {
3851         struct amdgpu_device *adev = ring->adev;
3852         u64 wptr;
3853
3854         /* XXX check if swapping is necessary on BE */
3855         if (ring->use_doorbell) {
3856                 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3857         } else {
3858                 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3859                 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3860         }
3861
3862         return wptr;
3863 }
3864
3865 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3866 {
3867         struct amdgpu_device *adev = ring->adev;
3868
3869         if (ring->use_doorbell) {
3870                 /* XXX check if swapping is necessary on BE */
3871                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3872                 WDOORBELL64(ring->doorbell_index, ring->wptr);
3873         } else {
3874                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3875                 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3876         }
3877 }
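
/*
 * With use_doorbell, the 64-bit wptr lives in the GPU-visible writeback
 * page and the doorbell merely notifies the CP, so the writeback slot
 * is updated before the doorbell is rung to avoid the CP reading a
 * stale value.  That is exactly the order used above:
 *
 *	atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
 *	WDOORBELL64(ring->doorbell_index, ring->wptr);
 */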
3878
3879 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3880 {
3881         struct amdgpu_device *adev = ring->adev;
3882         u32 ref_and_mask, reg_mem_engine;
3883         const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;
3884
3885         if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3886                 switch (ring->me) {
3887                 case 1:
3888                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3889                         break;
3890                 case 2:
3891                         ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3892                         break;
3893                 default:
3894                         return;
3895                 }
3896                 reg_mem_engine = 0;
3897         } else {
3898                 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3899                 reg_mem_engine = 1; /* pfp */
3900         }
3901
3902         gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3903                               adev->nbio_funcs->get_hdp_flush_req_offset(adev),
3904                               adev->nbio_funcs->get_hdp_flush_done_offset(adev),
3905                               ref_and_mask, ref_and_mask, 0x20);
3906 }
3907
3908 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3909                                       struct amdgpu_ib *ib,
3910                                       unsigned vmid, bool ctx_switch)
3911 {
3912         u32 header, control = 0;
3913
3914         if (ib->flags & AMDGPU_IB_FLAG_CE)
3915                 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3916         else
3917                 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3918
3919         control |= ib->length_dw | (vmid << 24);
3920
3921         if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3922                 control |= INDIRECT_BUFFER_PRE_ENB(1);
3923
3924                 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3925                         gfx_v9_0_ring_emit_de_meta(ring);
3926         }
3927
3928         amdgpu_ring_write(ring, header);
3929         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3930         amdgpu_ring_write(ring,
3931 #ifdef __BIG_ENDIAN
3932                 (2 << 0) |
3933 #endif
3934                 lower_32_bits(ib->gpu_addr));
3935         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3936         amdgpu_ring_write(ring, control);
3937 }
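
/*
 * For reference, the INDIRECT_BUFFER packet emitted above is four
 * dwords:
 *
 *	dw0: PACKET3(PACKET3_INDIRECT_BUFFER[_CONST], 2)
 *	dw1: ib_base_lo (the IB is dword aligned, so the low bits are
 *	     free to select the endian-swap mode on big-endian)
 *	dw2: ib_base_hi
 *	dw3: control = length_dw | (vmid << 24), plus PRE_ENB for
 *	     preemptible SR-IOV IBs
 */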
3938
3939 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3940                                           struct amdgpu_ib *ib,
3941                                           unsigned vmid, bool ctx_switch)
3942 {
3943         u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
3944
3945         amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3946         BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3947         amdgpu_ring_write(ring,
3948 #ifdef __BIG_ENDIAN
3949                                 (2 << 0) |
3950 #endif
3951                                 lower_32_bits(ib->gpu_addr));
3952         amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3953         amdgpu_ring_write(ring, control);
3954 }
3955
3956 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3957                                      u64 seq, unsigned flags)
3958 {
3959         bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3960         bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3961         bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
3962
3963         /* RELEASE_MEM - flush caches, send int */
3964         amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3965         amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
3966                                                EOP_TC_NC_ACTION_EN) :
3967                                               (EOP_TCL1_ACTION_EN |
3968                                                EOP_TC_ACTION_EN |
3969                                                EOP_TC_WB_ACTION_EN |
3970                                                EOP_TC_MD_ACTION_EN)) |
3971                                  EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3972                                  EVENT_INDEX(5)));
3973         amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3974
3975         /*
3976          * the address should be Qword aligned for a 64bit write, Dword
3977          * aligned if only the low 32 bits are sent (high data discarded)
3978          */
3979         if (write64bit)
3980                 BUG_ON(addr & 0x7);
3981         else
3982                 BUG_ON(addr & 0x3);
3983         amdgpu_ring_write(ring, lower_32_bits(addr));
3984         amdgpu_ring_write(ring, upper_32_bits(addr));
3985         amdgpu_ring_write(ring, lower_32_bits(seq));
3986         amdgpu_ring_write(ring, upper_32_bits(seq));
3987         amdgpu_ring_write(ring, 0);
3988 }
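
/*
 * The RELEASE_MEM fence above is eight dwords in total:
 *
 *	dw0: header (count 6)
 *	dw1: cache actions + CACHE_FLUSH_AND_INV_TS_EVENT
 *	dw2: DATA_SEL / INT_SEL
 *	dw3/dw4: address lo/hi
 *	dw5/dw6: seq lo/hi
 *	dw7: 0
 */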
3989
3990 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3991 {
3992         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3993         uint32_t seq = ring->fence_drv.sync_seq;
3994         uint64_t addr = ring->fence_drv.gpu_addr;
3995
3996         gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3997                               lower_32_bits(addr), upper_32_bits(addr),
3998                               seq, 0xffffffff, 4);
3999 }
4000
4001 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4002                                         unsigned vmid, uint64_t pd_addr)
4003 {
4004         amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
4005
4006         /* compute doesn't have PFP */
4007         if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
4008                 /* sync PFP to ME, otherwise we might get invalid PFP reads */
4009                 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
4010                 amdgpu_ring_write(ring, 0x0);
4011         }
4012 }
4013
4014 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
4015 {
4016         return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
4017 }
4018
4019 static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
4020 {
4021         u64 wptr;
4022
4023         /* XXX check if swapping is necessary on BE */
4024         if (ring->use_doorbell)
4025                 wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
4026         else
4027                 BUG();
4028         return wptr;
4029 }
4030
4031 static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
4032                                            bool acquire)
4033 {
4034         struct amdgpu_device *adev = ring->adev;
4035         int pipe_num, tmp, reg;
4036         int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
4037
4038         pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
4039
4040         /* first me only has 2 entries, GFX and HP3D */
4041         if (ring->me > 0)
4042                 pipe_num -= 2;
4043
4044         reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
4045         tmp = RREG32(reg);
4046         tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
4047         WREG32(reg, tmp);
4048 }
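
/*
 * Worked example for the pipe_num math above (values illustrative):
 * the SPI_WCL_PIPE_PERCENT_* registers are laid out consecutively, and
 * the first ME only contributes the GFX and HP3D entries, hence the -2
 * adjustment for compute.  With 4 pipes per MEC, me=1 pipe=1 gives
 * pipe_num = 1 * 4 + 1 - 2 = 3.
 */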
4049
4050 static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
4051                                             struct amdgpu_ring *ring,
4052                                             bool acquire)
4053 {
4054         int i, pipe;
4055         bool reserve;
4056         struct amdgpu_ring *iring;
4057
4058         mutex_lock(&adev->gfx.pipe_reserve_mutex);
4059         pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
4060         if (acquire)
4061                 set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4062         else
4063                 clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4064
4065         if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
4066                 /* Clear all reservations - everyone reacquires all resources */
4067                 for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
4068                         gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
4069                                                        true);
4070
4071                 for (i = 0; i < adev->gfx.num_compute_rings; ++i)
4072                         gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
4073                                                        true);
4074         } else {
4075                 /* Lower all pipes without a current reservation */
4076                 for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
4077                         iring = &adev->gfx.gfx_ring[i];
4078                         pipe = amdgpu_gfx_queue_to_bit(adev,
4079                                                        iring->me,
4080                                                        iring->pipe,
4081                                                        0);
4082                         reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4083                         gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4084                 }
4085
4086                 for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
4087                         iring = &adev->gfx.compute_ring[i];
4088                         pipe = amdgpu_gfx_queue_to_bit(adev,
4089                                                        iring->me,
4090                                                        iring->pipe,
4091                                                        0);
4092                         reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
4093                         gfx_v9_0_ring_set_pipe_percent(iring, reserve);
4094                 }
4095         }
4096
4097         mutex_unlock(&adev->gfx.pipe_reserve_mutex);
4098 }
4099
4100 static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
4101                                       struct amdgpu_ring *ring,
4102                                       bool acquire)
4103 {
4104         uint32_t pipe_priority = acquire ? 0x2 : 0x0;
4105         uint32_t queue_priority = acquire ? 0xf : 0x0;
4106
4107         mutex_lock(&adev->srbm_mutex);
4108         soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4109
4110         WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
4111         WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
4112
4113         soc15_grbm_select(adev, 0, 0, 0, 0);
4114         mutex_unlock(&adev->srbm_mutex);
4115 }
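
/*
 * The CP_HQD_* registers are instanced per queue: soc15_grbm_select()
 * aims subsequent register accesses at ring->me/pipe/queue, so the
 * selection must be reset to (0, 0, 0, 0) before srbm_mutex is
 * dropped, as done above.
 */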
4116
4117 static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
4118                                                enum drm_sched_priority priority)
4119 {
4120         struct amdgpu_device *adev = ring->adev;
4121         bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
4122
4123         if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
4124                 return;
4125
4126         gfx_v9_0_hqd_set_priority(adev, ring, acquire);
4127         gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
4128 }
4129
4130 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
4131 {
4132         struct amdgpu_device *adev = ring->adev;
4133
4134         /* XXX check if swapping is necessary on BE */
4135         if (ring->use_doorbell) {
4136                 atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
4137                 WDOORBELL64(ring->doorbell_index, ring->wptr);
4138         } else {
4139                 BUG(); /* only DOORBELL method supported on gfx9 now */
4140         }
4141 }
4142
4143 static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
4144                                          u64 seq, unsigned int flags)
4145 {
4146         struct amdgpu_device *adev = ring->adev;
4147
4148         /* we only allocate 32 bits for each seq writeback address */
4149         BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
4150
4151         /* write fence seq to the "addr" */
4152         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4153         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4154                                  WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
4155         amdgpu_ring_write(ring, lower_32_bits(addr));
4156         amdgpu_ring_write(ring, upper_32_bits(addr));
4157         amdgpu_ring_write(ring, lower_32_bits(seq));
4158
4159         if (flags & AMDGPU_FENCE_FLAG_INT) {
4160                 /* set register to trigger INT */
4161                 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4162                 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
4163                                          WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
4164                 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
4165                 amdgpu_ring_write(ring, 0);
4166                 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
4167         }
4168 }
4169
4170 static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
4171 {
4172         amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
4173         amdgpu_ring_write(ring, 0);
4174 }
4175
4176 static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
4177 {
4178         struct v9_ce_ib_state ce_payload = {0};
4179         uint64_t csa_addr;
4180         int cnt;
4181
4182         cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
4183         csa_addr = amdgpu_csa_vaddr(ring->adev);
4184
4185         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4186         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
4187                                  WRITE_DATA_DST_SEL(8) |
4188                                  WR_CONFIRM) |
4189                                  WRITE_DATA_CACHE_POLICY(0));
4190         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4191         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
4192         amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
4193 }
4194
4195 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
4196 {
4197         struct v9_de_ib_state de_payload = {0};
4198         uint64_t csa_addr, gds_addr;
4199         int cnt;
4200
4201         csa_addr = amdgpu_csa_vaddr(ring->adev);
4202         gds_addr = csa_addr + 4096;
4203         de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
4204         de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
4205
4206         cnt = (sizeof(de_payload) >> 2) + 4 - 2;
4207         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
4208         amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
4209                                  WRITE_DATA_DST_SEL(8) |
4210                                  WR_CONFIRM) |
4211                                  WRITE_DATA_CACHE_POLICY(0));
4212         amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4213         amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
4214         amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
4215 }
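
/*
 * On the WRITE_DATA count used by the CE/DE meta emitters: the PACKET3
 * count field is "dwords following the header minus one", and the
 * packet body here is one control dword, two address dwords and then
 * the payload, hence:
 *
 *	cnt = (payload dwords) + 3 - 1 = (sizeof(payload) >> 2) + 4 - 2
 */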
4216
4217 static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
4218 {
4219         amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
4220         amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
4221 }
4222
4223 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
4224 {
4225         uint32_t dw2 = 0;
4226
4227         if (amdgpu_sriov_vf(ring->adev))
4228                 gfx_v9_0_ring_emit_ce_meta(ring);
4229
4230         gfx_v9_0_ring_emit_tmz(ring, true);
4231
4232         dw2 |= 0x80000000; /* set load_enable otherwise this packet is just NOPs */
4233         if (flags & AMDGPU_HAVE_CTX_SWITCH) {
4234                 /* set load_global_config & load_global_uconfig */
4235                 dw2 |= 0x8001;
4236                 /* set load_cs_sh_regs */
4237                 dw2 |= 0x01000000;
4238                 /* set load_per_context_state & load_gfx_sh_regs for GFX */
4239                 dw2 |= 0x10002;
4240
4241                 /* set load_ce_ram if a preamble is present */
4242                 if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
4243                         dw2 |= 0x10000000;
4244         } else {
4245                 /* still load_ce_ram if this is the first time a preamble is
4246                  * presented, even though no context switch happens.
4247                  */
4248                 if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
4249                         dw2 |= 0x10000000;
4250         }
4251
4252         amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
4253         amdgpu_ring_write(ring, dw2);
4254         amdgpu_ring_write(ring, 0);
4255 }
4256
4257 static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
4258 {
4259         unsigned ret;
4260         amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
4261         amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
4262         amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
4263         amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exe_gpu_addr == 0 */
4264         ret = ring->wptr & ring->buf_mask;
4265         amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
4266         return ret;
4267 }
4268
4269 static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
4270 {
4271         unsigned cur;
4272         BUG_ON(offset > ring->buf_mask);
4273         BUG_ON(ring->ring[offset] != 0x55aa55aa);
4274
4275         cur = (ring->wptr & ring->buf_mask) - 1;
4276         if (likely(cur > offset))
4277                 ring->ring[offset] = cur - offset;
4278         else
4279                 ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
4280 }
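
/*
 * Worked example for the patch above (numbers illustrative): with
 * offset = 100 and the current position cur = 164, the 0x55aa55aa
 * dummy dword is replaced by 64, the number of dwords the CP skips
 * when *cond_exe_gpu_addr reads back as zero; the else branch covers
 * the case where the write pointer has wrapped around the ring.
 */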
4281
4282 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
4283 {
4284         struct amdgpu_device *adev = ring->adev;
4285
4286         amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
4287         amdgpu_ring_write(ring, 0 |     /* src: register */
4288                                 (5 << 8) |      /* dst: memory */
4289                                 (1 << 20));     /* write confirm */
4290         amdgpu_ring_write(ring, reg);
4291         amdgpu_ring_write(ring, 0);
4292         amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
4293                                 adev->virt.reg_val_offs * 4));
4294         amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
4295                                 adev->virt.reg_val_offs * 4));
4296 }
4297
4298 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
4299                                     uint32_t val)
4300 {
4301         uint32_t cmd = 0;
4302
4303         switch (ring->funcs->type) {
4304         case AMDGPU_RING_TYPE_GFX:
4305                 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
4306                 break;
4307         case AMDGPU_RING_TYPE_KIQ:
4308                 cmd = (1 << 16); /* no inc addr */
4309                 break;
4310         default:
4311                 cmd = WR_CONFIRM;
4312                 break;
4313         }
4314         amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
4315         amdgpu_ring_write(ring, cmd);
4316         amdgpu_ring_write(ring, reg);
4317         amdgpu_ring_write(ring, 0);
4318         amdgpu_ring_write(ring, val);
4319 }
4320
4321 static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
4322                                         uint32_t val, uint32_t mask)
4323 {
4324         gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
4325 }
4326
4327 static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
4328                                                   uint32_t reg0, uint32_t reg1,
4329                                                   uint32_t ref, uint32_t mask)
4330 {
4331         int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
4332
4333         if (amdgpu_sriov_vf(ring->adev))
4334                 gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
4335                                       ref, mask, 0x20);
4336         else
4337                 amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
4338                                                            ref, mask);
4339 }
4340
4341 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
4342                                                  enum amdgpu_interrupt_state state)
4343 {
4344         switch (state) {
4345         case AMDGPU_IRQ_STATE_DISABLE:
4346         case AMDGPU_IRQ_STATE_ENABLE:
4347                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4348                                TIME_STAMP_INT_ENABLE,
4349                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4350                 break;
4351         default:
4352                 break;
4353         }
4354 }
4355
4356 static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
4357                                                      int me, int pipe,
4358                                                      enum amdgpu_interrupt_state state)
4359 {
4360         u32 mec_int_cntl, mec_int_cntl_reg;
4361
4362         /*
4363          * amdgpu controls only the first MEC. That's why this function only
4364          * handles the setting of interrupts for this specific MEC. All other
4365          * pipes' interrupts are set by amdkfd.
4366          */
4367
4368         if (me == 1) {
4369                 switch (pipe) {
4370                 case 0:
4371                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4372                         break;
4373                 case 1:
4374                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
4375                         break;
4376                 case 2:
4377                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
4378                         break;
4379                 case 3:
4380                         mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
4381                         break;
4382                 default:
4383                         DRM_DEBUG("invalid pipe %d\n", pipe);
4384                         return;
4385                 }
4386         } else {
4387                 DRM_DEBUG("invalid me %d\n", me);
4388                 return;
4389         }
4390
4391         switch (state) {
4392         case AMDGPU_IRQ_STATE_DISABLE:
4393                 mec_int_cntl = RREG32(mec_int_cntl_reg);
4394                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4395                                              TIME_STAMP_INT_ENABLE, 0);
4396                 WREG32(mec_int_cntl_reg, mec_int_cntl);
4397                 break;
4398         case AMDGPU_IRQ_STATE_ENABLE:
4399                 mec_int_cntl = RREG32(mec_int_cntl_reg);
4400                 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
4401                                              TIME_STAMP_INT_ENABLE, 1);
4402                 WREG32(mec_int_cntl_reg, mec_int_cntl);
4403                 break;
4404         default:
4405                 break;
4406         }
4407 }
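
/*
 * The four CP_ME1_PIPEn_INT_CNTL registers share a single field
 * layout, which is why the CP_ME1_PIPE0_INT_CNTL REG_SET_FIELD macros
 * above are reused for every pipe.
 */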
4408
4409 static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4410                                              struct amdgpu_irq_src *source,
4411                                              unsigned type,
4412                                              enum amdgpu_interrupt_state state)
4413 {
4414         switch (state) {
4415         case AMDGPU_IRQ_STATE_DISABLE:
4416         case AMDGPU_IRQ_STATE_ENABLE:
4417                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4418                                PRIV_REG_INT_ENABLE,
4419                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
4420                 break;
4421         default:
4422                 break;
4423         }
4424
4425         return 0;
4426 }
4427
4428 static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
4429                                               struct amdgpu_irq_src *source,
4430                                               unsigned type,
4431                                               enum amdgpu_interrupt_state state)
4432 {
4433         switch (state) {
4434         case AMDGPU_IRQ_STATE_DISABLE:
4435         case AMDGPU_IRQ_STATE_ENABLE:
4436                 WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
4437                                PRIV_INSTR_INT_ENABLE,
4438                                state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
                break;
4439         default:
4440                 break;
4441         }
4442
4443         return 0;
4444 }
4445
4446 static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
4447                                             struct amdgpu_irq_src *src,
4448                                             unsigned type,
4449                                             enum amdgpu_interrupt_state state)
4450 {
4451         switch (type) {
4452         case AMDGPU_CP_IRQ_GFX_EOP:
4453                 gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
4454                 break;
4455         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
4456                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
4457                 break;
4458         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
4459                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
4460                 break;
4461         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
4462                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
4463                 break;
4464         case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
4465                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
4466                 break;
4467         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
4468                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
4469                 break;
4470         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
4471                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
4472                 break;
4473         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
4474                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
4475                 break;
4476         case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
4477                 gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
4478                 break;
4479         default:
4480                 break;
4481         }
4482         return 0;
4483 }
4484
4485 static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
4486                             struct amdgpu_irq_src *source,
4487                             struct amdgpu_iv_entry *entry)
4488 {
4489         int i;
4490         u8 me_id, pipe_id, queue_id;
4491         struct amdgpu_ring *ring;
4492
4493         DRM_DEBUG("IH: CP EOP\n");
4494         me_id = (entry->ring_id & 0x0c) >> 2;
4495         pipe_id = (entry->ring_id & 0x03) >> 0;
4496         queue_id = (entry->ring_id & 0x70) >> 4;
4497
4498         switch (me_id) {
4499         case 0:
4500                 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
4501                 break;
4502         case 1:
4503         case 2:
4504                 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4505                         ring = &adev->gfx.compute_ring[i];
4506                         /* Per-queue interrupt is supported for MEC starting from VI.
4507                          * The interrupt can only be enabled/disabled per pipe instead of per queue.
4508                          */
4509                         if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
4510                                 amdgpu_fence_process(ring);
4511                 }
4512                 break;
4513         }
4514         return 0;
4515 }
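
/*
 * ring_id decode used by the interrupt handlers in this file:
 *
 *	me_id    = bits [3:2]
 *	pipe_id  = bits [1:0]
 *	queue_id = bits [6:4]
 */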
4516
4517 static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
4518                                  struct amdgpu_irq_src *source,
4519                                  struct amdgpu_iv_entry *entry)
4520 {
4521         DRM_ERROR("Illegal register access in command stream\n");
4522         schedule_work(&adev->reset_work);
4523         return 0;
4524 }
4525
4526 static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
4527                                   struct amdgpu_irq_src *source,
4528                                   struct amdgpu_iv_entry *entry)
4529 {
4530         DRM_ERROR("Illegal instruction in command stream\n");
4531         schedule_work(&adev->reset_work);
4532         return 0;
4533 }
4534
4535 static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
4536                                             struct amdgpu_irq_src *src,
4537                                             unsigned int type,
4538                                             enum amdgpu_interrupt_state state)
4539 {
4540         uint32_t tmp, target;
4541         struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4542
4543         if (ring->me == 1)
4544                 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
4545         else
4546                 target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
4547         target += ring->pipe;
4548
4549         switch (type) {
4550         case AMDGPU_CP_KIQ_IRQ_DRIVER0:
4551                 if (state == AMDGPU_IRQ_STATE_DISABLE) {
4552                         tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4553                         tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4554                                                  GENERIC2_INT_ENABLE, 0);
4555                         WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4556
4557                         tmp = RREG32(target);
4558                         tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4559                                                  GENERIC2_INT_ENABLE, 0);
4560                         WREG32(target, tmp);
4561                 } else {
4562                         tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
4563                         tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
4564                                                  GENERIC2_INT_ENABLE, 1);
4565                         WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
4566
4567                         tmp = RREG32(target);
4568                         tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
4569                                                  GENERIC2_INT_ENABLE, 1);
4570                         WREG32(target, tmp);
4571                 }
4572                 break;
4573         default:
4574                 BUG(); /* KIQ only supports GENERIC2_INT now */
4575                 break;
4576         }
4577         return 0;
4578 }
4579
4580 static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
4581                             struct amdgpu_irq_src *source,
4582                             struct amdgpu_iv_entry *entry)
4583 {
4584         u8 me_id, pipe_id, queue_id;
4585         struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
4586
4587         me_id = (entry->ring_id & 0x0c) >> 2;
4588         pipe_id = (entry->ring_id & 0x03) >> 0;
4589         queue_id = (entry->ring_id & 0x70) >> 4;
4590         DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
4591                    me_id, pipe_id, queue_id);
4592
4593         amdgpu_fence_process(ring);
4594         return 0;
4595 }
4596
4597 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
4598         .name = "gfx_v9_0",
4599         .early_init = gfx_v9_0_early_init,
4600         .late_init = gfx_v9_0_late_init,
4601         .sw_init = gfx_v9_0_sw_init,
4602         .sw_fini = gfx_v9_0_sw_fini,
4603         .hw_init = gfx_v9_0_hw_init,
4604         .hw_fini = gfx_v9_0_hw_fini,
4605         .suspend = gfx_v9_0_suspend,
4606         .resume = gfx_v9_0_resume,
4607         .is_idle = gfx_v9_0_is_idle,
4608         .wait_for_idle = gfx_v9_0_wait_for_idle,
4609         .soft_reset = gfx_v9_0_soft_reset,
4610         .set_clockgating_state = gfx_v9_0_set_clockgating_state,
4611         .set_powergating_state = gfx_v9_0_set_powergating_state,
4612         .get_clockgating_state = gfx_v9_0_get_clockgating_state,
4613 };
4614
4615 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
4616         .type = AMDGPU_RING_TYPE_GFX,
4617         .align_mask = 0xff,
4618         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4619         .support_64bit_ptrs = true,
4620         .vmhub = AMDGPU_GFXHUB,
4621         .get_rptr = gfx_v9_0_ring_get_rptr_gfx,
4622         .get_wptr = gfx_v9_0_ring_get_wptr_gfx,
4623         .set_wptr = gfx_v9_0_ring_set_wptr_gfx,
4624         .emit_frame_size = /* 242 dwords maximum in total if 16 IBs */
4625                 5 +  /* COND_EXEC */
4626                 7 +  /* PIPELINE_SYNC */
4627                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4628                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4629                 2 + /* VM_FLUSH */
4630                 8 +  /* FENCE for VM_FLUSH */
4631                 20 + /* GDS switch */
4632                 4 + /* double SWITCH_BUFFER,
4633                        the first COND_EXEC jumps to the place just
4634                        prior to this double SWITCH_BUFFER */
4635                 5 + /* COND_EXEC */
4636                 7 + /* HDP_flush */
4637                 4 + /* VGT_flush */
4638                 14 + /* CE_META */
4639                 31 + /* DE_META */
4640                 3 + /* CNTX_CTRL */
4641                 5 + /* HDP_INVL */
4642                 8 + 8 + /* FENCE x2 */
4643                 2, /* SWITCH_BUFFER */
4644         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
4645         .emit_ib = gfx_v9_0_ring_emit_ib_gfx,
4646         .emit_fence = gfx_v9_0_ring_emit_fence,
4647         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4648         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4649         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4650         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4651         .test_ring = gfx_v9_0_ring_test_ring,
4652         .test_ib = gfx_v9_0_ring_test_ib,
4653         .insert_nop = amdgpu_ring_insert_nop,
4654         .pad_ib = amdgpu_ring_generic_pad_ib,
4655         .emit_switch_buffer = gfx_v9_ring_emit_sb,
4656         .emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
4657         .init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
4658         .patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
4659         .emit_tmz = gfx_v9_0_ring_emit_tmz,
4660         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4661         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4662         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4663 };
4664
4665 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
4666         .type = AMDGPU_RING_TYPE_COMPUTE,
4667         .align_mask = 0xff,
4668         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4669         .support_64bit_ptrs = true,
4670         .vmhub = AMDGPU_GFXHUB,
4671         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4672         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4673         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4674         .emit_frame_size =
4675                 20 + /* gfx_v9_0_ring_emit_gds_switch */
4676                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4677                 5 + /* hdp invalidate */
4678                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4679                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4680                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4681                 2 + /* gfx_v9_0_ring_emit_vm_flush */
4682                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
4683         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4684         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4685         .emit_fence = gfx_v9_0_ring_emit_fence,
4686         .emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
4687         .emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
4688         .emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
4689         .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
4690         .test_ring = gfx_v9_0_ring_test_ring,
4691         .test_ib = gfx_v9_0_ring_test_ib,
4692         .insert_nop = amdgpu_ring_insert_nop,
4693         .pad_ib = amdgpu_ring_generic_pad_ib,
4694         .set_priority = gfx_v9_0_ring_set_priority_compute,
4695         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4696         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4697         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4698 };
4699
4700 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
4701         .type = AMDGPU_RING_TYPE_KIQ,
4702         .align_mask = 0xff,
4703         .nop = PACKET3(PACKET3_NOP, 0x3FFF),
4704         .support_64bit_ptrs = true,
4705         .vmhub = AMDGPU_GFXHUB,
4706         .get_rptr = gfx_v9_0_ring_get_rptr_compute,
4707         .get_wptr = gfx_v9_0_ring_get_wptr_compute,
4708         .set_wptr = gfx_v9_0_ring_set_wptr_compute,
4709         .emit_frame_size =
4710                 20 + /* gfx_v9_0_ring_emit_gds_switch */
4711                 7 + /* gfx_v9_0_ring_emit_hdp_flush */
4712                 5 + /* hdp invalidate */
4713                 7 + /* gfx_v9_0_ring_emit_pipeline_sync */
4714                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4715                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4716                 2 + /* gfx_v9_0_ring_emit_vm_flush */
4717                 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
4718         .emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
4719         .emit_ib = gfx_v9_0_ring_emit_ib_compute,
4720         .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
4721         .test_ring = gfx_v9_0_ring_test_ring,
4722         .test_ib = gfx_v9_0_ring_test_ib,
4723         .insert_nop = amdgpu_ring_insert_nop,
4724         .pad_ib = amdgpu_ring_generic_pad_ib,
4725         .emit_rreg = gfx_v9_0_ring_emit_rreg,
4726         .emit_wreg = gfx_v9_0_ring_emit_wreg,
4727         .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
4728         .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
4729 };
4730
4731 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
4732 {
4733         int i;
4734
4735         adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;
4736
4737         for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4738                 adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;
4739
4740         for (i = 0; i < adev->gfx.num_compute_rings; i++)
4741                 adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
4742 }
4743
4744 static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
4745         .set = gfx_v9_0_kiq_set_interrupt_state,
4746         .process = gfx_v9_0_kiq_irq,
4747 };
4748
4749 static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
4750         .set = gfx_v9_0_set_eop_interrupt_state,
4751         .process = gfx_v9_0_eop_irq,
4752 };
4753
4754 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
4755         .set = gfx_v9_0_set_priv_reg_fault_state,
4756         .process = gfx_v9_0_priv_reg_irq,
4757 };
4758
4759 static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
4760         .set = gfx_v9_0_set_priv_inst_fault_state,
4761         .process = gfx_v9_0_priv_inst_irq,
4762 };
4763
4764 static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
4765 {
4766         adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4767         adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;
4768
4769         adev->gfx.priv_reg_irq.num_types = 1;
4770         adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;
4771
4772         adev->gfx.priv_inst_irq.num_types = 1;
4773         adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
4774
4775         adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
4776         adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
4777 }
4778
4779 static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
4780 {
4781         switch (adev->asic_type) {
4782         case CHIP_VEGA10:
4783         case CHIP_VEGA12:
4784         case CHIP_VEGA20:
4785         case CHIP_RAVEN:
4786                 adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
4787                 break;
4788         default:
4789                 break;
4790         }
4791 }
4792
4793 static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
4794 {
4795         /* init asic gds info */
4796         adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
4797         adev->gds.gws.total_size = 64;
4798         adev->gds.oa.total_size = 16;
4799
4800         if (adev->gds.mem.total_size == 64 * 1024) {
4801                 adev->gds.mem.gfx_partition_size = 4096;
4802                 adev->gds.mem.cs_partition_size = 4096;
4803
4804                 adev->gds.gws.gfx_partition_size = 4;
4805                 adev->gds.gws.cs_partition_size = 4;
4806
4807                 adev->gds.oa.gfx_partition_size = 4;
4808                 adev->gds.oa.cs_partition_size = 1;
4809         } else {
4810                 adev->gds.mem.gfx_partition_size = 1024;
4811                 adev->gds.mem.cs_partition_size = 1024;
4812
4813                 adev->gds.gws.gfx_partition_size = 16;
4814                 adev->gds.gws.cs_partition_size = 16;
4815
4816                 adev->gds.oa.gfx_partition_size = 4;
4817                 adev->gds.oa.cs_partition_size = 4;
4818         }
4819 }
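
/*
 * The partition sizes above simply scale with the total GDS size read
 * from GDS_VMID0_SIZE: 4KB gfx/CS memory partitions when 64KB of GDS
 * is present, 1KB otherwise, with the GWS and OA resources split
 * accordingly.
 */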
4820
4821 static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4822                                                  u32 bitmap)
4823 {
4824         u32 data;
4825
4826         if (!bitmap)
4827                 return;
4828
4829         data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4830         data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4831
4832         WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
4833 }
4834
4835 static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
4836 {
4837         u32 data, mask;
4838
4839         data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
4840         data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);
4841
4842         data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4843         data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4844
4845         mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4846
4847         return (~data) & mask;
4848 }
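
/*
 * The hardware reports *inactive* CUs: OR-ing the fuse
 * (CC_GC_SHADER_ARRAY_CONFIG) and user (GC_USER_SHADER_ARRAY_CONFIG)
 * masks, inverting, and masking down to a max_cu_per_sh-wide bitmask
 * yields the active-CU bitmap returned here.
 */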
4849
4850 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
4851                                  struct amdgpu_cu_info *cu_info)
4852 {
4853         int i, j, k, counter, active_cu_number = 0;
4854         u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
4855         unsigned disable_masks[4 * 2];
4856
4857         if (!adev || !cu_info)
4858                 return -EINVAL;
4859
4860         amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
4861
4862         mutex_lock(&adev->grbm_idx_mutex);
4863         for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4864                 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4865                         mask = 1;
4866                         ao_bitmap = 0;
4867                         counter = 0;
4868                         gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
4869                         if (i < 4 && j < 2)
4870                                 gfx_v9_0_set_user_cu_inactive_bitmap(
4871                                         adev, disable_masks[i * 2 + j]);
4872                         bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
4873                         cu_info->bitmap[i][j] = bitmap;
4874
4875                         for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4876                                 if (bitmap & mask) {
4877                                         if (counter < adev->gfx.config.max_cu_per_sh)
4878                                                 ao_bitmap |= mask;
4879                                         counter++;
4880                                 }
4881                                 mask <<= 1;
4882                         }
4883                         active_cu_number += counter;
4884                         if (i < 2 && j < 2)
4885                                 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4886                         cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4887                 }
4888         }
4889         gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
4890         mutex_unlock(&adev->grbm_idx_mutex);
4891
4892         cu_info->number = active_cu_number;
4893         cu_info->ao_cu_mask = ao_cu_mask;
4894         cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4895
4896         return 0;
4897 }
4898
4899 const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
4900 {
4901         .type = AMD_IP_BLOCK_TYPE_GFX,
4902         .major = 9,
4903         .minor = 0,
4904         .rev = 0,
4905         .funcs = &gfx_v9_0_ip_funcs,
4906 };