/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"

#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "hdp/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"

#define GFX9_NUM_GFX_RINGS     1
#define GFX9_MEC_HPD_SIZE 2048
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L

MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega12_ce.bin");
MODULE_FIRMWARE("amdgpu/vega12_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega12_me.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec.bin");
MODULE_FIRMWARE("amdgpu/vega12_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega12_rlc.bin");

MODULE_FIRMWARE("amdgpu/vega20_ce.bin");
MODULE_FIRMWARE("amdgpu/vega20_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega20_me.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec.bin");
MODULE_FIRMWARE("amdgpu/vega20_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega20_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");

static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0x0000f000, 0x00012107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x2a114042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0x00008000, 0x00048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000)
};

static const struct soc15_reg_golden golden_settings_gc_9_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};

static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24000042),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04048000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_MODE_CNTL_1, 0x06000000, 0x06000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x00000800)
};

static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
};

static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x00000080, 0x04000080),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xffff77ff, 0x24104041),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xffffffff, 0x04040000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410),
	SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000)
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
	mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
};

static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
{
	mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0,
	mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};

#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);

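/*
 * Program the "golden" register settings: per-ASIC register values that
 * deviate from the hardware defaults. Each chip applies its family list
 * plus a chip-specific list; the common gc_9_x list is applied last so it
 * takes effect on every ASIC.
 */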
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg10,
						ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_VEGA12:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1,
						ARRAY_SIZE(golden_settings_gc_9_2_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_2_1_vg12,
						ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
		break;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0,
						ARRAY_SIZE(golden_settings_gc_9_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_0_vg20,
						ARRAY_SIZE(golden_settings_gc_9_0_vg20));
		break;
	case CHIP_RAVEN:
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1,
						ARRAY_SIZE(golden_settings_gc_9_1));
		soc15_program_register_sequence(adev,
						golden_settings_gc_9_1_rv1,
						ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}

	soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
					(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}

static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}

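/*
 * Emit a PACKET3_WRITE_DATA packet that writes @val to register @reg via
 * the selected CP engine; @wc requests a write confirmation before the CP
 * continues.
 */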
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

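/*
 * Emit a PACKET3_WAIT_REG_MEM packet: the CP polls a register or a memory
 * location (selected by @mem_space) until (value & @mask) equals @ref,
 * re-checking every @inv intervals. Memory addresses must be dword aligned.
 */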
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

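/*
 * Basic ring liveness test: seed a scratch register with 0xCAFEDEAD, ask
 * the CP to overwrite it with 0xDEADBEEF via SET_UCONFIG_REG, then poll
 * until the new value shows up or adev->usec_timeout expires.
 */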
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
			  ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}

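/*
 * Indirect buffer test: submit a small IB that writes 0xDEADBEEF to a
 * writeback slot in system memory, then wait on the fence and verify the
 * value landed. This exercises the full submission path, not just the ring.
 */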
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 16, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("ib test on ring %d failed\n", ring->idx);
		r = -EINVAL;
	}

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}

static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

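/*
 * Fetch and validate the PFP, ME, CE, RLC, MEC and (optional) MEC2 firmware
 * images for the current ASIC. When the PSP loads firmware, each image is
 * also registered in adev->firmware.ucode[] so the PSP can upload it later.
 * On any failure, all firmware that was requested is released again.
 */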
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2 && version_minor == 1)
		adev->gfx.rlc.is_rlc_v2_1 = true;

	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_0_init_rlc_ext_microcode(adev);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
		adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
		le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
		le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		if (adev->gfx.rlc.is_rlc_v2_1 &&
		    adev->gfx.rlc.save_restore_list_cntl_size_bytes &&
		    adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}

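/*
 * Compute the size, in dwords, of the clear-state buffer (CSB): preamble,
 * context control, one SET_CONTEXT_REG packet per context extent, and the
 * trailing end-of-clear-state and CLEAR_STATE packets.
 */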
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}

static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}

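/*
 * Set up the RLC's load balancing (LBPW, load balancing per watt):
 * thresholds, sample counters and the always-active CU mask that the RLC
 * uses when deciding which CUs to power down.
 */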
static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev)
{
	uint32_t data;

	/* set mmRLC_LB_THR_CONFIG_1/2/3/4 */
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x0333A5A7);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077);
	WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x30 | 0x40 << 8 | 0x02FA << 16));

	/* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000);

	/* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000500);

	mutex_lock(&adev->grbm_idx_mutex);
	/* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	/* set mmRLC_LB_PARAMS = 0x003F_1006 */
	data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010);
	data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F);
	WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data);

	/* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */
	data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7);
	data &= 0x0000FFFF;
	data |= 0x00C00000;
	WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data);

	/* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */
	WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF);

	/* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved,
	 * but used for RLC_LB_CNTL configuration */
	data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK;
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09);
	data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000);
	WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data);
	mutex_unlock(&adev->grbm_idx_mutex);
}

static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

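/*
 * Copy the CP jump tables of all five microcode engines (CE, PFP, ME, MEC,
 * MEC2) back to back into the RLC cp_table buffer so the RLC can save and
 * restore CP state on Raven.
 */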
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}

static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}

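/*
 * Allocate the RLC objects: the clear-state BO in VRAM (filled from the
 * gfx9 clear-state data) and, on Raven, the CP jump-table BO plus the LBPW
 * register setup.
 */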
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
				r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_err(adev->dev,
				"(%d) failed to create cp table bo\n", r);
			gfx_v9_0_rlc_fini(adev);
			return r;
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

		gfx_v9_0_init_lbpw(adev);
	}

	return 0;
}

static int gfx_v9_0_csb_vram_pin(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj,
			AMDGPU_GEM_DOMAIN_VRAM);
	if (!r)
		adev->gfx.rlc.clear_state_gpu_addr =
			amdgpu_bo_gpu_offset(adev->gfx.rlc.clear_state_obj);

	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return r;
}

static void gfx_v9_0_csb_vram_unpin(struct amdgpu_device *adev)
{
	int r;

	if (!adev->gfx.rlc.clear_state_obj)
		return;

	r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}
}

static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

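/*
 * Allocate MEC resources: an EOP buffer in GTT (GFX9_MEC_HPD_SIZE bytes per
 * acquired compute queue) and a GTT BO holding a copy of the MEC firmware
 * image.
 */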
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;

	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

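/*
 * Read one value from a wave's context via the SQ indirect register
 * interface: program SQ_IND_INDEX with the target wave/SIMD/address and
 * read the result back from SQ_IND_DATA.
 */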
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t thread,
				     uint32_t start, uint32_t size,
				     uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	soc15_grbm_select(adev, me, pipe, q, 0);
}

static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q
};

static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	int err;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VEGA12:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
		DRM_INFO("fix gfx.config for vega12\n");
		break;
	case CHIP_VEGA20:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
		gb_addr_config &= ~0xf3e777ff;
		gb_addr_config |= 0x22014042;
		/* check vbios table if gpu info is not available */
		err = amdgpu_atomfirmware_get_gfx_info(adev);
		if (err)
			return err;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

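/*
 * Allocate one next-generation geometry (NGG) buffer in VRAM. The size is
 * scaled by the number of shader engines; a module parameter (size_se) can
 * override the built-in default.
 */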
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}

static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}

static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE);
	adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}

static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data, base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_DST_SEL(1) |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT |
				adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0);

	amdgpu_ring_commit(ring);

	return 0;
}

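/*
 * Initialize one compute ring: map it onto a MEC/pipe/queue triple, assign
 * its doorbell and EOP address, and hook it up to the per-pipe EOP
 * interrupt source.
 */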
static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX9_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		adev->gfx.mec.num_mec = 2;
		break;
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}

	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		if (!i)
			sprintf(ring->name, "gfx");
		else
			sprintf(ring->name, "gfx_%d", i);
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v9_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE);
	if (r) {
		DRM_ERROR("Failed to init KIQ BOs!\n");
		return r;
	}

	kiq = &adev->gfx.kiq;
	r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
	if (r)
		return r;

	/* create MQD for all compute queues as well as KIQ for SRIOV case */
	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
	if (r)
		return r;

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	r = gfx_v9_0_gpu_early_init(adev);
	if (r)
		return r;

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}

static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_compute_mqd_sw_fini(adev);
	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
	amdgpu_gfx_kiq_fini(adev);

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
				&adev->gfx.rlc.clear_state_gpu_addr,
				(void **)&adev->gfx.rlc.cs_ptr);
	if (adev->asic_type == CHIP_RAVEN) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
				&adev->gfx.rlc.cp_table_gpu_addr,
				(void **)&adev->gfx.rlc.cp_table_ptr);
	}
	gfx_v9_0_free_microcode(adev);

	return 0;
}

static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}

static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}

static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}

static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}

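/*
 * VMIDs 8-15 are reserved for compute; program their SH_MEM apertures to
 * the canonical compute layout described below, selecting each VMID in turn
 * through the SRBM.
 */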
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

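/*
 * One-time GRBM/SH setup: program SH_MEM_CONFIG and the aperture bases for
 * every GFXHUB VMID, reserve the compute VMIDs, and size the PA_SC FIFOs
 * with broadcast writes to all shader engines.
 */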
1751 static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
1756 WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1758 gfx_v9_0_tiling_mode_table_init(adev);
1760 gfx_v9_0_setup_rb(adev);
1761 gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);
1762 adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2);
1764 /* XXX SH_MEM regs */
1765 /* where to put LDS, scratch, GPUVM in FSA64 space */
1766 mutex_lock(&adev->srbm_mutex);
1767 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids; i++) {
1768 soc15_grbm_select(adev, 0, 0, 0, i);
1769 /* CP and shaders */
1771 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1772 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1773 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1774 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
1776 tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1777 SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1778 WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
1779 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1780 (adev->gmc.private_aperture_start >> 48));
1781 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1782 (adev->gmc.shared_aperture_start >> 48));
1783 WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
1786 soc15_grbm_select(adev, 0, 0, 0, 0);
1788 mutex_unlock(&adev->srbm_mutex);
1790 gfx_v9_0_init_compute_vmid(adev);
1792 mutex_lock(&adev->grbm_idx_mutex);
1794 * making sure that the following register writes will be broadcast
1795 * to all the shaders
1797 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1799 WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
1800 (adev->gfx.config.sc_prim_fifo_size_frontend <<
1801 PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
1802 (adev->gfx.config.sc_prim_fifo_size_backend <<
1803 PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
1804 (adev->gfx.config.sc_hiz_tile_fifo_size <<
1805 PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
1806 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
1807 PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
1808 mutex_unlock(&adev->grbm_idx_mutex);
1812 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
1817 mutex_lock(&adev->grbm_idx_mutex);
1818 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1819 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1820 gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
1821 for (k = 0; k < adev->usec_timeout; k++) {
1822 if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
1826 if (k == adev->usec_timeout) {
1827 gfx_v9_0_select_se_sh(adev, 0xffffffff,
1828 0xffffffff, 0xffffffff);
1829 mutex_unlock(&adev->grbm_idx_mutex);
1830 DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1836 gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1837 mutex_unlock(&adev->grbm_idx_mutex);
1839 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1840 RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1841 RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1842 RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1843 for (k = 0; k < adev->usec_timeout; k++) {
1844 if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1850 static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1853 u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
1855 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1856 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1857 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1858 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
1860 WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
1863 static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
1866 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
1867 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1868 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
1869 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1870 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
1871 adev->gfx.rlc.clear_state_size);
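/*
 * Note: the clear-state indirect buffer address is split across the
 * HI/LO register pair, and masking the low two bits keeps the address
 * the RLC fetches from dword-aligned.
 */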
1874 static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
1875 int indirect_offset,
1877 int *unique_indirect_regs,
1878 int unique_indirect_reg_count,
1879 int *indirect_start_offsets,
1880 int *indirect_start_offsets_count,
1881 int max_start_offsets_count)
1885 for (; indirect_offset < list_size; indirect_offset++) {
1886 WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
1887 indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
1888 *indirect_start_offsets_count = *indirect_start_offsets_count + 1;
1890 while (register_list_format[indirect_offset] != 0xFFFFFFFF) {
1891 indirect_offset += 2;
1893 /* look for the matching index */
1894 for (idx = 0; idx < unique_indirect_reg_count; idx++) {
1895 if (unique_indirect_regs[idx] ==
1896 register_list_format[indirect_offset] ||
1897 !unique_indirect_regs[idx])
1901 BUG_ON(idx >= unique_indirect_reg_count);
1903 if (!unique_indirect_regs[idx])
1904 unique_indirect_regs[idx] = register_list_format[indirect_offset];
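/*
 * Layout of the indirect portion walked above (as implied by the
 * parsing, not by a spec): blocks of (register, value) dword pairs,
 * each terminated by a 0xFFFFFFFF sentinel, with
 * indirect_start_offsets[] recording where each block begins:
 *
 *	[reg][val][reg][val]...[0xFFFFFFFF]	<- block 0
 *	[reg][val]...          [0xFFFFFFFF]	<- block 1
 */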
1911 static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
1913 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1914 int unique_indirect_reg_count = 0;
1916 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1917 int indirect_start_offsets_count = 0;
1923 u32 *register_list_format =
1924 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1925 if (!register_list_format)
1927 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1928 adev->gfx.rlc.reg_list_format_size_bytes);
1930 /* setup unique_indirect_regs array and indirect_start_offsets array */
1931 unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs);
1932 gfx_v9_1_parse_ind_reg_list(register_list_format,
1933 adev->gfx.rlc.reg_list_format_direct_reg_list_length,
1934 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1935 unique_indirect_regs,
1936 unique_indirect_reg_count,
1937 indirect_start_offsets,
1938 &indirect_start_offsets_count,
1939 ARRAY_SIZE(indirect_start_offsets));
1941 /* enable auto inc in case it is disabled */
1942 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1943 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1944 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1946 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1947 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1948 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1949 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1950 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1951 adev->gfx.rlc.register_restore[i]);
1953 /* load indirect register */
1954 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1955 adev->gfx.rlc.reg_list_format_start);
1957 /* direct register portion */
1958 for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++)
1959 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1960 register_list_format[i]);
1962 /* indirect register portion */
1963 while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) {
1964 if (register_list_format[i] == 0xFFFFFFFF) {
1965 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
1969 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
1970 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]);
1972 for (j = 0; j < unique_indirect_reg_count; j++) {
1973 if (register_list_format[i] == unique_indirect_regs[j]) {
1974 WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j);
1979 BUG_ON(j >= unique_indirect_reg_count);
1984 /* set save/restore list size */
1985 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
1986 list_size = list_size >> 1;
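/* the list appears to hold (offset, value) dword pairs, so the entry
 * count handed to the RLC is half the dword count computed above
 */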
1987 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1988 adev->gfx.rlc.reg_restore_list_size);
1989 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1991 /* write the starting offsets to RLC scratch ram */
1992 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1993 adev->gfx.rlc.starting_offsets_start);
1994 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1995 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1996 indirect_start_offsets[i]);
1998 /* load unique indirect regs */
1999 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
2000 if (unique_indirect_regs[i] != 0) {
2001 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0)
2002 + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i],
2003 unique_indirect_regs[i] & 0x3FFFF);
2005 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0)
2006 + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i],
2007 unique_indirect_regs[i] >> 20);
2011 kfree(register_list_format);
2015 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
2017 WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
2020 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
2024 uint32_t default_data = 0;
2026 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
2027 if (enable) {
2028 /* enable GFXIP control over CGPG */
2029 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2030 if (default_data != data)
2031 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2034 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
2035 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
2036 if (default_data != data)
2037 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2039 /* restore GFXIP control over CGPG */
2040 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
2041 if (default_data != data)
2042 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
2046 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
2050 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2051 AMD_PG_SUPPORT_GFX_SMG |
2052 AMD_PG_SUPPORT_GFX_DMG)) {
2053 /* init IDLE_POLL_COUNT = 60 */
2054 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
2055 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
2056 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2057 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
2059 /* init RLC PG Delay */
2061 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
2062 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
2063 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
2064 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
2065 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
2067 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
2068 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
2069 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
2070 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
2072 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
2073 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
2074 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
2075 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
2077 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
2078 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
2080 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
2081 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
2082 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
2084 pwr_10_0_gfxip_control_over_cgpg(adev, true);
2088 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
2092 uint32_t default_data = 0;
2094 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2095 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2096 SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
2098 if (default_data != data)
2099 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2102 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
2106 uint32_t default_data = 0;
2108 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2109 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2110 SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
2112 if (default_data != data)
2113 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2116 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
2120 uint32_t default_data = 0;
2122 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2123 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2126 if (default_data != data)
2127 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2130 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
2133 uint32_t data, default_data;
2135 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2136 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2137 GFX_POWER_GATING_ENABLE,
2139 if (default_data != data)
2140 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2143 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
2146 uint32_t data, default_data;
2148 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2149 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2150 GFX_PIPELINE_PG_ENABLE,
2152 if (default_data != data)
2153 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2156 /* read any GFX register to wake up GFX */
2157 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2160 static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2163 uint32_t data, default_data;
2165 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2166 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2167 STATIC_PER_CU_PG_ENABLE,
2169 if (default_data != data)
2170 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2173 static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2176 uint32_t data, default_data;
2178 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2179 data = REG_SET_FIELD(data, RLC_PG_CNTL,
2180 DYN_PER_CU_PG_ENABLE,
2182 if (default_data != data)
2183 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2186 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2188 gfx_v9_0_init_csb(adev);
2191 * The RLC save/restore list is supported from RLC v2_1
2192 * and is required by the gfxoff feature.
2194 if (adev->gfx.rlc.is_rlc_v2_1) {
2195 gfx_v9_1_init_rlc_save_restore_list(adev);
2196 gfx_v9_0_enable_save_restore_machine(adev);
2199 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2200 AMD_PG_SUPPORT_GFX_SMG |
2201 AMD_PG_SUPPORT_GFX_DMG |
2203 AMD_PG_SUPPORT_GDS |
2204 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2205 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2206 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2207 gfx_v9_0_init_gfx_power_gating(adev);
2211 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2213 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
2214 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2215 gfx_v9_0_wait_for_rlc_serdes(adev);
2218 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2220 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2222 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2226 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2228 #ifdef AMDGPU_RLC_DEBUG_RETRY
2232 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2234 /* carrizo enables the cp interrupt only after cp is initialized */
2235 if (!(adev->flags & AMD_IS_APU))
2236 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2240 #ifdef AMDGPU_RLC_DEBUG_RETRY
2241 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2242 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2243 if (rlc_ucode_ver == 0x108) {
2244 DRM_INFO("Using rlc debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2245 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2246 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2247 * default is 0x9C4 to create a 100us interval */
2248 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2249 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2250 * to disable the page fault retry interrupts, default is 0x100 */
2252 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
2257 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2259 const struct rlc_firmware_header_v2_0 *hdr;
2260 const __le32 *fw_data;
2261 unsigned i, fw_size;
2263 if (!adev->gfx.rlc_fw)
2266 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2267 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2269 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2270 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2271 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2273 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2274 RLCG_UCODE_LOADING_START_ADDRESS);
2275 for (i = 0; i < fw_size; i++)
2276 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2277 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2282 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2286 if (amdgpu_sriov_vf(adev)) {
2287 gfx_v9_0_init_csb(adev);
2291 gfx_v9_0_rlc_stop(adev);
2294 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2297 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2299 gfx_v9_0_rlc_reset(adev);
2301 gfx_v9_0_init_pg(adev);
2303 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2304 /* legacy rlc firmware loading */
2305 r = gfx_v9_0_rlc_load_microcode(adev);
2310 if (adev->asic_type == CHIP_RAVEN) {
2311 if (amdgpu_lbpw != 0)
2312 gfx_v9_0_enable_lbpw(adev, true);
2314 gfx_v9_0_enable_lbpw(adev, false);
2317 gfx_v9_0_rlc_start(adev);
2322 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2325 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2327 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2328 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2329 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2331 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2332 adev->gfx.gfx_ring[i].ready = false;
2334 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
2338 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2340 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2341 const struct gfx_firmware_header_v1_0 *ce_hdr;
2342 const struct gfx_firmware_header_v1_0 *me_hdr;
2343 const __le32 *fw_data;
2344 unsigned i, fw_size;
2346 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2349 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2350 adev->gfx.pfp_fw->data;
2351 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2352 adev->gfx.ce_fw->data;
2353 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2354 adev->gfx.me_fw->data;
2356 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2357 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2358 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2360 gfx_v9_0_cp_gfx_enable(adev, false);
2363 fw_data = (const __le32 *)
2364 (adev->gfx.pfp_fw->data +
2365 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2366 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2367 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2368 for (i = 0; i < fw_size; i++)
2369 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2370 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2373 fw_data = (const __le32 *)
2374 (adev->gfx.ce_fw->data +
2375 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2376 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2377 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2378 for (i = 0; i < fw_size; i++)
2379 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2380 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2383 fw_data = (const __le32 *)
2384 (adev->gfx.me_fw->data +
2385 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2386 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2387 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2388 for (i = 0; i < fw_size; i++)
2389 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2390 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
2395 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2397 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2398 const struct cs_section_def *sect = NULL;
2399 const struct cs_extent_def *ext = NULL;
2403 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2404 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2406 gfx_v9_0_cp_gfx_enable(adev, true);
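/*
 * The "+ 4 + 3" below matches the packets emitted after the clear
 * state: SET_BASE is 4 dwords and SET_UCONFIG_REG is 3 dwords (a
 * reading of the writes in this function, not a documented formula).
 */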
2408 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
2410 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2414 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2415 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2417 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2418 amdgpu_ring_write(ring, 0x80000000);
2419 amdgpu_ring_write(ring, 0x80000000);
2421 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2422 for (ext = sect->section; ext->extent != NULL; ++ext) {
2423 if (sect->id == SECT_CONTEXT) {
2424 amdgpu_ring_write(ring,
2425 PACKET3(PACKET3_SET_CONTEXT_REG,
2427 amdgpu_ring_write(ring,
2428 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2429 for (i = 0; i < ext->reg_count; i++)
2430 amdgpu_ring_write(ring, ext->extent[i]);
2435 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2436 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2438 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2439 amdgpu_ring_write(ring, 0);
2441 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2442 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2443 amdgpu_ring_write(ring, 0x8000);
2444 amdgpu_ring_write(ring, 0x8000);
2446 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2447 tmp = (PACKET3_SET_UCONFIG_REG_INDEX_TYPE |
2448 (SOC15_REG_OFFSET(GC, 0, mmVGT_INDEX_TYPE) - PACKET3_SET_UCONFIG_REG_START));
2449 amdgpu_ring_write(ring, tmp);
2450 amdgpu_ring_write(ring, 0);
2452 amdgpu_ring_commit(ring);
2457 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2459 struct amdgpu_ring *ring;
2462 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2464 /* Set the write pointer delay */
2465 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2467 /* set the RB to use vmid 0 */
2468 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2470 /* Set ring buffer size */
2471 ring = &adev->gfx.gfx_ring[0];
2472 rb_bufsz = order_base_2(ring->ring_size / 8);
2473 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2474 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2476 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2478 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2480 /* Initialize the ring buffer's write pointers */
2482 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2483 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2485 /* set the wb address whether it's enabled or not */
2486 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2487 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2488 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2490 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2491 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2492 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2495 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2497 rb_addr = ring->gpu_addr >> 8;
2498 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2499 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2501 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2502 if (ring->use_doorbell) {
2503 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2504 DOORBELL_OFFSET, ring->doorbell_index);
2505 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2508 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2510 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2512 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2513 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2514 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2516 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2517 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2520 /* start the ring */
2521 gfx_v9_0_cp_gfx_start(adev);
2527 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2532 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2534 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2535 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2536 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2537 adev->gfx.compute_ring[i].ready = false;
2538 adev->gfx.kiq.ring.ready = false;
2543 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2545 const struct gfx_firmware_header_v1_0 *mec_hdr;
2546 const __le32 *fw_data;
2550 if (!adev->gfx.mec_fw)
2553 gfx_v9_0_cp_compute_enable(adev, false);
2555 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2556 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2558 fw_data = (const __le32 *)
2559 (adev->gfx.mec_fw->data +
2560 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2562 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2563 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2564 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2566 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2567 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2568 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2569 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2572 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2573 mec_hdr->jt_offset);
2574 for (i = 0; i < mec_hdr->jt_size; i++)
2575 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2576 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2578 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2579 adev->gfx.mec_fw_version);
2580 /* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
2586 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2589 struct amdgpu_device *adev = ring->adev;
2591 /* tell RLC which queue is the KIQ */
2592 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2594 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2595 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2597 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2600 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2602 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2603 uint32_t scratch, tmp = 0;
2604 uint64_t queue_mask = 0;
2607 for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
2608 if (!test_bit(i, adev->gfx.mec.queue_bitmap))
2611 /* This situation may be hit in the future if a new HW
2612 * generation exposes more than 64 queues. If so, the
2613 * definition of queue_mask needs updating */
2614 if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
2615 DRM_ERROR("Invalid KCQ enabled: %d\n", i);
2619 queue_mask |= (1ull << i);
2622 r = amdgpu_gfx_scratch_get(adev, &scratch);
2624 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2627 WREG32(scratch, 0xCAFEDEAD);
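/*
 * Completion handshake: seed the scratch register with 0xCAFEDEAD,
 * append a SET_UCONFIG_REG write of 0xDEADBEEF at the end of the KIQ
 * submission, then poll the scratch register (loop below) until the
 * sentinel flips.
 */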
2629 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2631 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2632 amdgpu_gfx_scratch_free(adev, scratch);
2637 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2638 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2639 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2640 amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
2641 amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
2642 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2643 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2644 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2645 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
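/*
 * SET_RESOURCES hands the KIQ the set of compute queues it may
 * schedule (queue_mask built above, one bit per MEC queue); the
 * MAP_QUEUES packets that follow then register each KCQ's MQD and
 * wptr address with the KIQ.
 */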
2646 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2647 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2648 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2649 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2651 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2653 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
2654 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2655 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2656 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2657 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2658 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2659 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2660 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
2661 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2662 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2663 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2664 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2665 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2666 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2667 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2669 /* write to scratch for completion */
2670 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2671 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2672 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2673 amdgpu_ring_commit(kiq_ring);
2675 for (i = 0; i < adev->usec_timeout; i++) {
2676 tmp = RREG32(scratch);
2677 if (tmp == 0xDEADBEEF)
2681 if (i >= adev->usec_timeout) {
2682 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2686 amdgpu_gfx_scratch_free(adev, scratch);
2691 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2693 struct amdgpu_device *adev = ring->adev;
2694 struct v9_mqd *mqd = ring->mqd_ptr;
2695 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2698 mqd->header = 0xC0310800;
2699 mqd->compute_pipelinestat_enable = 0x00000001;
2700 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2701 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2702 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2703 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2704 mqd->compute_misc_reserved = 0x00000003;
2706 mqd->dynamic_cu_mask_addr_lo =
2707 lower_32_bits(ring->mqd_gpu_addr
2708 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2709 mqd->dynamic_cu_mask_addr_hi =
2710 upper_32_bits(ring->mqd_gpu_addr
2711 + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
2713 eop_base_addr = ring->eop_gpu_addr >> 8;
2714 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2715 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2717 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2718 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2719 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2720 (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
2722 mqd->cp_hqd_eop_control = tmp;
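/*
 * Worked example: GFX9_MEC_HPD_SIZE is 2048 bytes = 512 dwords, so
 * order_base_2(512) - 1 = 8 and the hardware decodes 2^(8+1) = 512
 * dwords, matching the allocation.
 */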
2724 /* enable doorbell? */
2725 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2727 if (ring->use_doorbell) {
2728 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2729 DOORBELL_OFFSET, ring->doorbell_index);
2730 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2732 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2733 DOORBELL_SOURCE, 0);
2734 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2737 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2741 mqd->cp_hqd_pq_doorbell_control = tmp;
2743 /* disable the queue if it's active */
2745 mqd->cp_hqd_dequeue_request = 0;
2746 mqd->cp_hqd_pq_rptr = 0;
2747 mqd->cp_hqd_pq_wptr_lo = 0;
2748 mqd->cp_hqd_pq_wptr_hi = 0;
2750 /* set the pointer to the MQD */
2751 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2752 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2754 /* set MQD vmid to 0 */
2755 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2756 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2757 mqd->cp_mqd_control = tmp;
2759 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2760 hqd_gpu_addr = ring->gpu_addr >> 8;
2761 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2762 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2764 /* set up the HQD, this is similar to CP_RB0_CNTL */
2765 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2766 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2767 (order_base_2(ring->ring_size / 4) - 1));
2768 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2769 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2771 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2773 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2774 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2775 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2776 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2777 mqd->cp_hqd_pq_control = tmp;
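/*
 * Worked example for QUEUE_SIZE: a 64KB ring holds 16384 dwords, so
 * order_base_2(16384) - 1 = 13, and the hardware decodes 2^(13+1) =
 * 16384 dwords (assuming the same 2^(n+1) encoding as
 * CP_HQD_EOP_CONTROL above).
 */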
2779 /* set the wb address whether it's enabled or not */
2780 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2781 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2782 mqd->cp_hqd_pq_rptr_report_addr_hi =
2783 upper_32_bits(wb_gpu_addr) & 0xffff;
2785 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2786 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2787 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2788 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2791 /* enable the doorbell if requested */
2792 if (ring->use_doorbell) {
2793 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2794 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2795 DOORBELL_OFFSET, ring->doorbell_index);
2797 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2799 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2800 DOORBELL_SOURCE, 0);
2801 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2805 mqd->cp_hqd_pq_doorbell_control = tmp;
2807 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2809 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2811 /* set the vmid for the queue */
2812 mqd->cp_hqd_vmid = 0;
2814 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2815 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2816 mqd->cp_hqd_persistent_state = tmp;
2818 /* set MIN_IB_AVAIL_SIZE */
2819 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2820 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2821 mqd->cp_hqd_ib_control = tmp;
2823 /* activate the queue */
2824 mqd->cp_hqd_active = 1;
2829 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2831 struct amdgpu_device *adev = ring->adev;
2832 struct v9_mqd *mqd = ring->mqd_ptr;
2835 /* disable wptr polling */
2836 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2838 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2839 mqd->cp_hqd_eop_base_addr_lo);
2840 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2841 mqd->cp_hqd_eop_base_addr_hi);
2843 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2844 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2845 mqd->cp_hqd_eop_control);
2847 /* enable doorbell? */
2848 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2849 mqd->cp_hqd_pq_doorbell_control);
2851 /* disable the queue if it's active */
2852 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2853 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2854 for (j = 0; j < adev->usec_timeout; j++) {
2855 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2859 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2860 mqd->cp_hqd_dequeue_request);
2861 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2862 mqd->cp_hqd_pq_rptr);
2863 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2864 mqd->cp_hqd_pq_wptr_lo);
2865 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2866 mqd->cp_hqd_pq_wptr_hi);
2869 /* set the pointer to the MQD */
2870 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2871 mqd->cp_mqd_base_addr_lo);
2872 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2873 mqd->cp_mqd_base_addr_hi);
2875 /* set MQD vmid to 0 */
2876 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2877 mqd->cp_mqd_control);
2879 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2880 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2881 mqd->cp_hqd_pq_base_lo);
2882 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2883 mqd->cp_hqd_pq_base_hi);
2885 /* set up the HQD, this is similar to CP_RB0_CNTL */
2886 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2887 mqd->cp_hqd_pq_control);
2889 /* set the wb address whether it's enabled or not */
2890 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2891 mqd->cp_hqd_pq_rptr_report_addr_lo);
2892 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2893 mqd->cp_hqd_pq_rptr_report_addr_hi);
2895 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2896 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2897 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2898 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2899 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2901 /* enable the doorbell if requested */
2902 if (ring->use_doorbell) {
2903 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2904 (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2905 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2906 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2909 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2910 mqd->cp_hqd_pq_doorbell_control);
2912 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2913 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2914 mqd->cp_hqd_pq_wptr_lo);
2915 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2916 mqd->cp_hqd_pq_wptr_hi);
2918 /* set the vmid for the queue */
2919 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2921 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2922 mqd->cp_hqd_persistent_state);
2924 /* activate the queue */
2925 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2926 mqd->cp_hqd_active);
2928 if (ring->use_doorbell)
2929 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2934 static int gfx_v9_0_kiq_fini_register(struct amdgpu_ring *ring)
2936 struct amdgpu_device *adev = ring->adev;
2939 /* disable the queue if it's active */
2940 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2942 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2944 for (j = 0; j < adev->usec_timeout; j++) {
2945 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2950 if (j == adev->usec_timeout) {
2951 DRM_DEBUG("KIQ dequeue request failed.\n");
2953 /* Manual disable if dequeue request times out */
2954 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, 0);
2957 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2961 WREG32_SOC15(GC, 0, mmCP_HQD_IQ_TIMER, 0);
2962 WREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL, 0);
2963 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, 0);
2964 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2965 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
2966 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, 0);
2967 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, 0);
2968 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, 0);
2973 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2975 struct amdgpu_device *adev = ring->adev;
2976 struct v9_mqd *mqd = ring->mqd_ptr;
2977 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2979 gfx_v9_0_kiq_setting(ring);
2981 if (adev->in_gpu_reset) { /* for GPU_RESET case */
2982 /* reset MQD to a clean status */
2983 if (adev->gfx.mec.mqd_backup[mqd_idx])
2984 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2986 /* reset ring buffer */
2988 amdgpu_ring_clear_ring(ring);
2990 mutex_lock(&adev->srbm_mutex);
2991 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2992 gfx_v9_0_kiq_init_register(ring);
2993 soc15_grbm_select(adev, 0, 0, 0, 0);
2994 mutex_unlock(&adev->srbm_mutex);
2996 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2997 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2998 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2999 mutex_lock(&adev->srbm_mutex);
3000 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3001 gfx_v9_0_mqd_init(ring);
3002 gfx_v9_0_kiq_init_register(ring);
3003 soc15_grbm_select(adev, 0, 0, 0, 0);
3004 mutex_unlock(&adev->srbm_mutex);
3006 if (adev->gfx.mec.mqd_backup[mqd_idx])
3007 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3013 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
3015 struct amdgpu_device *adev = ring->adev;
3016 struct v9_mqd *mqd = ring->mqd_ptr;
3017 int mqd_idx = ring - &adev->gfx.compute_ring[0];
3019 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3020 memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
3021 ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
3022 ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
3023 mutex_lock(&adev->srbm_mutex);
3024 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3025 gfx_v9_0_mqd_init(ring);
3026 soc15_grbm_select(adev, 0, 0, 0, 0);
3027 mutex_unlock(&adev->srbm_mutex);
3029 if (adev->gfx.mec.mqd_backup[mqd_idx])
3030 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
3031 } else if (adev->in_gpu_reset) { /* for GPU_RESET case */
3032 /* reset MQD to a clean status */
3033 if (adev->gfx.mec.mqd_backup[mqd_idx])
3034 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
3036 /* reset ring buffer */
3038 amdgpu_ring_clear_ring(ring);
3040 amdgpu_ring_clear_ring(ring);
3046 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
3048 struct amdgpu_ring *ring = NULL;
3051 gfx_v9_0_cp_compute_enable(adev, true);
3053 ring = &adev->gfx.kiq.ring;
3055 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3056 if (unlikely(r != 0))
3059 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3061 r = gfx_v9_0_kiq_init_queue(ring);
3062 amdgpu_bo_kunmap(ring->mqd_obj);
3063 ring->mqd_ptr = NULL;
3065 amdgpu_bo_unreserve(ring->mqd_obj);
3069 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3070 ring = &adev->gfx.compute_ring[i];
3072 r = amdgpu_bo_reserve(ring->mqd_obj, false);
3073 if (unlikely(r != 0))
3075 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
3077 r = gfx_v9_0_kcq_init_queue(ring);
3078 amdgpu_bo_kunmap(ring->mqd_obj);
3079 ring->mqd_ptr = NULL;
3081 amdgpu_bo_unreserve(ring->mqd_obj);
3086 r = gfx_v9_0_kiq_kcq_enable(adev);
3091 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
3094 struct amdgpu_ring *ring;
3096 if (!(adev->flags & AMD_IS_APU))
3097 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
3099 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
3100 /* legacy firmware loading */
3101 r = gfx_v9_0_cp_gfx_load_microcode(adev);
3105 r = gfx_v9_0_cp_compute_load_microcode(adev);
3110 r = gfx_v9_0_cp_gfx_resume(adev);
3114 r = gfx_v9_0_kiq_resume(adev);
3118 ring = &adev->gfx.gfx_ring[0];
3119 r = amdgpu_ring_test_ring(ring);
3121 ring->ready = false;
3125 ring = &adev->gfx.kiq.ring;
3127 r = amdgpu_ring_test_ring(ring);
3129 ring->ready = false;
3131 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3132 ring = &adev->gfx.compute_ring[i];
3135 r = amdgpu_ring_test_ring(ring);
3137 ring->ready = false;
3140 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
3145 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
3147 gfx_v9_0_cp_gfx_enable(adev, enable);
3148 gfx_v9_0_cp_compute_enable(adev, enable);
3151 static int gfx_v9_0_hw_init(void *handle)
3154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3156 gfx_v9_0_init_golden_registers(adev);
3158 gfx_v9_0_gpu_init(adev);
3160 r = gfx_v9_0_csb_vram_pin(adev);
3164 r = gfx_v9_0_rlc_resume(adev);
3168 r = gfx_v9_0_cp_resume(adev);
3172 r = gfx_v9_0_ngg_en(adev);
3179 static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
3181 struct amdgpu_device *adev = kiq_ring->adev;
3182 uint32_t scratch, tmp = 0;
3185 r = amdgpu_gfx_scratch_get(adev, &scratch);
3187 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
3190 WREG32(scratch, 0xCAFEDEAD);
3192 r = amdgpu_ring_alloc(kiq_ring, 10);
3194 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
3195 amdgpu_gfx_scratch_free(adev, scratch);
3200 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
3201 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
3202 PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
3203 PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
3204 PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
3205 PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
3206 amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
3207 amdgpu_ring_write(kiq_ring, 0);
3208 amdgpu_ring_write(kiq_ring, 0);
3209 amdgpu_ring_write(kiq_ring, 0);
3210 /* write to scratch for completion */
3211 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
3212 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
3213 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
3214 amdgpu_ring_commit(kiq_ring);
3216 for (i = 0; i < adev->usec_timeout; i++) {
3217 tmp = RREG32(scratch);
3218 if (tmp == 0xDEADBEEF)
3222 if (i >= adev->usec_timeout) {
3223 DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
3226 amdgpu_gfx_scratch_free(adev, scratch);
3230 static int gfx_v9_0_hw_fini(void *handle)
3232 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3235 amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
3236 AMD_PG_STATE_UNGATE);
3238 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3239 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3241 /* disable KCQs so the CPC stops touching memory that will no longer be valid */
3242 for (i = 0; i < adev->gfx.num_compute_rings; i++)
3243 gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
3245 if (amdgpu_sriov_vf(adev)) {
3246 gfx_v9_0_cp_gfx_enable(adev, false);
3247 /* must disable wptr polling for SRIOV when hw is finished, otherwise
3248 * the CPC engine may keep fetching a WB address that is already
3249 * invalid after sw teardown and trigger a DMAR read error in
3252 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3256 /* Use the deinitialize sequence from CAIL when unbinding the device from the driver,
3257 * otherwise the KIQ hangs when binding back
3259 if (!adev->in_gpu_reset && !adev->gfx.in_suspend) {
3260 mutex_lock(&adev->srbm_mutex);
3261 soc15_grbm_select(adev, adev->gfx.kiq.ring.me,
3262 adev->gfx.kiq.ring.pipe,
3263 adev->gfx.kiq.ring.queue, 0);
3264 gfx_v9_0_kiq_fini_register(&adev->gfx.kiq.ring);
3265 soc15_grbm_select(adev, 0, 0, 0, 0);
3266 mutex_unlock(&adev->srbm_mutex);
3269 gfx_v9_0_cp_enable(adev, false);
3270 gfx_v9_0_rlc_stop(adev);
3272 gfx_v9_0_csb_vram_unpin(adev);
3277 static int gfx_v9_0_suspend(void *handle)
3279 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3281 adev->gfx.in_suspend = true;
3282 return gfx_v9_0_hw_fini(adev);
3285 static int gfx_v9_0_resume(void *handle)
3287 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3290 r = gfx_v9_0_hw_init(adev);
3291 adev->gfx.in_suspend = false;
3295 static bool gfx_v9_0_is_idle(void *handle)
3297 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3299 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3300 GRBM_STATUS, GUI_ACTIVE))
3306 static int gfx_v9_0_wait_for_idle(void *handle)
3309 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3311 for (i = 0; i < adev->usec_timeout; i++) {
3312 if (gfx_v9_0_is_idle(handle))
3319 static int gfx_v9_0_soft_reset(void *handle)
3321 u32 grbm_soft_reset = 0;
3323 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3326 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3327 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3328 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3329 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3330 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3331 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3332 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3333 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3334 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3335 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3336 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3339 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3340 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3341 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3345 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3346 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3347 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3348 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3351 if (grbm_soft_reset) {
3353 gfx_v9_0_rlc_stop(adev);
3355 /* Disable GFX parsing/prefetching */
3356 gfx_v9_0_cp_gfx_enable(adev, false);
3358 /* Disable MEC parsing/prefetching */
3359 gfx_v9_0_cp_compute_enable(adev, false);
3361 if (grbm_soft_reset) {
3362 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3363 tmp |= grbm_soft_reset;
3364 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3365 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3366 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3370 tmp &= ~grbm_soft_reset;
3371 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3372 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3375 /* Wait a little for things to settle down */
3381 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3385 mutex_lock(&adev->gfx.gpu_clock_mutex);
3386 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3387 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3388 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3389 mutex_unlock(&adev->gfx.gpu_clock_mutex);
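/*
 * Writing RLC_CAPTURE_GPU_CLOCK_COUNT latches the free-running counter
 * so the LSB/MSB halves read back consistently; gpu_clock_mutex keeps
 * concurrent captures from tearing the 64-bit value.
 */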
3393 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3395 uint32_t gds_base, uint32_t gds_size,
3396 uint32_t gws_base, uint32_t gws_size,
3397 uint32_t oa_base, uint32_t oa_size)
3399 struct amdgpu_device *adev = ring->adev;
3401 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3402 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3404 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3405 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3407 oa_base = oa_base >> AMDGPU_OA_SHIFT;
3408 oa_size = oa_size >> AMDGPU_OA_SHIFT;
3411 gfx_v9_0_write_data_to_reg(ring, 0, false,
3412 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE) + 2 * vmid,
3416 gfx_v9_0_write_data_to_reg(ring, 0, false,
3417 SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE) + 2 * vmid,
3421 gfx_v9_0_write_data_to_reg(ring, 0, false,
3422 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0) + vmid,
3423 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3426 gfx_v9_0_write_data_to_reg(ring, 0, false,
3427 SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) + vmid,
3428 (1 << (oa_size + oa_base)) - (1 << oa_base));
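/*
 * Worked example for the OA mask above: oa_base = 2, oa_size = 3 gives
 * (1 << 5) - (1 << 2) = 0b11100, a contiguous run of oa_size bits
 * starting at bit oa_base.
 */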
3431 static int gfx_v9_0_early_init(void *handle)
3433 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3435 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3436 adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
3437 gfx_v9_0_set_ring_funcs(adev);
3438 gfx_v9_0_set_irq_funcs(adev);
3439 gfx_v9_0_set_gds_init(adev);
3440 gfx_v9_0_set_rlc_funcs(adev);
3445 static int gfx_v9_0_late_init(void *handle)
3447 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3450 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3454 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
3461 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3463 uint32_t rlc_setting, data;
3466 if (adev->gfx.rlc.in_safe_mode)
3469 /* if RLC is not enabled, do nothing */
3470 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3471 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3474 if (adev->cg_flags &
3475 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3476 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3477 data = RLC_SAFE_MODE__CMD_MASK;
3478 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3479 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3481 /* wait for RLC_SAFE_MODE */
3482 for (i = 0; i < adev->usec_timeout; i++) {
3483 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3487 adev->gfx.rlc.in_safe_mode = true;
3491 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3493 uint32_t rlc_setting, data;
3495 if (!adev->gfx.rlc.in_safe_mode)
3498 /* if RLC is not enabled, do nothing */
3499 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3500 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3503 if (adev->cg_flags &
3504 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3506 * Try to exit safe mode only if it is already in safe mode.
3509 data = RLC_SAFE_MODE__CMD_MASK;
3510 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3511 adev->gfx.rlc.in_safe_mode = false;
3515 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3518 gfx_v9_0_enter_rlc_safe_mode(adev);
3520 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3521 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3522 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3523 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3525 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3526 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3529 gfx_v9_0_exit_rlc_safe_mode(adev);
3532 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3535 /* TODO: double check if we need to perform under safe mode */
3536 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3538 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3539 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3541 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3543 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3544 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3546 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3548 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3551 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3556 /* It is disabled by HW by default */
3557 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3558 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3559 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3560 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3561 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3562 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3563 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3565 /* only for Vega10 & Raven1 */
3566 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3569 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3571 /* MGLS is a global flag to control all MGLS in GFX */
3572 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3573 /* 2 - RLC memory Light sleep */
3574 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3575 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3576 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3578 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3580 /* 3 - CP memory Light sleep */
3581 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3582 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3583 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3585 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3589 /* 1 - MGCG_OVERRIDE */
3590 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3591 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3592 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3593 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3594 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3595 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3597 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3599 /* 2 - disable MGLS in RLC */
3600 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3601 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3602 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3603 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3606 /* 3 - disable MGLS in CP */
3607 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3608 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3609 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3610 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t data, def;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	/* Enable 3D CGCG/CGLS */
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
		/* write cmd to clear cgcg/cgls ov */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
		/* enable 3Dcgcg FSM(0x0020003f) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		/* Disable CGCG/CGLS */
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
		/* disable cgcg, cgls should be disabled */
		data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
			  RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}

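/*
 * Coarse grain clock gating (CGCG/CGLS) for the whole GFX block; same
 * pattern as the 3D variant above, but driven through RLC_CGCG_CGLS_CTRL
 * and again run under RLC safe mode.
 */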
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	adev->gfx.rlc.funcs->enter_safe_mode(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
		/* unset CGCG override */
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		else
			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
		/* update CGCG and CGLS override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);

		/* enable cgcg FSM(0x0020003F) */
		def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
			RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);

		/* set IDLE_POLL_COUNT(0x00900100) */
		def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
			(0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
	} else {
		def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
		/* reset CGCG/CGLS bits */
		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		/* disable cgcg and cgls in FSM */
		if (def != data)
			WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
	}

	adev->gfx.rlc.funcs->exit_safe_mode(adev);
}

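/*
 * Ordering matters here: on enable, medium grain gating is programmed
 * before the coarse grain FSMs; on disable, the coarse grain FSMs are
 * torn down first and medium grain last.
 */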
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	if (enable) {
		/* CGCG/CGLS should be enabled after MGCG/MGLS
		 * ===  MGCG + MGLS ===
		 */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
		/* ===  CGCG /CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  CGCG + CGLS === */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
	} else {
		/* CGCG/CGLS should be disabled before MGCG/MGLS
		 * ===  CGCG + CGLS ===
		 */
		gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
		/* ===  CGCG /CGLS for GFX 3D Only === */
		gfx_v9_0_update_3d_clock_gating(adev, enable);
		/* ===  MGCG + MGLS === */
		gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
	}
	return 0;
}

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
	.enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
	.exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
};

static int gfx_v9_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_PG_STATE_GATE) ? true : false;

	switch (adev->asic_type) {
	case CHIP_RAVEN:
		if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
		} else {
			gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
			gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
		}

		if (adev->pg_flags & AMD_PG_SUPPORT_CP)
			gfx_v9_0_enable_cp_power_gating(adev, true);
		else
			gfx_v9_0_enable_cp_power_gating(adev, false);

		/* update gfx cgpg state */
		gfx_v9_0_update_gfx_cg_power_gating(adev, enable);

		/* update mgcg state */
		gfx_v9_0_update_gfx_mg_power_gating(adev, enable);

		/* set gfx off through smu */
		if (enable && adev->powerplay.pp_funcs->set_powergating_by_smu)
			amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		gfx_v9_0_update_gfx_clock_gating(adev,
						 state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}

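/*
 * Report the clock gating state back to the core by reading the live
 * override/FSM registers rather than relying on the cached cg_flags.
 */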
static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_GFX_MGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
		*flags |= AMD_CG_SUPPORT_GFX_MGCG;

	/* AMD_CG_SUPPORT_GFX_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGCG;

	/* AMD_CG_SUPPORT_GFX_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CGLS;

	/* AMD_CG_SUPPORT_GFX_RLC_LS */
	data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_CP_LS */
	data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;

	/* AMD_CG_SUPPORT_GFX_3D_CGCG */
	data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;

	/* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}

static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
	} else {
		wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}

static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
	}
}

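/*
 * Emit an HDP flush: pick the per-engine ref/mask bit from the NBIO HDP
 * flush register pair, then wait with WAIT_REG_MEM until the flush
 * request is acknowledged in the "done" register.
 */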
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask, reg_mem_engine;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}

	gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
			      adev->nbio_funcs->get_hdp_flush_req_offset(adev),
			      adev->nbio_funcs->get_hdp_flush_done_offset(adev),
			      ref_and_mask, ref_and_mask, 0x20);
}

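/*
 * Emit a gfx IB: constant engine IBs use INDIRECT_BUFFER_CONST, all
 * others a plain INDIRECT_BUFFER. Under SR-IOV, preemptible IBs are
 * marked with PRE_ENB and a DE metadata write is emitted first.
 */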
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib,
				      unsigned vmid, bool ctx_switch)
{
	u32 header, control = 0;

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
		control |= INDIRECT_BUFFER_PRE_ENB(1);

		if (!(ib->flags & AMDGPU_IB_FLAG_CE))
			gfx_v9_0_ring_emit_de_meta(ring);
	}

	amdgpu_ring_write(ring, header);
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib,
					  unsigned vmid, bool ctx_switch)
{
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
		(2 << 0) |
#endif
		lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}

static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
					       EOP_TC_NC_ACTION_EN) :
					      (EOP_TCL1_ACTION_EN |
					       EOP_TC_ACTION_EN |
					       EOP_TC_WB_ACTION_EN |
					       EOP_TC_MD_ACTION_EN)) |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, 0);
}

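/*
 * Pipeline sync: block the engine (PFP for gfx rings, ME for compute)
 * until this ring's fence memory reaches sync_seq, via the
 * gfx_v9_0_wait_reg_mem() helper defined earlier in this file.
 */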
static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
			      lower_32_bits(addr), upper_32_bits(addr),
			      seq, 0xffffffff, 4);
}

static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}

static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}

static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}

static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
					   bool acquire)
{
	struct amdgpu_device *adev = ring->adev;
	int pipe_num, tmp, reg;
	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;

	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;

	/* first me only has 2 entries, GFX and HP3D */
	if (ring->me > 0)
		pipe_num -= 2;

	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
	tmp = RREG32(reg);
	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
	WREG32(reg, tmp);
}

static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
					    struct amdgpu_ring *ring,
					    bool acquire)
{
	int i, pipe;
	bool reserve;
	struct amdgpu_ring *iring;

	mutex_lock(&adev->gfx.pipe_reserve_mutex);
	pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
	if (acquire)
		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
	else
		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);

	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
		/* Clear all reservations - everyone reacquires all resources */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
						       true);

		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
						       true);
	} else {
		/* Lower all pipes without a current reservation */
		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
			iring = &adev->gfx.gfx_ring[i];
			pipe = amdgpu_gfx_queue_to_bit(adev,
						       iring->me,
						       iring->pipe,
						       0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
		}

		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
			iring = &adev->gfx.compute_ring[i];
			pipe = amdgpu_gfx_queue_to_bit(adev,
						       iring->me,
						       iring->pipe,
						       0);
			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
		}
	}

	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}

static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
				      struct amdgpu_ring *ring,
				      bool acquire)
{
	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
	uint32_t queue_priority = acquire ? 0xf : 0x0;

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
	WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
					       enum drm_sched_priority priority)
{
	struct amdgpu_device *adev = ring->adev;
	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;

	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
		return;

	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
}

static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}

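/*
 * KIQ fences are simpler than gfx fences: a WRITE_DATA of the 32-bit
 * sequence number, optionally followed by a write to CPC_INT_STATUS to
 * raise the interrupt (src_id 178).
 */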
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}

static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}

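/*
 * CE/DE metadata writes for SR-IOV world switch: both payloads are
 * written into the context save area (CSA) at the offsets given by
 * struct v9_gfx_meta_data.
 */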
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = amdgpu_csa_vaddr(ring->adev);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}

static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = amdgpu_csa_vaddr(ring->adev);
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}

static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}

static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	gfx_v9_0_ring_emit_tmz(ring, true);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time preamble presented
		 * although there is no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}

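/*
 * Conditional execution support. init_cond_exec emits a 5-DW COND_EXEC
 * packet (header, address lo/hi, 0, count placeholder) whose final count
 * word is patched later; patch_cond_exec computes how many DWs to skip,
 * handling ring wrap-around, and rewrites the 0x55aa55aa placeholder in
 * place.
 */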
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}

static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}

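/*
 * Register read over the ring (hooked up only on the KIQ below):
 * COPY_DATA from the register to the writeback slot reserved at
 * adev->virt.reg_val_offs.
 */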
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}

static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					uint32_t val, uint32_t mask)
{
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}

static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						  uint32_t reg0, uint32_t reg1,
						  uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
				      ref, mask, 0x20);
	else
		amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
							   ref, mask);
}

static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}

static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}

static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}

static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}

static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};

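/*
 * Three ring flavours share most of the emit helpers above: the gfx
 * ring, the user compute rings and the kernel interface queue (KIQ),
 * which is the only one wired up for register read emission.
 */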
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 + /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.set_priority = gfx_v9_0_ring_set_priority_compute,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
};

static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}

static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
	.set = gfx_v9_0_kiq_set_interrupt_state,
	.process = gfx_v9_0_kiq_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};

static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}

static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}

static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}

static void gfx_v9_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG, data);
}

static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}

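/*
 * Walk every shader engine / shader array, apply the user-requested CU
 * disable masks, and accumulate the active and always-on (AO) CU bitmaps
 * into the caller's amdgpu_cu_info.
 */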
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	unsigned disable_masks[4 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v9_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}

const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};