/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"

#include "vega10/soc15ip.h"
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/HDP/hdp_4_0_offset.h"

#include "soc15_common.h"
#include "clearstate_gfx9.h"
#include "v9_structs.h"
#define GFX9_NUM_GFX_RINGS     1
#define GFX9_NUM_COMPUTE_RINGS 8
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34

#define mmPWR_MISC_CNTL_STATUS					0x0183
#define mmPWR_MISC_CNTL_STATUS_BASE_IDX				0
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT	0x0
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT		0x1
#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK		0x00000001L
#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK		0x00000006L
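/*
 * Illustrative note (not part of the original file): these shift/mask
 * pairs follow the usual register-field convention, so reading the
 * GFXOFF status field would look roughly like:
 *
 *   val = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
 *   gfxoff_status = (val & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) >>
 *                   PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT;
 */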
MODULE_FIRMWARE("amdgpu/vega10_ce.bin");
MODULE_FIRMWARE("amdgpu/vega10_pfp.bin");
MODULE_FIRMWARE("amdgpu/vega10_me.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec.bin");
MODULE_FIRMWARE("amdgpu/vega10_mec2.bin");
MODULE_FIRMWARE("amdgpu/vega10_rlc.bin");

MODULE_FIRMWARE("amdgpu/raven_ce.bin");
MODULE_FIRMWARE("amdgpu/raven_pfp.bin");
MODULE_FIRMWARE("amdgpu/raven_me.bin");
MODULE_FIRMWARE("amdgpu/raven_mec.bin");
MODULE_FIRMWARE("amdgpu/raven_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
};
static const u32 golden_settings_gc_9_0[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCPC_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPF_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmCPG_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmIA_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_0), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_1), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080,
	SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197,
	SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION), 0x3fff3af3, 0x19200000,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff,
	SOC15_REG_OFFSET(GC, 0, mmWD_UTCL1_CNTL), 0x08000000, 0x08000080
};
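/*
 * Note (added for clarity): each golden-settings entry is an
 * (offset, and_mask, or_value) triple; amdgpu_program_register_sequence()
 * applies it roughly as
 *
 *   if (and_mask == 0xffffffff)
 *           tmp = or_value;
 *   else
 *           tmp = (RREG32(offset) & ~and_mask) | or_value;
 *   WREG32(offset, tmp);
 *
 * so only the bits covered by and_mask are forced to the golden value.
 */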
static const u32 golden_settings_gc_9_0_vg10[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0x0000f000, 0x00012107,
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL_3), 0x30000000, 0x10000000,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x2a114042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0x00008000, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmRMI_UTCL1_CNTL2), 0x00030000, 0x00020000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x00001800, 0x00000800
};
static const u32 golden_settings_gc_9_1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL), 0xfffdf3cf, 0x00014104,
	SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2), 0xf00fffff, 0x00000420,
	SOC15_REG_OFFSET(GC, 0, mmGB_GPU_ID), 0x0000000f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3), 0x00000003, 0x82400024,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE), 0x3fffffff, 0x00000001,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE), 0x0000ff0f, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x00000000,
	SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0x00003120,
	SOC15_REG_OFFSET(GC, 0, mmVGT_GS_MAX_WAVE_ID), 0x00000fff, 0x000003ff
};
static const u32 golden_settings_gc_9_1_rv1[] =
{
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG), 0xffff77ff, 0x26013042,
	SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG_READ), 0xffff77ff, 0x26013042,
	SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1), 0xffffffff, 0x00048000,
	SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x26013042
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_0_vg10,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_0_vg10));
		break;
	case CHIP_RAVEN:
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1));
		amdgpu_program_register_sequence(adev,
						 golden_settings_gc_9_1_rv1,
						 (const u32)ARRAY_SIZE(golden_settings_gc_9_1_rv1));
		break;
	default:
		break;
	}
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 7;
	adev->gfx.scratch.reg_base = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
static void gfx_v9_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
				       bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
				WRITE_DATA_DST_SEL(0) |
				(wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				  int mem_space, int opt, uint32_t addr0,
				  uint32_t addr1, uint32_t ref, uint32_t mask,
				  uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
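/*
 * Usage sketch (added; values are illustrative, not from the original file):
 * making the CP poll a 32-bit fence value in memory until it equals `seq`
 * would be emitted roughly as
 *
 *   gfx_v9_0_wait_reg_mem(ring, 0, 1, 0,
 *                         lower_32_bits(addr), upper_32_bits(addr),
 *                         seq, 0xffffffff, 4);
 *
 * i.e. mem_space=1 (poll memory rather than a register), opt=0 (wait),
 * a full-dword mask, and a poll interval of 4.
 */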
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out.\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
	adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
	adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
	adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
	adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
	adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
	adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
	adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
	adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
	adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
	adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
	if (!adev->gfx.rlc.register_list_format) {
		err = -ENOMEM;
		goto out;
	}

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

	tmp = (unsigned int *)((uintptr_t)rlc_hdr +
			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
	err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
	if (!err) {
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			 adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			 le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			 le32_to_cpu(cp_hdr->ucode_feature_version);
	} else {
		err = 0;
		adev->gfx.mec2_fw = NULL;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
		info->fw = adev->gfx.pfp_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
		info->fw = adev->gfx.me_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
		info->fw = adev->gfx.ce_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
		info->fw = adev->gfx.mec_fw;
		header = (const struct common_firmware_header *)info->fw->data;
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
		info->fw = adev->gfx.mec_fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

		if (adev->gfx.mec2_fw) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
			info->fw = adev->gfx.mec2_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) - le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
			info->fw = adev->gfx.mec2_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx9: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
	}
	return err;
}
static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
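/*
 * Worked example (added): the clear-state buffer is sized as
 * 2 (PREAMBLE begin) + 3 (CONTEXT_CONTROL) + sum of (2 + reg_count) over
 * all SECT_CONTEXT extents + 2 (PREAMBLE end) + 2 (CLEAR_STATE), so a
 * single extent of 10 registers would need 2 + 3 + 12 + 2 + 2 = 21 dwords.
 */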
static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void rv_init_cp_jump_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 5;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	/* write the cp table buffer */
	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 4) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
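/*
 * Layout note (added): the five jump tables (CE, PFP, ME, MEC, MEC2) are
 * packed back to back in the cp_table BO; bo_offset advances by each
 * table's size in dwords, so a table starts at the sum of the sizes of
 * all tables copied before it.
 */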
static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx9_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
		if (adev->gfx.rlc.clear_state_obj == NULL) {
			r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
						AMDGPU_GEM_DOMAIN_VRAM,
						&adev->gfx.rlc.clear_state_obj,
						&adev->gfx.rlc.clear_state_gpu_addr,
						(void **)&adev->gfx.rlc.cs_ptr);
			if (r) {
				dev_err(adev->dev,
					"(%d) failed to create rlc csb bo\n", r);
				gfx_v9_0_rlc_fini(adev);
				return r;
			}
		}
		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v9_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->asic_type == CHIP_RAVEN) {
		/* TODO: double check the cp_table_size for RV */
		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
		if (adev->gfx.rlc.cp_table_obj == NULL) {
			r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
						PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
						&adev->gfx.rlc.cp_table_obj,
						&adev->gfx.rlc.cp_table_gpu_addr,
						(void **)&adev->gfx.rlc.cp_table_ptr);
			if (r) {
				dev_err(adev->dev,
					"(%d) failed to create cp table bo\n", r);
				gfx_v9_0_rlc_fini(adev);
				return r;
			}
		}

		rv_init_cp_jump_table(adev);
		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	return 0;
}
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->gfx.mec.hpd_eop_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

		amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj);
		adev->gfx.mec.hpd_eop_obj = NULL;
	}
	if (adev->gfx.mec.mec_fw_obj) {
		r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true);
		if (unlikely(r != 0))
			dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r);
		amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

		amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj);
		adev->gfx.mec.mec_fw_obj = NULL;
	}
}
#define MEC_HPD_SIZE 2048
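/*
 * Sizing note (added): each compute queue gets its own MEC_HPD_SIZE (2KB)
 * EOP buffer. With the 1 MEC * 1 pipe * 8 queues configured in
 * gfx_v9_0_mec_init() below, the HPD EOP BO comes to 8 * 2048 = 16KB.
 */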
static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	/*
	 * we assign only 1 pipe because all other pipes will
	 * be handled by KFD
	 */
	adev->gfx.mec.num_mec = 1;
	adev->gfx.mec.num_pipe = 1;
	adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8;

	if (adev->gfx.mec.hpd_eop_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     adev->gfx.mec.num_queue * MEC_HPD_SIZE,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.hpd_eop_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.hpd_eop_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;

	if (adev->gfx.mec.mec_fw_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     mec_hdr->header.ucode_size_bytes,
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &adev->gfx.mec.mec_fw_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &adev->gfx.mec.mec_fw_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
		gfx_v9_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}
static void gfx_v9_0_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}
static int gfx_v9_0_kiq_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, MEC_HPD_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, MEC_HPD_SIZE);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}
static int gfx_v9_0_kiq_init_ring(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	mutex_init(&kiq->ring_mutex);

	r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
	if (r)
		return r;

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
	if (adev->gfx.mec2_fw) {
		ring->me = 2;
		ring->pipe = 0;
	} else {
		ring->me = 1;
		ring->pipe = 1;
	}

	ring->queue = 0;
	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}
static void gfx_v9_0_kiq_free_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq)
{
	amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs);
	amdgpu_ring_fini(ring);
}
/* create MQD for each compute queue */
static int gfx_v9_0_compute_mqd_sw_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		r = amdgpu_bo_create_kernel(adev, sizeof(struct v9_mqd), PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, sizeof(struct v9_mqd), PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(sizeof(struct v9_mqd), GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

static void gfx_v9_0_compute_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, (void **)&ring->mqd_ptr);
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, mmSQ_IND_DATA);
}

static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}

static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_0_select_se_sh,
	.read_wave_data = &gfx_v9_0_read_wave_data,
	.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_0_gfx_funcs;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_RAVEN:
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));
}
static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev,
				   struct amdgpu_ngg_buf *ngg_buf,
				   int size_se,
				   int default_size_se)
{
	int r;

	if (size_se < 0) {
		dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se);
		return -EINVAL;
	}
	size_se = size_se ? size_se : default_size_se;

	ngg_buf->size = size_se * adev->gfx.config.max_shader_engines;
	r = amdgpu_bo_create_kernel(adev, ngg_buf->size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				    &ngg_buf->bo,
				    &ngg_buf->gpu_addr,
				    NULL);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r);
		return r;
	}
	ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo);

	return r;
}
static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < NGG_BUF_MAX; i++)
		amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo,
				      &adev->gfx.ngg.buf[i].gpu_addr,
				      NULL);

	memset(&adev->gfx.ngg.buf[0], 0,
			sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX);

	adev->gfx.ngg.init = false;

	return 0;
}
static int gfx_v9_0_ngg_init(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_ngg || adev->gfx.ngg.init == true)
		return 0;

	/* GDS reserve memory: 64 bytes alignment */
	adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40);
	adev->gds.mem.total_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gds.mem.gfx_partition_size -= adev->gfx.ngg.gds_reserve_size;
	adev->gfx.ngg.gds_reserve_addr = amdgpu_gds_reg_offset[0].mem_base;
	adev->gfx.ngg.gds_reserve_addr += adev->gds.mem.gfx_partition_size;

	/* Primitive Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM],
				    amdgpu_prim_buf_per_se,
				    64 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Primitive Buffer\n");
		goto err;
	}

	/* Position Buffer */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS],
				    amdgpu_pos_buf_per_se,
				    256 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Position Buffer\n");
		goto err;
	}

	/* Control Sideband */
	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL],
				    amdgpu_cntl_sb_buf_per_se,
				    256);
	if (r) {
		dev_err(adev->dev, "Failed to create Control Sideband Buffer\n");
		goto err;
	}

	/* Parameter Cache, not created by default */
	if (amdgpu_param_buf_per_se <= 0)
		goto out;

	r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM],
				    amdgpu_param_buf_per_se,
				    512 * 1024);
	if (r) {
		dev_err(adev->dev, "Failed to create Parameter Cache\n");
		goto err;
	}

out:
	adev->gfx.ngg.init = true;
	return 0;
err:
	gfx_v9_0_ngg_fini(adev);
	return r;
}
static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r;
	u32 data;
	u32 size;
	u32 base;

	if (!amdgpu_ngg)
		return 0;

	/* Program buffer size */
	data = 0;
	size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_POS].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);

	data = 0;
	size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);

	size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);

	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);

	/* Program buffer base address */
	base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr);
	data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr);
	data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data);

	base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data);

	base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr);
	data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base);
	WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data);

	/* Clear GDS reserved memory */
	r = amdgpu_ring_alloc(ring, 17);
	if (r) {
		DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size,
				   (adev->gds.mem.total_size +
				    adev->gfx.ngg.gds_reserve_size) >>
				   AMDGPU_GDS_SHIFT);

	amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5));
	amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC |
				PACKET3_DMA_DATA_SRC_SEL(2)));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_size);

	gfx_v9_0_write_data_to_reg(ring, 0, false,
				   amdgpu_gds_reg_offset[0].mem_size, 0);

	amdgpu_ring_commit(ring);

	return 0;
}
static int gfx_v9_0_sw_init(void *handle)
{
	int i, r;
	struct amdgpu_ring *ring;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* KIQ event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 178, &adev->gfx.kiq.irq);
	if (r)
		return r;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_GRBM_CP, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	gfx_v9_0_scratch_init(adev);

	r = gfx_v9_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v9_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		ring->use_doorbell = true;
		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		unsigned irq_type;

		/* max 32 queues per MEC */
		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
			DRM_ERROR("Too many (%d) compute rings!\n", i);
			break;
		}
		ring = &adev->gfx.compute_ring[i];
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (AMDGPU_DOORBELL64_MEC_RING0 + i) << 1;
		ring->me = 1; /* first MEC */
		ring->pipe = i / 8;
		ring->queue = i % 8;
		ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE);
		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		/* type-2 packets are deprecated on MEC, use type-3 instead */
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, irq_type);
		if (r)
			return r;
	}
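	/*
	 * Mapping example (added for clarity): with 8 queues per pipe,
	 * compute ring i lands on MEC 1, pipe i / 8, queue i % 8; ring 10
	 * would be "comp_1.1.2" and use doorbell
	 * (AMDGPU_DOORBELL64_MEC_RING0 + 10) << 1.
	 */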
	if (amdgpu_sriov_vf(adev)) {
		r = gfx_v9_0_kiq_init(adev);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq;
		r = gfx_v9_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = gfx_v9_0_compute_mqd_sw_init(adev);
		if (r)
			return r;
	}

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v9_0_gpu_early_init(adev);

	r = gfx_v9_0_ngg_init(adev);
	if (r)
		return r;

	return 0;
}
static int gfx_v9_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	if (amdgpu_sriov_vf(adev)) {
		gfx_v9_0_compute_mqd_sw_fini(adev);
		gfx_v9_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
		gfx_v9_0_kiq_fini(adev);
	}

	gfx_v9_0_mec_fini(adev);
	gfx_v9_0_ngg_fini(adev);

	return 0;
}
static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	/* TODO */
}
static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
{
	u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	} else if (se_num == 0xffffffff) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1);
	} else if (sh_num == 0xffffffff) {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
	} else {
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
	}
	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
}
static u32 gfx_v9_0_create_bitmask(u32 bit_width)
{
	return (u32)((1ULL << bit_width) - 1);
}
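/*
 * Example (added): gfx_v9_0_create_bitmask(4) == 0xf, a mask with the low
 * bit_width bits set; the 1ULL intermediate keeps bit_width == 32 from
 * overflowing a 32-bit shift.
 */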
static u32 gfx_v9_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_RB_BACKEND_DISABLE);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = gfx_v9_0_create_bitmask(adev->gfx.config.max_backends_per_se /
				       adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
static void gfx_v9_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v9_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);
}
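/*
 * Packing example (added): with 4 shader engines, 1 SH per SE and 4 RBs
 * per SH, rb_bitmap_width_per_sh is 4 and the per-SH bitmaps concatenate
 * as active_rbs = se0 | (se1 << 4) | (se2 << 8) | (se3 << 12).
 */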
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v9_0_tiling_mode_table_init(adev);

	gfx_v9_0_setup_rb(adev);
	gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < 16; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		tmp = 0;
		tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
				    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
		WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
		WREG32_SOC15(GC, 0, mmSH_MEM_BASES, 0);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_0_init_compute_vmid(adev);

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	WREG32_SOC15(GC, 0, mmPA_SC_FIFO_SIZE,
		   (adev->gfx.config.sc_prim_fifo_size_frontend <<
			PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_prim_fifo_size_backend <<
			PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_hiz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, 0, mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, 0, mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
static void gfx_v9_0_init_csb(struct amdgpu_device *adev)
{
	/* csib */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI),
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO),
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH),
			adev->gfx.rlc.clear_state_size);
}
static void gfx_v9_0_parse_ind_reg_list(int *register_list_format,
				int indirect_offset,
				int list_size,
				int *unique_indirect_regs,
				int *unique_indirect_reg_count,
				int max_indirect_reg_count,
				int *indirect_start_offsets,
				int *indirect_start_offsets_count,
				int max_indirect_start_offsets_count)
{
	int idx;
	bool new_entry = true;

	for (; indirect_offset < list_size; indirect_offset++) {

		if (new_entry) {
			new_entry = false;
			indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
			*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
			BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count);
		}

		if (register_list_format[indirect_offset] == 0xFFFFFFFF) {
			new_entry = true;
			continue;
		}

		indirect_offset += 2;

		/* look for the matching index */
		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
			if (unique_indirect_regs[idx] ==
				register_list_format[indirect_offset])
				break;
		}

		if (idx >= *unique_indirect_reg_count) {
			unique_indirect_regs[*unique_indirect_reg_count] =
				register_list_format[indirect_offset];
			idx = *unique_indirect_reg_count;
			*unique_indirect_reg_count = *unique_indirect_reg_count + 1;
			BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count);
		}

		register_list_format[indirect_offset] = idx;
	}
}
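/*
 * Format sketch (added, inferred from the parser above): past the first
 * GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH direct entries, the list is a
 * series of variable-length entries terminated by 0xFFFFFFFF markers.
 * The dword two positions into an entry names an indirect register;
 * this pass replaces it with its index into unique_indirect_regs[] and
 * records where each entry starts in indirect_start_offsets[].
 */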
1760 static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
1762 int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1763 int unique_indirect_reg_count = 0;
1765 int indirect_start_offsets[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
1766 int indirect_start_offsets_count = 0;
1772 u32 *register_list_format =
1773 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
1774 if (!register_list_format)
1776 memcpy(register_list_format, adev->gfx.rlc.register_list_format,
1777 adev->gfx.rlc.reg_list_format_size_bytes);
1779 /* setup unique_indirect_regs array and indirect_start_offsets array */
1780 gfx_v9_0_parse_ind_reg_list(register_list_format,
1781 GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH,
1782 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
1783 unique_indirect_regs,
1784 &unique_indirect_reg_count,
1785 sizeof(unique_indirect_regs)/sizeof(int),
1786 indirect_start_offsets,
1787 &indirect_start_offsets_count,
1788 sizeof(indirect_start_offsets)/sizeof(int));
1790 /* enable auto inc in case it is disabled */
1791 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1792 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
1793 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1795 /* write register_restore table to offset 0x0 using RLC_SRM_ARAM_ADDR/DATA */
1796 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR),
1797 RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET);
1798 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1799 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1800 adev->gfx.rlc.register_restore[i]);
1802 /* load direct register */
1803 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0);
1804 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
1805 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA),
1806 adev->gfx.rlc.register_restore[i]);
1808 /* load indirect register */
1809 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1810 adev->gfx.rlc.reg_list_format_start);
1811 for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
1812 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1813 register_list_format[i]);
1815 /* set save/restore list size */
1816 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
1817 list_size = list_size >> 1;
1818 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1819 adev->gfx.rlc.reg_restore_list_size);
1820 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), list_size);
1822 /* write the starting offsets to RLC scratch ram */
1823 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
1824 adev->gfx.rlc.starting_offsets_start);
1825 for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
1826 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
1827 indirect_start_offsets[i]);
1829 /* load unique indirect regs*/
1830 for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
1831 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
1832 unique_indirect_regs[i] & 0x3FFFF);
1833 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
1834 unique_indirect_regs[i] >> 20);
1837 kfree(register_list_format);
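/* Turn on the RLC save/restore machine programmed above. */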
1841 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
1845 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
1846 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
1847 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
1850 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
1854 uint32_t default_data = 0;
1856 default_data = data = RREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS));
1857 if (enable) {
1858 /* enable GFXIP control over CGPG */
1859 data |= PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1860 if (default_data != data)
1861 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1864 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK;
1865 data |= (2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
1866 if (default_data != data)
1867 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
1869 /* restore GFXIP control over CGPG */
1870 data &= ~PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK;
1871 if (default_data != data)
1872 WREG32(SOC15_REG_OFFSET(PWR, 0, mmPWR_MISC_CNTL_STATUS), data);
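/*
 * Program the GFX power-gating timing parameters: the CP_RB_WPTR_POLL
 * idle count, the RLC_PG_DELAY* power up/down and SERDES/CGCG delays,
 * and the GRBM register-save idle threshold, then hand CGPG control
 * to the GFX IP. Only applied when some form of GFX PG is supported.
 */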
1876 static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
1880 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
1881 AMD_PG_SUPPORT_GFX_SMG |
1882 AMD_PG_SUPPORT_GFX_DMG)) {
1883 /* init IDLE_POLL_COUNT = 60 */
1884 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL));
1885 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
1886 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
1887 WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_POLL_CNTL), data);
1889 /* init RLC PG Delay */
1891 data |= (0x10 << RLC_PG_DELAY__POWER_UP_DELAY__SHIFT);
1892 data |= (0x10 << RLC_PG_DELAY__POWER_DOWN_DELAY__SHIFT);
1893 data |= (0x10 << RLC_PG_DELAY__CMD_PROPAGATE_DELAY__SHIFT);
1894 data |= (0x40 << RLC_PG_DELAY__MEM_SLEEP_DELAY__SHIFT);
1895 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY), data);
1897 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2));
1898 data &= ~RLC_PG_DELAY_2__SERDES_CMD_DELAY_MASK;
1899 data |= (0x4 << RLC_PG_DELAY_2__SERDES_CMD_DELAY__SHIFT);
1900 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_2), data);
1902 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3));
1903 data &= ~RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK;
1904 data |= (0xff << RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT);
1905 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_DELAY_3), data);
1907 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL));
1908 data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
1910 /* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
1911 data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
1912 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
1914 pwr_10_0_gfxip_control_over_cgpg(adev, true);
1918 static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
1922 uint32_t default_data = 0;
1924 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1926 if (enable) {
1927 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1928 if (default_data != data)
1929 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1931 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
1932 if (default_data != data)
1933 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1937 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
1941 uint32_t default_data = 0;
1943 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1945 if (enable) {
1946 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1947 if (default_data != data)
1948 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1950 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
1951 if (default_data != data)
1952 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1956 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
1960 uint32_t default_data = 0;
1962 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1964 if (enable) {
1965 data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1966 if (default_data != data)
1967 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1969 data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
1970 if (default_data != data)
1971 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1975 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
1978 uint32_t data, default_data;
1980 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1982 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1984 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
1985 if (default_data != data)
1986 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
1989 static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
1992 uint32_t data, default_data;
1994 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
1996 data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1998 data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
1999 if (default_data != data)
2000 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2003 /* read any GFX register to wake up GFX */
2004 data = RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_RENDER_CONTROL));
2007 void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
2010 uint32_t data, default_data;
2012 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2014 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
2016 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
2017 if (default_data != data)
2018 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
2021 void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
2024 uint32_t data, default_data;
2026 default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
2028 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
2030 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
2031 if (default_data != data)
2032 WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
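/*
 * Top-level PG init: when any PG feature is enabled, set up the
 * clear-state buffer and the RLC save/restore machine, then apply the
 * Raven-specific jump table and power-gating knobs (SCK slowdown, CP
 * PG) according to adev->pg_flags.
 */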
2035 static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
2037 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
2038 AMD_PG_SUPPORT_GFX_SMG |
2039 AMD_PG_SUPPORT_GFX_DMG |
2041 AMD_PG_SUPPORT_GDS |
2042 AMD_PG_SUPPORT_RLC_SMU_HS)) {
2043 gfx_v9_0_init_csb(adev);
2044 gfx_v9_0_init_rlc_save_restore_list(adev);
2045 gfx_v9_0_enable_save_restore_machine(adev);
2047 if (adev->asic_type == CHIP_RAVEN) {
2048 WREG32(mmRLC_JUMP_TABLE_RESTORE,
2049 adev->gfx.rlc.cp_table_gpu_addr >> 8);
2050 gfx_v9_0_init_gfx_power_gating(adev);
2052 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
2053 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
2054 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
2056 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
2057 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
2060 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
2061 gfx_v9_0_enable_cp_power_gating(adev, true);
2063 gfx_v9_0_enable_cp_power_gating(adev, false);
2068 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
2070 u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
2072 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
2073 WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
2075 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2077 gfx_v9_0_wait_for_rlc_serdes(adev);
2080 static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
2082 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2084 WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2088 static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
2090 #ifdef AMDGPU_RLC_DEBUG_RETRY
2094 WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2096 /* on APUs, the CP interrupt is enabled only after the CP is initialized */
2097 if (!(adev->flags & AMD_IS_APU))
2098 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2102 #ifdef AMDGPU_RLC_DEBUG_RETRY
2103 /* RLC_GPM_GENERAL_6 : RLC Ucode version */
2104 rlc_ucode_ver = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_6);
2105 if (rlc_ucode_ver == 0x108) {
2106 DRM_INFO("Using RLC debug ucode. mmRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
2107 rlc_ucode_ver, adev->gfx.rlc_fw_version);
2108 /* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
2109 * default is 0x9C4 to create a 100us interval */
2110 WREG32_SOC15(GC, 0, mmRLC_GPM_TIMER_INT_3, 0x9C4);
2111 /* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
2112 * to disable the page fault retry interrupts, default is 0x100 */
2114 WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_12, 0x100);
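/*
 * Legacy (non-PSP) RLC microcode load: stream the ucode words into
 * RLC GPM memory through the auto-incrementing UCODE_ADDR/DATA pair,
 * then write the firmware version back to UCODE_ADDR, the convention
 * this loader uses to finish the transfer.
 */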
2119 static int gfx_v9_0_rlc_load_microcode(struct amdgpu_device *adev)
2121 const struct rlc_firmware_header_v2_0 *hdr;
2122 const __le32 *fw_data;
2123 unsigned i, fw_size;
2125 if (!adev->gfx.rlc_fw)
2128 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2129 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2131 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2132 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2133 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2135 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR,
2136 RLCG_UCODE_LOADING_START_ADDRESS);
2137 for (i = 0; i < fw_size; i++)
2138 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
2139 WREG32_SOC15(GC, 0, mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2144 static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2148 if (amdgpu_sriov_vf(adev))
2151 gfx_v9_0_rlc_stop(adev);
2154 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2157 WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, 0);
2159 gfx_v9_0_rlc_reset(adev);
2161 gfx_v9_0_init_pg(adev);
2163 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2164 /* legacy rlc firmware loading */
2165 r = gfx_v9_0_rlc_load_microcode(adev);
2170 gfx_v9_0_rlc_start(adev);
2175 static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2178 u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
2180 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2181 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2182 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
2184 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2185 adev->gfx.gfx_ring[i].ready = false;
2187 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
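/*
 * Legacy (non-PSP) load of the gfx CP engines: PFP, CE and ME are
 * halted first via gfx_v9_0_cp_gfx_enable(adev, false), then each
 * ucode image is streamed through its UCODE/RAM ADDR-DATA register
 * pair and tagged with the firmware version.
 */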
2191 static int gfx_v9_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2193 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2194 const struct gfx_firmware_header_v1_0 *ce_hdr;
2195 const struct gfx_firmware_header_v1_0 *me_hdr;
2196 const __le32 *fw_data;
2197 unsigned i, fw_size;
2199 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
2202 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2203 adev->gfx.pfp_fw->data;
2204 ce_hdr = (const struct gfx_firmware_header_v1_0 *)
2205 adev->gfx.ce_fw->data;
2206 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2207 adev->gfx.me_fw->data;
2209 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2210 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2211 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2213 gfx_v9_0_cp_gfx_enable(adev, false);
2216 fw_data = (const __le32 *)
2217 (adev->gfx.pfp_fw->data +
2218 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2219 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
2220 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, 0);
2221 for (i = 0; i < fw_size; i++)
2222 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
2223 WREG32_SOC15(GC, 0, mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2226 fw_data = (const __le32 *)
2227 (adev->gfx.ce_fw->data +
2228 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
2229 fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
2230 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, 0);
2231 for (i = 0; i < fw_size; i++)
2232 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
2233 WREG32_SOC15(GC, 0, mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
2236 fw_data = (const __le32 *)
2237 (adev->gfx.me_fw->data +
2238 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2239 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
2240 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, 0);
2241 for (i = 0; i < fw_size; i++)
2242 WREG32_SOC15(GC, 0, mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
2243 WREG32_SOC15(GC, 0, mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
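/*
 * Gfx ring bring-up: emit the clear-state init sequence as PM4
 * packets (PREAMBLE begin/end around the golden context-register
 * ranges from gfx9_cs_data, a CLEAR_STATE, then a SET_BASE for the
 * CE partition) so that new contexts start from known register state.
 */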
2248 static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
2250 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
2251 const struct cs_section_def *sect = NULL;
2252 const struct cs_extent_def *ext = NULL;
2256 WREG32_SOC15(GC, 0, mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
2257 WREG32_SOC15(GC, 0, mmCP_DEVICE_ID, 1);
2259 gfx_v9_0_cp_gfx_enable(adev, true);
2261 r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4);
2263 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
2267 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2268 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
2270 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
2271 amdgpu_ring_write(ring, 0x80000000);
2272 amdgpu_ring_write(ring, 0x80000000);
2274 for (sect = gfx9_cs_data; sect->section != NULL; ++sect) {
2275 for (ext = sect->section; ext->extent != NULL; ++ext) {
2276 if (sect->id == SECT_CONTEXT) {
2277 amdgpu_ring_write(ring,
2278 PACKET3(PACKET3_SET_CONTEXT_REG,
2280 amdgpu_ring_write(ring,
2281 ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
2282 for (i = 0; i < ext->reg_count; i++)
2283 amdgpu_ring_write(ring, ext->extent[i]);
2288 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
2289 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
2291 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
2292 amdgpu_ring_write(ring, 0);
2294 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
2295 amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
2296 amdgpu_ring_write(ring, 0x8000);
2297 amdgpu_ring_write(ring, 0x8000);
2299 amdgpu_ring_commit(ring);
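/*
 * Restore CP_RB0 for the gfx ring: buffer size and block size,
 * read/write pointers and their writeback addresses, the ring base,
 * and the doorbell range/enable, then kick the ring through
 * gfx_v9_0_cp_gfx_start().
 */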
2304 static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
2306 struct amdgpu_ring *ring;
2309 u64 rb_addr, rptr_addr, wptr_gpu_addr;
2311 /* Set the write pointer delay */
2312 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_DELAY, 0);
2314 /* set the RB to use vmid 0 */
2315 WREG32_SOC15(GC, 0, mmCP_RB_VMID, 0);
2317 /* Set ring buffer size */
2318 ring = &adev->gfx.gfx_ring[0];
2319 rb_bufsz = order_base_2(ring->ring_size / 8);
2320 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
2321 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
2323 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1);
2325 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2327 /* Initialize the ring buffer's write pointers */
2329 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
2330 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
2332 /* set the wb address whether it's enabled or not */
2333 rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2334 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
2335 WREG32_SOC15(GC, 0, mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
2337 wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2338 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO, lower_32_bits(wptr_gpu_addr));
2339 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI, upper_32_bits(wptr_gpu_addr));
2342 WREG32_SOC15(GC, 0, mmCP_RB0_CNTL, tmp);
2344 rb_addr = ring->gpu_addr >> 8;
2345 WREG32_SOC15(GC, 0, mmCP_RB0_BASE, rb_addr);
2346 WREG32_SOC15(GC, 0, mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));
2348 tmp = RREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL);
2349 if (ring->use_doorbell) {
2350 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2351 DOORBELL_OFFSET, ring->doorbell_index);
2352 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
2355 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, DOORBELL_EN, 0);
2357 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
2359 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
2360 DOORBELL_RANGE_LOWER, ring->doorbell_index);
2361 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
2363 WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_UPPER,
2364 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
2367 /* start the ring */
2368 gfx_v9_0_cp_gfx_start(adev);
2374 static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
2379 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
2381 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
2382 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
2383 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2384 adev->gfx.compute_ring[i].ready = false;
2385 adev->gfx.kiq.ring.ready = false;
2390 static int gfx_v9_0_cp_compute_start(struct amdgpu_device *adev)
2392 gfx_v9_0_cp_compute_enable(adev, true);
2397 static int gfx_v9_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2399 const struct gfx_firmware_header_v1_0 *mec_hdr;
2400 const __le32 *fw_data;
2404 if (!adev->gfx.mec_fw)
2407 gfx_v9_0_cp_compute_enable(adev, false);
2409 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2410 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2412 fw_data = (const __le32 *)
2413 (adev->gfx.mec_fw->data +
2414 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
2416 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2417 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2418 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL, tmp);
2420 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_LO,
2421 adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
2422 WREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_HI,
2423 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
2426 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2427 mec_hdr->jt_offset);
2428 for (i = 0; i < mec_hdr->jt_size; i++)
2429 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_DATA,
2430 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
2432 WREG32_SOC15(GC, 0, mmCP_MEC_ME1_UCODE_ADDR,
2433 adev->gfx.mec_fw_version);
2434 /* TODO: loading MEC2 firmware is only necessary if MEC2 should run microcode different from MEC1. */
2439 static void gfx_v9_0_cp_compute_fini(struct amdgpu_device *adev)
2443 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2444 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2446 if (ring->mqd_obj) {
2447 r = amdgpu_bo_reserve(ring->mqd_obj, true);
2448 if (unlikely(r != 0))
2449 dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r);
2451 amdgpu_bo_unpin(ring->mqd_obj);
2452 amdgpu_bo_unreserve(ring->mqd_obj);
2454 amdgpu_bo_unref(&ring->mqd_obj);
2455 ring->mqd_obj = NULL;
2460 static int gfx_v9_0_init_queue(struct amdgpu_ring *ring);
2462 static int gfx_v9_0_cp_compute_resume(struct amdgpu_device *adev)
2465 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2466 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2467 if (gfx_v9_0_init_queue(ring))
2468 dev_warn(adev->dev, "compute queue %d init failed!\n", i);
2471 r = gfx_v9_0_cp_compute_start(adev);
2479 static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
2482 struct amdgpu_device *adev = ring->adev;
2484 /* tell the RLC which queue is the KIQ */
2485 tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
2487 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
2488 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
2490 WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
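/*
 * Map all compute queues through the KIQ: a SET_RESOURCES packet
 * publishes the queue mask, then one MAP_QUEUES packet per KCQ points
 * the CP at that ring's MQD and wptr writeback address. Completion is
 * detected by polling a scratch register that the packet stream
 * writes 0xDEADBEEF to at the end.
 */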
2493 static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
2495 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
2496 uint32_t scratch, tmp = 0;
2499 r = amdgpu_gfx_scratch_get(adev, &scratch);
2501 DRM_ERROR("Failed to get scratch reg (%d).\n", r);
2504 WREG32(scratch, 0xCAFEDEAD);
2506 r = amdgpu_ring_alloc(kiq_ring, (7 * adev->gfx.num_compute_rings) + 11);
2508 DRM_ERROR("Failed to lock KIQ (%d).\n", r);
2509 amdgpu_gfx_scratch_free(adev, scratch);
2514 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
2515 amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
2516 PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
2517 amdgpu_ring_write(kiq_ring, 0x000000FF); /* queue mask lo */
2518 amdgpu_ring_write(kiq_ring, 0); /* queue mask hi */
2519 amdgpu_ring_write(kiq_ring, 0); /* gws mask lo */
2520 amdgpu_ring_write(kiq_ring, 0); /* gws mask hi */
2521 amdgpu_ring_write(kiq_ring, 0); /* oac mask */
2522 amdgpu_ring_write(kiq_ring, 0); /* gds heap base:0, gds heap size:0 */
2523 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2524 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
2525 uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
2526 uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2528 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
2530 amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
2531 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
2532 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
2533 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
2534 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
2535 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
2536 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
2537 PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
2538 PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
2539 PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
2540 amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
2541 amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
2542 amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
2543 amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
2544 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
2546 /* write to scratch for completion */
2547 amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
2548 amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
2549 amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
2550 amdgpu_ring_commit(kiq_ring);
2552 for (i = 0; i < adev->usec_timeout; i++) {
2553 tmp = RREG32(scratch);
2554 if (tmp == 0xDEADBEEF)
2558 if (i >= adev->usec_timeout) {
2559 DRM_ERROR("KCQ enable failed (scratch(0x%04X)=0x%08X)\n",
2563 amdgpu_gfx_scratch_free(adev, scratch);
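/*
 * Fill in the memory queue descriptor (MQD) for a compute ring. The
 * MQD mirrors the CP_HQD_* registers: EOP buffer, doorbell control,
 * MQD/HQD base addresses, queue control and the rptr/wptr writeback
 * addresses. The CP (or the KIQ) later commits it to a hardware
 * queue slot.
 */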
2568 static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
2570 struct amdgpu_device *adev = ring->adev;
2571 struct v9_mqd *mqd = ring->mqd_ptr;
2572 uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
2575 mqd->header = 0xC0310800;
2576 mqd->compute_pipelinestat_enable = 0x00000001;
2577 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
2578 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
2579 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
2580 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
2581 mqd->compute_misc_reserved = 0x00000003;
2583 eop_base_addr = ring->eop_gpu_addr >> 8;
2584 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
2585 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
2587 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2588 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
2589 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
2590 (order_base_2(MEC_HPD_SIZE / 4) - 1));
2592 mqd->cp_hqd_eop_control = tmp;
2594 /* enable doorbell? */
2595 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2597 if (ring->use_doorbell) {
2598 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2599 DOORBELL_OFFSET, ring->doorbell_index);
2600 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2602 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2603 DOORBELL_SOURCE, 0);
2604 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2608 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2611 mqd->cp_hqd_pq_doorbell_control = tmp;
2613 /* disable the queue if it's active */
2615 mqd->cp_hqd_dequeue_request = 0;
2616 mqd->cp_hqd_pq_rptr = 0;
2617 mqd->cp_hqd_pq_wptr_lo = 0;
2618 mqd->cp_hqd_pq_wptr_hi = 0;
2620 /* set the pointer to the MQD */
2621 mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
2622 mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
2624 /* set MQD vmid to 0 */
2625 tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
2626 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
2627 mqd->cp_mqd_control = tmp;
2629 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2630 hqd_gpu_addr = ring->gpu_addr >> 8;
2631 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
2632 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
2634 /* set up the HQD, this is similar to CP_RB0_CNTL */
2635 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
2636 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
2637 (order_base_2(ring->ring_size / 4) - 1));
2638 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
2639 ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
2641 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
2643 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
2644 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
2645 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
2646 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
2647 mqd->cp_hqd_pq_control = tmp;
2649 /* set the wb address whether it's enabled or not */
2650 wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
2651 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
2652 mqd->cp_hqd_pq_rptr_report_addr_hi =
2653 upper_32_bits(wb_gpu_addr) & 0xffff;
2655 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2656 wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
2657 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
2658 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
2661 /* enable the doorbell if requested */
2662 if (ring->use_doorbell) {
2663 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
2664 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2665 DOORBELL_OFFSET, ring->doorbell_index);
2667 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2669 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2670 DOORBELL_SOURCE, 0);
2671 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
2675 mqd->cp_hqd_pq_doorbell_control = tmp;
2677 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2679 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR);
2681 /* set the vmid for the queue */
2682 mqd->cp_hqd_vmid = 0;
2684 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
2685 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
2686 mqd->cp_hqd_persistent_state = tmp;
2688 /* set MIN_IB_AVAIL_SIZE */
2689 tmp = RREG32_SOC15(GC, 0, mmCP_HQD_IB_CONTROL);
2690 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
2691 mqd->cp_hqd_ib_control = tmp;
2693 /* activate the queue */
2694 mqd->cp_hqd_active = 1;
2699 static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
2701 struct amdgpu_device *adev = ring->adev;
2702 struct v9_mqd *mqd = ring->mqd_ptr;
2705 /* disable wptr polling */
2706 WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
2708 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR,
2709 mqd->cp_hqd_eop_base_addr_lo);
2710 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI,
2711 mqd->cp_hqd_eop_base_addr_hi);
2713 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
2714 WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL,
2715 mqd->cp_hqd_eop_control);
2717 /* enable doorbell? */
2718 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2719 mqd->cp_hqd_pq_doorbell_control);
2721 /* disable the queue if it's active */
2722 if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
2723 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
2724 for (j = 0; j < adev->usec_timeout; j++) {
2725 if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
2729 WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST,
2730 mqd->cp_hqd_dequeue_request);
2731 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR,
2732 mqd->cp_hqd_pq_rptr);
2733 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2734 mqd->cp_hqd_pq_wptr_lo);
2735 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2736 mqd->cp_hqd_pq_wptr_hi);
2739 /* set the pointer to the MQD */
2740 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR,
2741 mqd->cp_mqd_base_addr_lo);
2742 WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI,
2743 mqd->cp_mqd_base_addr_hi);
2745 /* set MQD vmid to 0 */
2746 WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL,
2747 mqd->cp_mqd_control);
2749 /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
2750 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE,
2751 mqd->cp_hqd_pq_base_lo);
2752 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI,
2753 mqd->cp_hqd_pq_base_hi);
2755 /* set up the HQD, this is similar to CP_RB0_CNTL */
2756 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL,
2757 mqd->cp_hqd_pq_control);
2759 /* set the wb address whether it's enabled or not */
2760 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
2761 mqd->cp_hqd_pq_rptr_report_addr_lo);
2762 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
2763 mqd->cp_hqd_pq_rptr_report_addr_hi);
2765 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
2766 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
2767 mqd->cp_hqd_pq_wptr_poll_addr_lo);
2768 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
2769 mqd->cp_hqd_pq_wptr_poll_addr_hi);
2771 /* enable the doorbell if requested */
2772 if (ring->use_doorbell) {
2773 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
2774 (AMDGPU_DOORBELL64_KIQ * 2) << 2);
2775 WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
2776 (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
2779 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
2780 mqd->cp_hqd_pq_doorbell_control);
2782 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2783 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
2784 mqd->cp_hqd_pq_wptr_lo);
2785 WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
2786 mqd->cp_hqd_pq_wptr_hi);
2788 /* set the vmid for the queue */
2789 WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);
2791 WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE,
2792 mqd->cp_hqd_persistent_state);
2794 /* activate the queue */
2795 WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE,
2796 mqd->cp_hqd_active);
2798 if (ring->use_doorbell)
2799 WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
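/*
 * KIQ queue init: on first init the MQD is generated and written to
 * the HQD registers under srbm_mutex with the queue selected through
 * soc15_grbm_select(); on GPU reset the backed-up MQD is restored and
 * the ring buffer is cleared instead.
 */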
2804 static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
2806 struct amdgpu_device *adev = ring->adev;
2807 struct v9_mqd *mqd = ring->mqd_ptr;
2808 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
2810 gfx_v9_0_kiq_setting(ring);
2812 if (adev->gfx.in_reset) { /* for GPU_RESET case */
2813 /* reset MQD to a clean status */
2814 if (adev->gfx.mec.mqd_backup[mqd_idx])
2815 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2817 /* reset ring buffer */
2819 amdgpu_ring_clear_ring(ring);
2821 mutex_lock(&adev->srbm_mutex);
2822 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2823 gfx_v9_0_kiq_init_register(ring);
2824 soc15_grbm_select(adev, 0, 0, 0, 0);
2825 mutex_unlock(&adev->srbm_mutex);
2827 memset((void *)mqd, 0, sizeof(*mqd));
2828 mutex_lock(&adev->srbm_mutex);
2829 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2830 gfx_v9_0_mqd_init(ring);
2831 gfx_v9_0_kiq_init_register(ring);
2832 soc15_grbm_select(adev, 0, 0, 0, 0);
2833 mutex_unlock(&adev->srbm_mutex);
2835 if (adev->gfx.mec.mqd_backup[mqd_idx])
2836 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2842 static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
2844 struct amdgpu_device *adev = ring->adev;
2845 struct v9_mqd *mqd = ring->mqd_ptr;
2846 int mqd_idx = ring - &adev->gfx.compute_ring[0];
2848 if (!adev->gfx.in_reset) {
2849 memset((void *)mqd, 0, sizeof(*mqd));
2850 mutex_lock(&adev->srbm_mutex);
2851 soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
2852 gfx_v9_0_mqd_init(ring);
2853 soc15_grbm_select(adev, 0, 0, 0, 0);
2854 mutex_unlock(&adev->srbm_mutex);
2856 if (adev->gfx.mec.mqd_backup[mqd_idx])
2857 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
2858 } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
2859 /* reset MQD to a clean status */
2860 if (adev->gfx.mec.mqd_backup[mqd_idx])
2861 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
2863 /* reset ring buffer */
2865 amdgpu_ring_clear_ring(ring);
2867 amdgpu_ring_clear_ring(ring);
2873 static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
2875 struct amdgpu_ring *ring = NULL;
2878 gfx_v9_0_cp_compute_enable(adev, true);
2880 ring = &adev->gfx.kiq.ring;
2882 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2883 if (unlikely(r != 0))
2886 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2888 r = gfx_v9_0_kiq_init_queue(ring);
2889 amdgpu_bo_kunmap(ring->mqd_obj);
2890 ring->mqd_ptr = NULL;
2892 amdgpu_bo_unreserve(ring->mqd_obj);
2896 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2897 ring = &adev->gfx.compute_ring[i];
2899 r = amdgpu_bo_reserve(ring->mqd_obj, false);
2900 if (unlikely(r != 0))
2902 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
2904 r = gfx_v9_0_kcq_init_queue(ring);
2905 amdgpu_bo_kunmap(ring->mqd_obj);
2906 ring->mqd_ptr = NULL;
2908 amdgpu_bo_unreserve(ring->mqd_obj);
2913 r = gfx_v9_0_kiq_kcq_enable(adev);
2918 static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
2921 struct amdgpu_ring *ring;
2923 if (!(adev->flags & AMD_IS_APU))
2924 gfx_v9_0_enable_gui_idle_interrupt(adev, false);
2926 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2927 /* legacy firmware loading */
2928 r = gfx_v9_0_cp_gfx_load_microcode(adev);
2932 r = gfx_v9_0_cp_compute_load_microcode(adev);
2937 r = gfx_v9_0_cp_gfx_resume(adev);
2941 if (amdgpu_sriov_vf(adev))
2942 r = gfx_v9_0_kiq_resume(adev);
2944 r = gfx_v9_0_cp_compute_resume(adev);
2948 ring = &adev->gfx.gfx_ring[0];
2949 r = amdgpu_ring_test_ring(ring);
2951 ring->ready = false;
2954 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2955 ring = &adev->gfx.compute_ring[i];
2958 r = amdgpu_ring_test_ring(ring);
2960 ring->ready = false;
2963 if (amdgpu_sriov_vf(adev)) {
2964 ring = &adev->gfx.kiq.ring;
2966 r = amdgpu_ring_test_ring(ring);
2968 ring->ready = false;
2971 gfx_v9_0_enable_gui_idle_interrupt(adev, true);
2976 static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
2978 gfx_v9_0_cp_gfx_enable(adev, enable);
2979 gfx_v9_0_cp_compute_enable(adev, enable);
2982 static int gfx_v9_0_hw_init(void *handle)
2985 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2987 gfx_v9_0_init_golden_registers(adev);
2989 gfx_v9_0_gpu_init(adev);
2991 r = gfx_v9_0_rlc_resume(adev);
2995 r = gfx_v9_0_cp_resume(adev);
2999 r = gfx_v9_0_ngg_en(adev);
3006 static int gfx_v9_0_hw_fini(void *handle)
3008 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3010 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
3011 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
3012 if (amdgpu_sriov_vf(adev)) {
3013 pr_debug("For SRIOV client, nothing to do here\n");
3016 gfx_v9_0_cp_enable(adev, false);
3017 gfx_v9_0_rlc_stop(adev);
3018 gfx_v9_0_cp_compute_fini(adev);
3023 static int gfx_v9_0_suspend(void *handle)
3025 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3027 return gfx_v9_0_hw_fini(adev);
3030 static int gfx_v9_0_resume(void *handle)
3032 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3034 return gfx_v9_0_hw_init(adev);
3037 static bool gfx_v9_0_is_idle(void *handle)
3039 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3041 if (REG_GET_FIELD(RREG32_SOC15(GC, 0, mmGRBM_STATUS),
3042 GRBM_STATUS, GUI_ACTIVE))
3048 static int gfx_v9_0_wait_for_idle(void *handle)
3052 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3054 for (i = 0; i < adev->usec_timeout; i++) {
3055 /* read GRBM_STATUS */
3056 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
3057 GRBM_STATUS__GUI_ACTIVE_MASK;
3059 if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
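/*
 * Soft reset: derive the SOFT_RESET_CP/GFX/RLC request bits from the
 * GRBM_STATUS* busy flags, quiesce the RLC and both CP front ends,
 * then pulse the bits in GRBM_SOFT_RESET with read-backs in between
 * to make sure the writes have landed.
 */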
3066 static int gfx_v9_0_soft_reset(void *handle)
3068 u32 grbm_soft_reset = 0;
3070 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3073 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS);
3074 if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
3075 GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
3076 GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
3077 GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
3078 GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
3079 GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
3080 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3081 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3082 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3083 GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
3086 if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
3087 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3088 GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
3092 tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
3093 if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
3094 grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
3095 GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
3098 if (grbm_soft_reset) {
3100 gfx_v9_0_rlc_stop(adev);
3102 /* Disable GFX parsing/prefetching */
3103 gfx_v9_0_cp_gfx_enable(adev, false);
3105 /* Disable MEC parsing/prefetching */
3106 gfx_v9_0_cp_compute_enable(adev, false);
3108 if (grbm_soft_reset) {
3109 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3110 tmp |= grbm_soft_reset;
3111 dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3112 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3113 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3117 tmp &= ~grbm_soft_reset;
3118 WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
3119 tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
3122 /* Wait a little for things to settle down */
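/*
 * Sample the 64-bit GPU clock: writing RLC_CAPTURE_GPU_CLOCK_COUNT
 * latches the counter so the LSB/MSB halves can be read back
 * consistently; gpu_clock_mutex serializes concurrent readers.
 */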
3128 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
3132 mutex_lock(&adev->gfx.gpu_clock_mutex);
3133 WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
3134 clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
3135 ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
3136 mutex_unlock(&adev->gfx.gpu_clock_mutex);
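/*
 * GDS switch: convert the base/size of each allocation into the
 * hardware's shifted units and write the per-VMID GDS, GWS and OA
 * registers with ordinary WRITE_DATA packets on the ring.
 */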
3140 static void gfx_v9_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
3142 uint32_t gds_base, uint32_t gds_size,
3143 uint32_t gws_base, uint32_t gws_size,
3144 uint32_t oa_base, uint32_t oa_size)
3146 gds_base = gds_base >> AMDGPU_GDS_SHIFT;
3147 gds_size = gds_size >> AMDGPU_GDS_SHIFT;
3149 gws_base = gws_base >> AMDGPU_GWS_SHIFT;
3150 gws_size = gws_size >> AMDGPU_GWS_SHIFT;
3152 oa_base = oa_base >> AMDGPU_OA_SHIFT;
3153 oa_size = oa_size >> AMDGPU_OA_SHIFT;
3156 gfx_v9_0_write_data_to_reg(ring, 0, false,
3157 amdgpu_gds_reg_offset[vmid].mem_base,
3161 gfx_v9_0_write_data_to_reg(ring, 0, false,
3162 amdgpu_gds_reg_offset[vmid].mem_size,
3166 gfx_v9_0_write_data_to_reg(ring, 0, false,
3167 amdgpu_gds_reg_offset[vmid].gws,
3168 gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
3171 gfx_v9_0_write_data_to_reg(ring, 0, false,
3172 amdgpu_gds_reg_offset[vmid].oa,
3173 (1 << (oa_size + oa_base)) - (1 << oa_base));
3176 static int gfx_v9_0_early_init(void *handle)
3178 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3180 adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
3181 adev->gfx.num_compute_rings = GFX9_NUM_COMPUTE_RINGS;
3182 gfx_v9_0_set_ring_funcs(adev);
3183 gfx_v9_0_set_irq_funcs(adev);
3184 gfx_v9_0_set_gds_init(adev);
3185 gfx_v9_0_set_rlc_funcs(adev);
3190 static int gfx_v9_0_late_init(void *handle)
3192 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3195 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
3199 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
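/*
 * RLC safe mode: when clockgating is active, post a SAFE_MODE command
 * with the message bit set and poll for the CMD field to clear before
 * touching CG-sensitive registers; the exit path below sends the
 * command without the message bit to leave safe mode again.
 */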
3206 static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3208 uint32_t rlc_setting, data;
3211 if (adev->gfx.rlc.in_safe_mode)
3214 /* if RLC is not enabled, do nothing */
3215 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3216 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3219 if (adev->cg_flags &
3220 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
3221 AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3222 data = RLC_SAFE_MODE__CMD_MASK;
3223 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
3224 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3226 /* wait for RLC_SAFE_MODE */
3227 for (i = 0; i < adev->usec_timeout; i++) {
3228 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
3232 adev->gfx.rlc.in_safe_mode = true;
3236 static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3238 uint32_t rlc_setting, data;
3240 if (!adev->gfx.rlc.in_safe_mode)
3243 /* if RLC is not enabled, do nothing */
3244 rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
3245 if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
3248 if (adev->cg_flags &
3249 (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
3251 * Try to exit safe mode only if it is already in safe mode.
3254 data = RLC_SAFE_MODE__CMD_MASK;
3255 WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
3256 adev->gfx.rlc.in_safe_mode = false;
3260 static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
3263 /* TODO: double check if we need to perform under safe mode */
3264 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3266 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
3267 gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
3268 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
3269 gfx_v9_0_enable_gfx_pipeline_powergating(adev, true);
3271 gfx_v9_0_enable_gfx_cg_power_gating(adev, false);
3272 gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
3275 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
3278 static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
3281 /* TODO: double check if we need to perform under safe mode */
3282 /* gfx_v9_0_enter_rlc_safe_mode(adev); */
3284 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
3285 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, true);
3287 gfx_v9_0_enable_gfx_static_mg_power_gating(adev, false);
3289 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
3290 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, true);
3292 gfx_v9_0_enable_gfx_dynamic_mg_power_gating(adev, false);
3294 /* gfx_v9_0_exit_rlc_safe_mode(adev); */
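/*
 * The clock-gating helpers below follow a strict ordering (see
 * gfx_v9_0_update_gfx_clock_gating()): MGCG/MGLS before CGCG/CGLS
 * when enabling and the reverse when disabling. Each helper flips the
 * relevant override bits in RLC_CGTT_MGCG_OVERRIDE and the CGCG/CGLS
 * FSM controls, comparing against the old value to skip redundant
 * register writes.
 */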
3297 static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
3302 /* It is disabled by HW by default */
3303 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3304 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
3305 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3306 data &= ~(RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3307 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3308 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3309 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3311 /* only for Vega10 & Raven1 */
3312 data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
3315 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3317 /* MGLS is a global flag to control all MGLS in GFX */
3318 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3319 /* 2 - RLC memory Light sleep */
3320 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
3321 def = data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3322 data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3324 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3326 /* 3 - CP memory Light sleep */
3327 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3328 def = data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3329 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3331 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3335 /* 1 - MGCG_OVERRIDE */
3336 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3337 data |= (RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK |
3338 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
3339 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
3340 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
3341 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
3343 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3345 /* 2 - disable MGLS in RLC */
3346 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3347 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
3348 data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
3349 WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
3352 /* 3 - disable MGLS in CP */
3353 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3354 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
3355 data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3356 WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
3361 static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
3366 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3368 /* Enable 3D CGCG/CGLS */
3369 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
3370 /* write cmd to clear cgcg/cgls ov */
3371 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3372 /* unset CGCG override */
3373 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
3374 /* update CGCG and CGLS override bits */
3376 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3377 /* enable the 3D CGCG FSM (0x0020003f) */
3378 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3379 data = (0x2000 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3380 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
3381 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
3382 data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3383 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
3385 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3387 /* set IDLE_POLL_COUNT(0x00900100) */
3388 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3389 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3390 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3392 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3394 /* Disable CGCG/CGLS */
3395 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3396 /* disable cgcg, cgls should be disabled */
3397 data &= ~(RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK |
3398 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK);
3399 /* disable cgcg and cgls in FSM */
3401 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
3404 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3407 static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
3412 adev->gfx.rlc.funcs->enter_safe_mode(adev);
3414 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3415 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3416 /* unset CGCG override */
3417 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
3418 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3419 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3421 data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
3422 /* update CGCG and CGLS override bits */
3424 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
3426 /* enable the CGCG FSM (0x0020003F) */
3427 def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3428 data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
3429 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
3430 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
3431 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
3432 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
3434 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3436 /* set IDLE_POLL_COUNT(0x00900100) */
3437 def = RREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL);
3438 data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
3439 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
3441 WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_CNTL, data);
3443 def = data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3444 /* reset CGCG/CGLS bits */
3445 data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
3446 /* disable cgcg and cgls in FSM */
3448 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
3451 adev->gfx.rlc.funcs->exit_safe_mode(adev);
3454 static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
3458 /* CGCG/CGLS should be enabled after MGCG/MGLS
3459 * === MGCG + MGLS ===
3461 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3462 /* === CGCG /CGLS for GFX 3D Only === */
3463 gfx_v9_0_update_3d_clock_gating(adev, enable);
3464 /* === CGCG + CGLS === */
3465 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3467 /* CGCG/CGLS should be disabled before MGCG/MGLS
3468 * === CGCG + CGLS ===
3470 gfx_v9_0_update_coarse_grain_clock_gating(adev, enable);
3471 /* === CGCG /CGLS for GFX 3D Only === */
3472 gfx_v9_0_update_3d_clock_gating(adev, enable);
3473 /* === MGCG + MGLS === */
3474 gfx_v9_0_update_medium_grain_clock_gating(adev, enable);
3479 static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
3480 .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
3481 .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
3484 static int gfx_v9_0_set_powergating_state(void *handle,
3485 enum amd_powergating_state state)
3487 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3488 bool enable = (state == AMD_PG_STATE_GATE);
3490 switch (adev->asic_type) {
3492 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3493 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
3494 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
3496 gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false);
3497 gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false);
3500 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3501 gfx_v9_0_enable_cp_power_gating(adev, true);
3503 gfx_v9_0_enable_cp_power_gating(adev, false);
3505 /* update gfx cgpg state */
3506 gfx_v9_0_update_gfx_cg_power_gating(adev, enable);
3508 /* update mgcg state */
3509 gfx_v9_0_update_gfx_mg_power_gating(adev, enable);
3518 static int gfx_v9_0_set_clockgating_state(void *handle,
3519 enum amd_clockgating_state state)
3521 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3523 if (amdgpu_sriov_vf(adev))
3526 switch (adev->asic_type) {
3529 gfx_v9_0_update_gfx_clock_gating(adev,
3530 state == AMD_CG_STATE_GATE);
3538 static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
3540 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3543 if (amdgpu_sriov_vf(adev))
3546 /* AMD_CG_SUPPORT_GFX_MGCG */
3547 data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
3548 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
3549 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
3551 /* AMD_CG_SUPPORT_GFX_CGCG */
3552 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
3553 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
3554 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
3556 /* AMD_CG_SUPPORT_GFX_CGLS */
3557 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
3558 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
3560 /* AMD_CG_SUPPORT_GFX_RLC_LS */
3561 data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
3562 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
3563 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
3565 /* AMD_CG_SUPPORT_GFX_CP_LS */
3566 data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
3567 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
3568 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
3570 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
3571 data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
3572 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
3573 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
3575 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
3576 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
3577 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
3580 static u64 gfx_v9_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
3582 return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 is 32bit rptr */
3585 static u64 gfx_v9_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
3587 struct amdgpu_device *adev = ring->adev;
3590 /* XXX check if swapping is necessary on BE */
3591 if (ring->use_doorbell) {
3592 wptr = atomic64_read((atomic64_t *)&adev->wb.wb[ring->wptr_offs]);
3594 wptr = RREG32_SOC15(GC, 0, mmCP_RB0_WPTR);
3595 wptr += (u64)RREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI) << 32;
3601 static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
3603 struct amdgpu_device *adev = ring->adev;
3605 if (ring->use_doorbell) {
3606 /* XXX check if swapping is necessary on BE */
3607 atomic64_set((atomic64_t*)&adev->wb.wb[ring->wptr_offs], ring->wptr);
3608 WDOORBELL64(ring->doorbell_index, ring->wptr);
3610 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
3611 WREG32_SOC15(GC, 0, mmCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
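/*
 * HDP flush on the ring: pick the NBIO ref/mask bit for this engine
 * (cp0 for gfx, a per-pipe cp mask for compute) and emit a
 * WAIT_REG_MEM handshake against the NBIO hdp_flush req/done
 * registers so the host data path is drained before dependent work
 * runs.
 */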
3615 static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3617 u32 ref_and_mask, reg_mem_engine;
3618 struct nbio_hdp_flush_reg *nbio_hf_reg;
3620 if (ring->adev->asic_type == CHIP_VEGA10)
3621 nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
3623 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
3626 ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
3629 ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
3636 ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
3637 reg_mem_engine = 1; /* pfp */
3640 gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
3641 nbio_hf_reg->hdp_flush_req_offset,
3642 nbio_hf_reg->hdp_flush_done_offset,
3643 ref_and_mask, ref_and_mask, 0x20);
3646 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
3648 gfx_v9_0_write_data_to_reg(ring, 0, true,
3649 SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
3652 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3653 struct amdgpu_ib *ib,
3654 unsigned vm_id, bool ctx_switch)
3656 u32 header, control = 0;
3658 if (ib->flags & AMDGPU_IB_FLAG_CE)
3659 header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3661 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3663 control |= ib->length_dw | (vm_id << 24);
3665 if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
3666 control |= INDIRECT_BUFFER_PRE_ENB(1);
3668 if (!(ib->flags & AMDGPU_IB_FLAG_CE))
3669 gfx_v9_0_ring_emit_de_meta(ring);
3672 amdgpu_ring_write(ring, header);
3673 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3674 amdgpu_ring_write(ring,
3678 lower_32_bits(ib->gpu_addr));
3679 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3680 amdgpu_ring_write(ring, control);
3683 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3684 struct amdgpu_ib *ib,
3685 unsigned vm_id, bool ctx_switch)
3687 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
3689 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
3690 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
3691 amdgpu_ring_write(ring,
3695 lower_32_bits(ib->gpu_addr));
3696 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
3697 amdgpu_ring_write(ring, control);
3700 static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
3701 u64 seq, unsigned flags)
3703 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
3704 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
3706 /* RELEASE_MEM - flush caches, send int */
3707 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
3708 amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
3710 EOP_TC_WB_ACTION_EN |
3711 EOP_TC_MD_ACTION_EN |
3712 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
3714 amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
3717 * the address should be Qword aligned for a 64-bit write, Dword
3718 * aligned when only the low 32 bits are sent (data high discarded)
3724 amdgpu_ring_write(ring, lower_32_bits(addr));
3725 amdgpu_ring_write(ring, upper_32_bits(addr));
3726 amdgpu_ring_write(ring, lower_32_bits(seq));
3727 amdgpu_ring_write(ring, upper_32_bits(seq));
3728 amdgpu_ring_write(ring, 0);
3731 static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
3733 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
3734 uint32_t seq = ring->fence_drv.sync_seq;
3735 uint64_t addr = ring->fence_drv.gpu_addr;
3737 gfx_v9_0_wait_reg_mem(ring, usepfp, 1, 0,
3738 lower_32_bits(addr), upper_32_bits(addr),
3739 seq, 0xffffffff, 4);
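/*
 * VM flush on the ring: write the page-directory address into the
 * hub's per-VMID PTB registers, trigger the per-ring invalidate
 * engine, and poll its ack bit; on gfx rings a trailing PFP_SYNC_ME
 * keeps the prefetcher from using stale translations.
 */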
static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vm_id, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
	unsigned eng = ring->vm_inv_eng;

	pd_addr = ring->adev->gart.gart_funcs->adjust_mc_addr(ring->adev, pd_addr);
	pd_addr = pd_addr | 0x1; /* valid bit */
	/* now only use physical base address of PDE and valid */
	BUG_ON(pd_addr & 0xFFFF00000000003EULL);

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_lo32 + (2 * vm_id),
				   lower_32_bits(pd_addr));

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->ctx0_ptb_addr_hi32 + (2 * vm_id),
				   upper_32_bits(pd_addr));

	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
				   hub->vm_inv_eng0_req + eng, req);

	/* wait for the invalidate to complete */
	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
			      eng, 0, 1 << vm_id, 1 << vm_id, 0x20);

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
}
static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
	else
		BUG();
	return wptr;
}
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG(); /* only DOORBELL method supported on gfx9 now */
	}
}
static void gfx_v9_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned int flags)
{
	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}
static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
	amdgpu_ring_write(ring, 0);
}
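/*
 * The CE/DE metadata helpers below copy a payload into the per-ring context
 * save area (CSA) with a single WRITE_DATA packet.  The packet count is the
 * payload size in dwords, plus the three setup dwords (control word and
 * destination address low/high), minus one for the PACKET3 count convention;
 * the source writes this as (sizeof(payload) >> 2) + 4 - 2.
 */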
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
	static struct v9_ce_ib_state ce_payload = {0};
	uint64_t csa_addr;
	int cnt;

	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, ce_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&ce_payload, sizeof(ce_payload) >> 2);
}
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
	static struct v9_de_ib_state de_payload = {0};
	uint64_t csa_addr, gds_addr;
	int cnt;

	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
	gds_addr = csa_addr + 4096;
	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write(ring, upper_32_bits(csa_addr + offsetof(struct v9_gfx_meta_data, de_payload)));
	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	if (amdgpu_sriov_vf(ring->adev))
		gfx_v9_0_ring_emit_ce_meta(ring);

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;

		/* set load_ce_ram if preamble presented */
		if (AMDGPU_PREAMBLE_IB_PRESENT & flags)
			dw2 |= 0x10000000;
	} else {
		/* still load_ce_ram if this is the first time the preamble is
		 * presented, even though no context switch happens.
		 */
		if (AMDGPU_PREAMBLE_IB_PRESENT_FIRST & flags)
			dw2 |= 0x10000000;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
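/*
 * init_cond_exec emits a COND_EXEC packet whose "number of dwords to skip"
 * field is filled with a dummy value (0x55aa55aa) and returns its ring
 * offset; once the protected section has been emitted, patch_cond_exec
 * rewrites that dword with the real distance from the packet to the current
 * write pointer, handling ring-buffer wrap-around.
 */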
static unsigned gfx_v9_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
	return ret;
}
static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr & ring->buf_mask) - 1;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* 0: frame_begin, 1: frame_end */
}
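/*
 * Read a register from within the command stream (used for SR-IOV register
 * access via the KIQ): COPY_DATA moves the register value into the shared
 * writeback page, from which the driver reads it back.
 */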
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				adev->virt.reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				    uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       TIME_STAMP_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}
}
static void gfx_v9_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only pipe 0 of MEC1. That's why this function only
	 * handles the setting of interrupts for this specific pipe. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *source,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_REG_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
			       PRIV_INSTR_INT_ENABLE,
			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v9_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v9_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			/* Per-queue interrupt is supported for MEC starting from VI.
			 * The interrupt can only be enabled/disabled per pipe instead of per queue.
			 */
			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned int type,
					    enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	if (ring->me == 1)
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL);
	else
		target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32(target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);

			tmp = RREG32(target);
			tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32(target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}
static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;
	DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n",
		  me_id, pipe_id, queue_id);

	amdgpu_fence_process(ring);
	return 0;
}
const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
	.name = "gfx_v9_0",
	.early_init = gfx_v9_0_early_init,
	.late_init = gfx_v9_0_late_init,
	.sw_init = gfx_v9_0_sw_init,
	.sw_fini = gfx_v9_0_sw_fini,
	.hw_init = gfx_v9_0_hw_init,
	.hw_fini = gfx_v9_0_hw_fini,
	.suspend = gfx_v9_0_suspend,
	.resume = gfx_v9_0_resume,
	.is_idle = gfx_v9_0_is_idle,
	.wait_for_idle = gfx_v9_0_wait_for_idle,
	.soft_reset = gfx_v9_0_soft_reset,
	.set_clockgating_state = gfx_v9_0_set_clockgating_state,
	.set_powergating_state = gfx_v9_0_set_powergating_state,
	.get_clockgating_state = gfx_v9_0_get_clockgating_state,
};
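/*
 * emit_frame_size in the ring-funcs tables below is the worst-case number of
 * ring dwords the fixed per-frame packets may consume (the per-packet counts
 * are annotated inline); amdgpu uses it to reserve ring space before
 * emission.
 */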
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v9_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v9_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 +  /* COND_EXEC */
		7 +  /* PIPELINE_SYNC */
		24 + /* VM_FLUSH */
		8 +  /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		4 + /* double SWITCH_BUFFER,
		     * the first COND_EXEC jump to the place just
		     * prior to this double SWITCH_BUFFER
		     */
		5 +  /* COND_EXEC */
		7 +  /* HDP_flush */
		4 +  /* VGT_flush */
		14 + /* CE_META */
		31 + /* DE_META */
		3 +  /* CNTX_CTRL */
		5 +  /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		2, /* SWITCH_BUFFER */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_switch_buffer = gfx_v9_ring_emit_sb,
	.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
	.emit_tmz = gfx_v9_0_ring_emit_tmz,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v9_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB,
	.get_rptr = gfx_v9_0_ring_get_rptr_compute,
	.get_wptr = gfx_v9_0_ring_get_wptr_compute,
	.set_wptr = gfx_v9_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v9_0_ring_emit_gds_switch */
		7 + /* gfx_v9_0_ring_emit_hdp_flush */
		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
		24 + /* gfx_v9_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size =	4, /* gfx_v9_0_ring_emit_ib_compute */
	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
	.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
	.test_ring = gfx_v9_0_ring_test_ring,
	.test_ib = gfx_v9_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v9_0_ring_emit_rreg,
	.emit_wreg = gfx_v9_0_ring_emit_wreg,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v9_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v9_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = {
	.set = gfx_v9_0_kiq_set_interrupt_state,
	.process = gfx_v9_0_kiq_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = {
	.set = gfx_v9_0_set_eop_interrupt_state,
	.process = gfx_v9_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_reg_irq_funcs = {
	.set = gfx_v9_0_set_priv_reg_fault_state,
	.process = gfx_v9_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
	.set = gfx_v9_0_set_priv_inst_fault_state,
	.process = gfx_v9_0_priv_inst_irq,
};
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v9_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v9_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;

	adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST;
	adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
		break;
	default:
		break;
	}
}
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init ASIC gds info */
	adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}
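/*
 * The fuse registers report *inactive* CUs per shader array; OR-ing the
 * factory and user configs, masking to max_cu_per_sh and inverting yields
 * the active-CU bitmap.  get_cu_info() then walks every SE/SH pair under
 * the GRBM index mutex to build the per-array bitmaps, the total active CU
 * count and the always-on (AO) CU mask.
 */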
static u32 gfx_v9_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32_SOC15(GC, 0, mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = gfx_v9_0_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
				struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;

	if (!adev || !cu_info)
		return -EINVAL;

	memset(cu_info, 0, sizeof(*cu_info));

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff);
			bitmap = gfx_v9_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < adev->gfx.config.max_cu_per_sh)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
		}
	}
	gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;

	return 0;
}
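/*
 * One-time HQD setup for a compute queue: allocate, pin and map the MQD
 * buffer object, fill in the MQD, then program the HQD registers from it
 * under the SRBM mutex (after selecting the queue's me/pipe/queue) and
 * finally mark the queue active.
 */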
static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
{
	int r, j;
	u32 tmp;
	bool use_doorbell = true;
	u64 hqd_gpu_addr;
	u64 mqd_gpu_addr;
	u64 eop_gpu_addr;
	u64 wb_gpu_addr;
	u32 *buf;
	struct v9_mqd *mqd;
	struct amdgpu_device *adev;

	adev = ring->adev;
	if (ring->mqd_obj == NULL) {
		r = amdgpu_bo_create(adev,
				     sizeof(struct v9_mqd),
				     PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
				     NULL, &ring->mqd_obj);
		if (r) {
			dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
			return r;
		}
	}

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0)) {
		gfx_v9_0_cp_compute_fini(adev);
		return r;
	}

	r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
			  &mqd_gpu_addr);
	if (r) {
		dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
		gfx_v9_0_cp_compute_fini(adev);
		return r;
	}
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf);
	if (r) {
		dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
		gfx_v9_0_cp_compute_fini(adev);
		return r;
	}
	/* init the mqd struct */
	memset(buf, 0, sizeof(struct v9_mqd));

	mqd = (struct v9_mqd *)buf;
	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000003;
	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, ring->me,
			  ring->pipe,
			  ring->queue, 0);
	/* disable wptr polling */
	WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);

	/* write the EOP addr */
	BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (ring->queue * MEC_HPD_SIZE);
	eop_gpu_addr >>= 8;

	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR, lower_32_bits(eop_gpu_addr));
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr));
	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_gpu_addr);
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_gpu_addr);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(MEC_HPD_SIZE / 4) - 1));
	WREG32_SOC15(GC, 0, mmCP_HQD_EOP_CONTROL, tmp);
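	/*
	 * Worked example (assuming MEC_HPD_SIZE is 4096 bytes): 4096 / 4 =
	 * 1024 dwords, order_base_2(1024) - 1 = 9, and the hardware decodes
	 * EOP_SIZE=9 as 2^(9+1) = 1024 dwords, matching the buffer size.
	 */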
	/* enable doorbell? */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
	if (use_doorbell)
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0);

	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, tmp);
	mqd->cp_hqd_pq_doorbell_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_dequeue_request = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;
	if (RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
		WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);
	}
	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set MQD vmid to 0 */
	tmp = RREG32_SOC15(GC, 0, mmCP_MQD_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	WREG32_SOC15(GC, 0, mmCP_MQD_CONTROL, tmp);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
#endif
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_CONTROL, tmp);
	mqd->cp_hqd_pq_control = tmp;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);
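	/*
	 * The MEC doorbell range below spans the KIQ doorbell through the
	 * last MEC ring doorbell, so that doorbell writes within that window
	 * are routed to this engine.
	 */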
	/* enable the doorbell if requested */
	if (use_doorbell) {
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
			     (AMDGPU_DOORBELL64_KIQ * 2) << 2);
		WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
			     (AMDGPU_DOORBELL64_MEC_RING7 * 2) << 2);
		tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0);
		mqd->cp_hqd_pq_doorbell_control = tmp;
	} else {
		mqd->cp_hqd_pq_doorbell_control = 0;
	}
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, mqd->cp_hqd_pq_wptr_lo);
	WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, mqd->cp_hqd_pq_wptr_hi);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;
	WREG32_SOC15(GC, 0, mmCP_HQD_VMID, mqd->cp_hqd_vmid);

	tmp = RREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
	WREG32_SOC15(GC, 0, mmCP_HQD_PERSISTENT_STATE, tmp);
	mqd->cp_hqd_persistent_state = tmp;

	/* activate the queue */
	mqd->cp_hqd_active = 1;
	WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, mqd->cp_hqd_active);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);

	if (use_doorbell)
		WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}
const struct amdgpu_ip_block_version gfx_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v9_0_ip_funcs,
};