/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
#define GFX11_NUM_GFX_RINGS		1
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1

MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_11_0_0[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_rlc_spm_11_0[] =
{
	/* Pending on emulation bring up */
};

static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};

#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev);
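
/*
 * KIQ (Kernel Interface Queue) PM4 helpers. Queue setup and teardown is
 * done by submitting PM4 packets on the KIQ ring instead of programming
 * the CP queue registers directly.
 */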
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
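
/* Emit a MAP_QUEUES packet that tells the CP firmware where the ring's MQD
 * and write pointer live and which me/pipe/queue slot the ring occupies.
 */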
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
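
/* Unmap a queue through the KIQ. When MES is enabled and the KIQ ring is
 * not ready, the request is routed through the MES legacy queue interface
 * instead of a PM4 packet.
 */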
static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq.ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}
static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				uint16_t pasid, uint32_t flush_type,
				bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}
static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq.pmf = &gfx_v11_0_kiq_pm4_funcs;
}
static void gfx_v11_0_init_spm_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_rlc_spm_11_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_11_0));
		break;
	default:
		break;
	}
}

static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_0));
		break;
	case IP_VERSION(11, 0, 1):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
	gfx_v11_0_init_spm_golden_registers(adev);
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
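
/* Basic ring test: emit a write of a token to a scratch register and poll
 * until it reads back, proving the CP front end is fetching and executing
 * packets from this ring.
 */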
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}
	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}
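
/* IB test: execute a small indirect buffer that writes a token to a
 * writeback slot (or to the MES context padding area for MES queues) and
 * verify the value lands in memory.
 */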
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw hasn't indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));
	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);
		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);
	return r;
}
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;

	kfree(adev->gfx.rlc.register_list_format);
}
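
/* Parse the optional save/restore list sections carried by RLC firmware
 * headers v2.1 and newer.
 */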
static void gfx_v11_0_init_rlc_ext_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_1 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
	adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
	adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
	adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
	adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
	adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
	adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
	adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
	adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
	adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
	adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
	adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
	adev->gfx.rlc.reg_list_format_direct_reg_list_length =
			le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
}

static void gfx_v11_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_2 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
	adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
	adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
}

static void gfx_v11_0_init_rlcp_rlcv_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_3 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
	adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
	adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
	adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
}
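
/**
 * gfx_v11_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */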
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	unsigned int *tmp = NULL;
	unsigned int i = 0;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");
	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
		adev->gfx.me_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
		adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	}
	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);

		adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
		adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
		adev->gfx.rlc.save_and_restore_offset =
			le32_to_cpu(rlc_hdr->save_and_restore_offset);
		adev->gfx.rlc.clear_state_descriptor_offset =
			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
		adev->gfx.rlc.avail_scratch_ram_locations =
			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
		adev->gfx.rlc.reg_restore_list_size =
			le32_to_cpu(rlc_hdr->reg_restore_list_size);
		adev->gfx.rlc.reg_list_format_start =
			le32_to_cpu(rlc_hdr->reg_list_format_start);
		adev->gfx.rlc.reg_list_format_separate_start =
			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
		adev->gfx.rlc.starting_offsets_start =
			le32_to_cpu(rlc_hdr->starting_offsets_start);
		adev->gfx.rlc.reg_list_format_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
		adev->gfx.rlc.reg_list_size_bytes =
			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
		adev->gfx.rlc.register_list_format =
			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
		if (!adev->gfx.rlc.register_list_format) {
			err = -ENOMEM;
			goto out;
		}

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
			adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);

		adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;

		tmp = (unsigned int *)((uintptr_t)rlc_hdr +
				le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
			adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);

		if (version_major == 2) {
			if (version_minor >= 1)
				gfx_v11_0_init_rlc_ext_microcode(adev);
			if (version_minor >= 2)
				gfx_v11_0_init_rlc_iram_dram_microcode(adev);
			if (version_minor == 3)
				gfx_v11_0_init_rlcp_rlcv_microcode(adev);
		}
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
	} else {
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
	}

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (adev->gfx.rs64_enable) {
			cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.pfp_fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.me_fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK];
			info->ucode_id = AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr_v2_0->data_size_bytes), PAGE_SIZE);
		} else {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
			info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
			info->fw = adev->gfx.pfp_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
			info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
			info->fw = adev->gfx.me_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
			info->fw = adev->gfx.mec_fw;
			header = (const struct common_firmware_header *)info->fw->data;
			cp_hdr = (const struct gfx_firmware_header_v1_0 *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes) -
				      le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1_JT];
			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1_JT;
			info->fw = adev->gfx.mec_fw;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(cp_hdr->jt_size) * 4, PAGE_SIZE);
		}
		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
		info->fw = adev->gfx.rlc_fw;
		if (info->fw) {
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}

		if (adev->gfx.rlc.save_restore_list_gpm_size_bytes &&
		    adev->gfx.rlc.save_restore_list_srm_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlc_iram_ucode_size_bytes &&
		    adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);

			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
		}

		if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
			info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
			info->fw = adev->gfx.rlc_fw;
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
		}
	}

out:
	if (err) {
		dev_err(adev->dev,
			"gfx11: Failed to load firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
	}

	return err;
}
static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];
	char ucode_prefix[30];

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
	if (err)
		goto out;

	err = amdgpu_ucode_validate(adev->psp.toc_fw);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	dev_err(adev->dev, "Failed to load TOC microcode\n");
	release_firmware(adev->psp.toc_fw);
	adev->psp.toc_fw = NULL;
	return err;
}
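
/* Compute the size in dwords of the clear-state buffer built from the
 * gfx11 clear-state tables; gfx_v11_0_get_csb_buffer() below must emit
 * exactly this many dwords.
 */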
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			&adev->gfx.rlc.clear_state_gpu_addr,
			(void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			&adev->gfx.rlc.cp_table_gpu_addr,
			(void **)&adev->gfx.rlc.cp_table_ptr);
}
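
/* Tell the RLCG register access path which scratch/index registers to use
 * when register writes have to be routed through the RLC (e.g. under
 * SR-IOV).
 */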
static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}
static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}
static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static int gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	int r;

	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);

	r = gfx_v11_0_init_microcode(adev);
	if (r)
		DRM_ERROR("Failed to load gfx firmware!\n");

	return r;
}

static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}
static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 2 wave data */
	dst[(*no_fields)++] = 2;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}
static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}
static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.init_spm_golden = &gfx_v11_0_init_spm_golden_registers,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}
static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];
	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}
static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}
static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];
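
/* Walk the RLC table of contents and record each firmware image's offset
 * and size; the TOC stores them in dwords, converted to bytes here.
 */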
static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;
		ucode++;
	}
}
static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
}
static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	int r;
	uint32_t total_size;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
					SOC21_FIRMWARE_ID id,
					const void *fw_data,
					uint32_t fw_size,
					uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
					uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
					data, size, fw_autoload_mask);
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
					uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
						fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
						fw_data, fw_size, fw_autoload_mask);

		/* me ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
						fw_data, fw_size, fw_autoload_mask);

		/* mec ucode */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
						fw_data, fw_size, fw_autoload_mask);
	}
	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
					fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
					fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
					fw_data, fw_size, fw_autoload_mask);
		}
	}
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
					uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
					uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}
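
/* RLC backdoor autoload: stage SDMA/CP/MES/TOC images in one buffer, point
 * the IMU bootloader at the RLC_G image, then let the IMU and RLC load the
 * remaining firmwares listed in the TOC.
 */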
static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4 init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5 disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}
static int gfx_v11_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}
	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			r = adev->gfx.imu.funcs->init_microcode(adev);
			if (r)
				DRM_ERROR("Failed to load imu firmware!\n");
		}
	}

	r = gfx_v11_0_me_init(adev);
	if (r)
		return r;

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}
	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;
				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k,
								     j))
					continue;
				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}
1684 if (!adev->enable_mes_kiq) {
1685 r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE);
1687 DRM_ERROR("Failed to init KIQ BOs!\n");
1691 kiq = &adev->gfx.kiq;
1692 r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq);
1697 r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd));
1698 if (r)
1699 return r;
1701 /* allocate visible FB for rlc auto-loading fw */
1702 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
1703 r = gfx_v11_0_init_toc_microcode(adev);
1704 if (r)
1705 dev_err(adev->dev, "Failed to load toc firmware!\n");
1706 r = gfx_v11_0_rlc_autoload_buffer_init(adev);
1707 if (r)
1708 return r;
1709 }
1711 r = gfx_v11_0_gpu_early_init(adev);
1712 if (r)
1713 return r;
1715 return 0;
1716 }
1718 static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
1719 {
1720 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
1721 &adev->gfx.pfp.pfp_fw_gpu_addr,
1722 (void **)&adev->gfx.pfp.pfp_fw_ptr);
1724 amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
1725 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
1726 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
1727 }
1729 static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
1730 {
1731 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
1732 &adev->gfx.me.me_fw_gpu_addr,
1733 (void **)&adev->gfx.me.me_fw_ptr);
1735 amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
1736 &adev->gfx.me.me_fw_data_gpu_addr,
1737 (void **)&adev->gfx.me.me_fw_data_ptr);
1738 }
1740 static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
1741 {
1742 amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
1743 &adev->gfx.rlc.rlc_autoload_gpu_addr,
1744 (void **)&adev->gfx.rlc.rlc_autoload_ptr);
1745 }
1747 static int gfx_v11_0_sw_fini(void *handle)
1748 {
1749 int i;
1750 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1752 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
1753 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
1754 for (i = 0; i < adev->gfx.num_compute_rings; i++)
1755 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1757 amdgpu_gfx_mqd_sw_fini(adev);
1759 if (!adev->enable_mes_kiq) {
1760 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring);
1761 amdgpu_gfx_kiq_fini(adev);
1762 }
1764 gfx_v11_0_pfp_fini(adev);
1765 gfx_v11_0_me_fini(adev);
1766 gfx_v11_0_rlc_fini(adev);
1767 gfx_v11_0_mec_fini(adev);
1769 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
1770 gfx_v11_0_rlc_autoload_buffer_fini(adev);
1772 gfx_v11_0_free_microcode(adev);
1774 return 0;
1775 }
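/*
 * gfx_v11_0_select_se_sh() steers indexed register accesses through
 * GRBM_GFX_INDEX; passing 0xffffffff for se_num, sh_num or instance
 * selects broadcast writes to every instance of that unit instead of
 * a single index.
 */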
1777 static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
1778 u32 sh_num, u32 instance)
1779 {
1780 u32 data;
1782 if (instance == 0xffffffff)
1783 data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
1784 INSTANCE_BROADCAST_WRITES, 1);
1785 else
1786 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
1787 instance);
1789 if (se_num == 0xffffffff)
1790 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
1791 1);
1792 else
1793 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
1795 if (sh_num == 0xffffffff)
1796 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
1797 1);
1798 else
1799 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);
1801 WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
1802 }
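/*
 * The active-RB mask is derived from the harvest configuration: bits
 * set in CC_RB_BACKEND_DISABLE / GC_USER_RB_BACKEND_DISABLE mark
 * disabled backends, so the result below is their complement, masked
 * to the number of RBs per shader array.
 */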
1804 static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
1805 {
1806 u32 data, mask;
1808 data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
1809 data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
1811 data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
1812 data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
1814 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
1815 adev->gfx.config.max_sh_per_se);
1817 return (~data) & mask;
1818 }
1820 static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
1821 {
1822 int i, j;
1823 u32 data;
1824 u32 active_rbs = 0;
1825 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
1826 adev->gfx.config.max_sh_per_se;
1828 mutex_lock(&adev->grbm_idx_mutex);
1829 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1830 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1831 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
1832 data = gfx_v11_0_get_rb_active_bitmap(adev);
1833 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1834 rb_bitmap_width_per_sh);
1835 }
1836 }
1837 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1838 mutex_unlock(&adev->grbm_idx_mutex);
1840 adev->gfx.config.backend_enable_mask = active_rbs;
1841 adev->gfx.config.num_rbs = hweight32(active_rbs);
1842 }
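/*
 * SH_MEM_BASES packs the SHARED_BASE (LDS) and PRIVATE_BASE (scratch)
 * aperture selectors for a VMID; the macros below encode the compute
 * aperture layout described in gfx_v11_0_init_compute_vmid().
 */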
1844 #define DEFAULT_SH_MEM_BASES (0x6000)
1845 #define LDS_APP_BASE 0x1
1846 #define SCRATCH_APP_BASE 0x2
1848 static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
1849 {
1850 int i;
1851 uint32_t sh_mem_bases;
1852 uint32_t data;
1854 /*
1855 * Configure apertures:
1856 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
1857 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
1858 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
1859 */
1860 sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
1863 mutex_lock(&adev->srbm_mutex);
1864 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1865 soc21_grbm_select(adev, 0, 0, 0, i);
1866 /* CP and shaders */
1867 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1868 WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);
1870 /* Enable trap for each kfd vmid. */
1871 data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL));
1872 data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1873 }
1874 soc21_grbm_select(adev, 0, 0, 0, 0);
1875 mutex_unlock(&adev->srbm_mutex);
1877 /* Initialize all compute VMIDs to have no GDS, GWS, or OA
1878 access. These should be enabled by FW for target VMIDs. */
1879 for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1880 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
1881 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
1882 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
1883 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
1884 }
1885 }
1887 static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
1888 {
1889 int vmid;
1891 /*
1892 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1893 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1894 * the driver can enable them for graphics. VMID0 should maintain
1895 * access so that HWS firmware can save/restore entries.
1896 */
1897 for (vmid = 1; vmid < 16; vmid++) {
1898 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
1899 WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
1900 WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
1901 WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
1902 }
1903 }
1905 static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
1906 {
1907 /* TODO: harvest feature to be added later. */
1908 }
1910 static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
1911 {
1912 /* TCCs are global (not instanced). */
1913 uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
1914 RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);
1916 adev->gfx.config.tcc_disabled_mask =
1917 REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
1918 (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
1919 }
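/*
 * constants_init programs the GRBM read timeout, snapshots the
 * RB/CU/TCC configuration, and then initializes the per-VMID
 * SH_MEM_CONFIG/SH_MEM_BASES state and the compute/GDS VMIDs.
 */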
1921 static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
1922 {
1923 u32 tmp;
1924 int i;
1926 WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
1928 gfx_v11_0_setup_rb(adev);
1929 gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
1930 gfx_v11_0_get_tcc_info(adev);
1931 adev->gfx.config.pa_sc_tile_steering_override = 0;
1933 /* XXX SH_MEM regs */
1934 /* where to put LDS, scratch, GPUVM in FSA64 space */
1935 mutex_lock(&adev->srbm_mutex);
1936 for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids; i++) {
1937 soc21_grbm_select(adev, 0, 0, 0, i);
1938 /* CP and shaders */
1939 WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
1941 tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1942 (adev->gmc.private_aperture_start >> 48));
1943 tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1944 (adev->gmc.shared_aperture_start >> 48));
1945 WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
1948 soc21_grbm_select(adev, 0, 0, 0, 0);
1950 mutex_unlock(&adev->srbm_mutex);
1952 gfx_v11_0_init_compute_vmid(adev);
1953 gfx_v11_0_init_gds_vmid(adev);
1954 }
1956 static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1957 bool enable)
1958 {
1959 u32 tmp;
1961 if (amdgpu_sriov_vf(adev))
1962 return;
1964 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);
1966 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
1967 enable ? 1 : 0);
1968 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
1969 enable ? 1 : 0);
1970 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
1971 enable ? 1 : 0);
1972 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
1973 enable ? 1 : 0);
1975 WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
1976 }
1978 static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
1979 {
1980 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
1982 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
1983 adev->gfx.rlc.clear_state_gpu_addr >> 32);
1984 WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
1985 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
1986 WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
1988 return 0;
1989 }
1991 static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
1992 {
1993 u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);
1995 tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
1996 WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
1997 }
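/*
 * rlc_reset pulses GRBM_SOFT_RESET.SOFT_RESET_RLC with a short delay
 * on each edge so the RLC reliably observes the reset before it is
 * released again.
 */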
1999 static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
2000 {
2001 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2002 udelay(50);
2003 WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
2004 udelay(50);
2005 }
2007 static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
2008 bool enable)
2009 {
2010 uint32_t rlc_pg_cntl;
2012 rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
2014 if (!enable) {
2015 /* RLC_PG_CNTL[23] = 0 (default)
2016 * RLC will wait for handshake acks with SMU
2017 * GFXOFF will be enabled
2018 * RLC_PG_CNTL[23] = 1
2019 * RLC will not issue any message to SMU
2020 * hence no handshake between SMU & RLC
2021 * GFXOFF will be disabled
2022 */
2023 rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2024 } else
2025 rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
2026 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
2028 }
2029 static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
2030 {
2031 /* TODO: re-enable the rlc & smu handshake once the smu
2032 * and gfxoff features work as expected */
2033 if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
2034 gfx_v11_0_rlc_smu_handshake_cntl(adev, false);
2036 WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
2038 }
2040 static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
2041 {
2042 uint32_t tmp;
2044 /* enable Save Restore Machine */
2045 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
2046 tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
2047 tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
2048 WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
2049 }
2051 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
2052 {
2053 const struct rlc_firmware_header_v2_0 *hdr;
2054 const __le32 *fw_data;
2055 unsigned i, fw_size;
2057 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2058 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2059 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
2060 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
2062 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
2063 RLCG_UCODE_LOADING_START_ADDRESS);
2065 for (i = 0; i < fw_size; i++)
2066 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
2067 le32_to_cpup(fw_data++));
2069 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
2070 }
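/*
 * Legacy RLC loading uses the classic ADDR/DATA register pair: point
 * RLC_GPM_UCODE_ADDR at the load offset, stream the image through
 * RLC_GPM_UCODE_DATA (the address auto-increments), then write the
 * firmware version to the ADDR register as the end-of-load marker.
 * The IRAM/DRAM and RLCP/RLCV loaders below follow the same pattern.
 */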
2072 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
2073 {
2074 const struct rlc_firmware_header_v2_2 *hdr;
2075 const __le32 *fw_data;
2076 unsigned i, fw_size;
2077 u32 tmp;
2079 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
2081 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2082 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
2083 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
2085 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
2087 for (i = 0; i < fw_size; i++) {
2088 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2089 msleep(1);
2090 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
2091 le32_to_cpup(fw_data++));
2094 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2096 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2097 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
2098 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
2100 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
2101 for (i = 0; i < fw_size; i++) {
2102 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2103 msleep(1);
2104 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
2105 le32_to_cpup(fw_data++));
2108 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
2110 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
2111 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
2112 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
2113 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
2114 }
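/*
 * Clearing BRESET above releases the RLC LX6 core from reset once the
 * IRAM and DRAM images are in place; PDEBUG_ENABLE is presumably kept
 * on here to ease bring-up debugging.
 */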
2116 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
2117 {
2118 const struct rlc_firmware_header_v2_3 *hdr;
2119 const __le32 *fw_data;
2120 unsigned i, fw_size;
2121 u32 tmp;
2123 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
2125 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2126 le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
2127 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
2129 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
2131 for (i = 0; i < fw_size; i++) {
2132 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2133 msleep(1);
2134 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
2135 le32_to_cpup(fw_data++));
2138 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
2140 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
2141 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
2142 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
2144 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2145 le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
2146 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
2148 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
2150 for (i = 0; i < fw_size; i++) {
2151 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
2152 msleep(1);
2153 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
2154 le32_to_cpup(fw_data++));
2157 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
2159 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
2160 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
2161 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
2162 }
2164 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
2165 {
2166 const struct rlc_firmware_header_v2_0 *hdr;
2167 uint16_t version_major;
2168 uint16_t version_minor;
2170 if (!adev->gfx.rlc_fw)
2171 return -EINVAL;
2173 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2174 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2176 version_major = le16_to_cpu(hdr->header.header_version_major);
2177 version_minor = le16_to_cpu(hdr->header.header_version_minor);
2179 if (version_major == 2) {
2180 gfx_v11_0_load_rlcg_microcode(adev);
2181 if (amdgpu_dpm == 1) {
2182 if (version_minor >= 2)
2183 gfx_v11_0_load_rlc_iram_dram_microcode(adev);
2184 if (version_minor == 3)
2185 gfx_v11_0_load_rlcp_rlcv_microcode(adev);
2186 }
2188 return 0;
2189 }
2191 return -EINVAL;
2192 }
2194 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
2195 {
2196 int r;
2198 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2199 gfx_v11_0_init_csb(adev);
2201 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
2202 gfx_v11_0_rlc_enable_srm(adev);
2203 } else {
2204 if (amdgpu_sriov_vf(adev)) {
2205 gfx_v11_0_init_csb(adev);
2206 return 0;
2207 }
2209 adev->gfx.rlc.funcs->stop(adev);
2212 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
2215 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
2217 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
2218 /* legacy rlc firmware loading */
2219 r = gfx_v11_0_rlc_load_microcode(adev);
2220 if (r)
2221 return r;
2222 }
2224 gfx_v11_0_init_csb(adev);
2226 adev->gfx.rlc.funcs->start(adev);
2228 return 0;
2229 }
2231 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2232 {
2233 uint32_t usec_timeout = 50000; /* wait for 50ms */
2234 uint32_t tmp;
2235 unsigned i;
2237 /* Trigger an invalidation of the L1 instruction caches */
2238 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2239 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2240 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2242 /* Wait for invalidation complete */
2243 for (i = 0; i < usec_timeout; i++) {
2244 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2245 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2246 INVALIDATE_CACHE_COMPLETE))
2247 break;
2248 udelay(1);
2249 }
2251 if (i >= usec_timeout) {
2252 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2253 return -EINVAL;
2254 }
2256 if (amdgpu_emu_mode == 1)
2257 adev->hdp.funcs->flush_hdp(adev, NULL);
2259 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2260 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2261 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2262 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2263 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2264 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2266 /* Program me ucode address into instruction cache address register */
2267 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2268 lower_32_bits(addr) & 0xFFFFF000);
2269 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2270 upper_32_bits(addr));
2272 return 0;
2273 }
2275 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2276 {
2277 uint32_t usec_timeout = 50000; /* wait for 50ms */
2278 uint32_t tmp;
2279 unsigned i;
2281 /* Trigger an invalidation of the L1 instruction caches */
2282 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2283 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2284 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2286 /* Wait for invalidation complete */
2287 for (i = 0; i < usec_timeout; i++) {
2288 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2289 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2290 INVALIDATE_CACHE_COMPLETE))
2291 break;
2292 udelay(1);
2293 }
2295 if (i >= usec_timeout) {
2296 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2297 return -EINVAL;
2298 }
2300 if (amdgpu_emu_mode == 1)
2301 adev->hdp.funcs->flush_hdp(adev, NULL);
2303 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2304 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2305 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2306 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2307 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2308 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2310 /* Program pfp ucode address into instruction cache address register */
2311 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2312 lower_32_bits(addr) & 0xFFFFF000);
2313 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2314 upper_32_bits(addr));
2316 return 0;
2317 }
2319 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2320 {
2321 uint32_t usec_timeout = 50000; /* wait for 50ms */
2322 uint32_t tmp;
2323 unsigned i;
2325 /* Trigger an invalidation of the L1 instruction caches */
2326 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2327 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2329 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2331 /* Wait for invalidation complete */
2332 for (i = 0; i < usec_timeout; i++) {
2333 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2334 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2335 INVALIDATE_CACHE_COMPLETE))
2336 break;
2337 udelay(1);
2338 }
2340 if (i >= usec_timeout) {
2341 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2342 return -EINVAL;
2343 }
2345 if (amdgpu_emu_mode == 1)
2346 adev->hdp.funcs->flush_hdp(adev, NULL);
2348 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2349 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2350 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2351 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2352 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2354 /* Program mec1 ucode address into instruction cache address register */
2355 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2356 lower_32_bits(addr) & 0xFFFFF000);
2357 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2358 upper_32_bits(addr));
2360 return 0;
2361 }
2363 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2364 {
2365 uint32_t usec_timeout = 50000; /* wait for 50ms */
2366 uint32_t tmp;
2367 unsigned i, pipe_id;
2368 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2370 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2371 adev->gfx.pfp_fw->data;
2373 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2374 lower_32_bits(addr));
2375 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2376 upper_32_bits(addr));
2378 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2379 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2380 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2381 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2382 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2384 /*
2385 * Programming any of the CP_PFP_IC_BASE registers
2386 * forces invalidation of the PFP L1 I$. Wait for the
2387 * invalidation to complete.
2388 */
2389 for (i = 0; i < usec_timeout; i++) {
2390 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2391 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2392 INVALIDATE_CACHE_COMPLETE))
2393 break;
2394 udelay(1);
2395 }
2397 if (i >= usec_timeout) {
2398 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2399 return -EINVAL;
2400 }
2402 /* Prime the L1 instruction caches */
2403 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2404 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2405 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2406 /* Wait for the cache to be primed */
2407 for (i = 0; i < usec_timeout; i++) {
2408 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2409 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2410 ICACHE_PRIMED))
2411 break;
2412 udelay(1);
2413 }
2415 if (i >= usec_timeout) {
2416 dev_err(adev->dev, "failed to prime instruction cache\n");
2417 return -EINVAL;
2418 }
2420 mutex_lock(&adev->srbm_mutex);
2421 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2422 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2423 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2424 (pfp_hdr->ucode_start_addr_hi << 30) |
2425 (pfp_hdr->ucode_start_addr_lo >> 2));
2426 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2427 pfp_hdr->ucode_start_addr_hi >> 2);
2428 }
2429 /*
2430 * Program CP_ME_CNTL to reset the given PIPE so that
2431 * CP_PFP_PRGRM_CNTR_START takes effect.
2432 */
2433 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2434 if (pipe_id == 0)
2435 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2436 PFP_PIPE0_RESET, 1);
2437 else
2438 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2439 PFP_PIPE1_RESET, 1);
2440 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2442 /* Clear pfp pipe reset bits. */
2443 if (pipe_id == 0)
2444 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2445 PFP_PIPE0_RESET, 0);
2446 else
2447 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2448 PFP_PIPE1_RESET, 0);
2449 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2451 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2452 lower_32_bits(addr2));
2453 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2454 upper_32_bits(addr2));
2456 soc21_grbm_select(adev, 0, 0, 0, 0);
2457 mutex_unlock(&adev->srbm_mutex);
2459 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2460 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2461 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2462 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2464 /* Invalidate the data caches */
2465 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2466 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2467 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2469 for (i = 0; i < usec_timeout; i++) {
2470 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2471 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2472 INVALIDATE_DCACHE_COMPLETE))
2473 break;
2474 udelay(1);
2475 }
2477 if (i >= usec_timeout) {
2478 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2479 return -EINVAL;
2480 }
2482 return 0;
2483 }
2485 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2486 {
2487 uint32_t usec_timeout = 50000; /* wait for 50ms */
2488 uint32_t tmp;
2489 unsigned i, pipe_id;
2490 const struct gfx_firmware_header_v2_0 *me_hdr;
2492 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2493 adev->gfx.me_fw->data;
2495 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2496 lower_32_bits(addr));
2497 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2498 upper_32_bits(addr));
2500 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2501 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2502 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2503 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2504 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2506 /*
2507 * Programming any of the CP_ME_IC_BASE registers
2508 * forces invalidation of the ME L1 I$. Wait for the
2509 * invalidation to complete.
2510 */
2511 for (i = 0; i < usec_timeout; i++) {
2512 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2513 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2514 INVALIDATE_CACHE_COMPLETE))
2515 break;
2516 udelay(1);
2517 }
2519 if (i >= usec_timeout) {
2520 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2521 return -EINVAL;
2522 }
2524 /* Prime the instruction caches */
2525 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2526 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2527 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2529 /* Wait for the instruction cache to be primed */
2530 for (i = 0; i < usec_timeout; i++) {
2531 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2532 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2533 ICACHE_PRIMED))
2534 break;
2535 udelay(1);
2536 }
2538 if (i >= usec_timeout) {
2539 dev_err(adev->dev, "failed to prime instruction cache\n");
2540 return -EINVAL;
2541 }
2543 mutex_lock(&adev->srbm_mutex);
2544 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2545 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2546 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2547 (me_hdr->ucode_start_addr_hi << 30) |
2548 (me_hdr->ucode_start_addr_lo >> 2) );
2549 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2550 me_hdr->ucode_start_addr_hi >> 2);
2551 }
2552 /*
2553 * Program CP_ME_CNTL to reset the given PIPE so that
2554 * CP_ME_PRGRM_CNTR_START takes effect.
2555 */
2556 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2557 if (pipe_id == 0)
2558 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2559 ME_PIPE0_RESET, 1);
2560 else
2561 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2562 ME_PIPE1_RESET, 1);
2563 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2565 /* Clear me pipe reset bits. */
2566 if (pipe_id == 0)
2567 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2568 ME_PIPE0_RESET, 0);
2569 else
2570 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2571 ME_PIPE1_RESET, 0);
2572 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2574 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2575 lower_32_bits(addr2));
2576 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2577 upper_32_bits(addr2));
2579 soc21_grbm_select(adev, 0, 0, 0, 0);
2580 mutex_unlock(&adev->srbm_mutex);
2582 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2583 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2584 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2585 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2587 /* Invalidate the data caches */
2588 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2589 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2590 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2592 for (i = 0; i < usec_timeout; i++) {
2593 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2594 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2595 INVALIDATE_DCACHE_COMPLETE))
2596 break;
2597 udelay(1);
2598 }
2600 if (i >= usec_timeout) {
2601 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2602 return -EINVAL;
2603 }
2605 return 0;
2606 }
2608 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2609 {
2610 uint32_t usec_timeout = 50000; /* wait for 50ms */
2611 uint32_t tmp;
2612 unsigned i;
2613 const struct gfx_firmware_header_v2_0 *mec_hdr;
2615 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2616 adev->gfx.mec_fw->data;
2618 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2619 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2620 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2621 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2622 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2624 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2625 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2626 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2627 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
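/*
 * CP_MEC_MDBASE and CP_MEC_RS64_PRGRM_CNTR_START are per-pipe, so each
 * MEC pipe is selected via soc21_grbm_select() under srbm_mutex before
 * its data base and start address are programmed.
 */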
2629 mutex_lock(&adev->srbm_mutex);
2630 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2631 soc21_grbm_select(adev, 1, i, 0, 0);
2633 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2634 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2635 upper_32_bits(addr2));
2637 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2638 mec_hdr->ucode_start_addr_lo >> 2 |
2639 mec_hdr->ucode_start_addr_hi << 30);
2640 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2641 mec_hdr->ucode_start_addr_hi >> 2);
2643 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2644 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2645 upper_32_bits(addr));
2646 }
2647 mutex_unlock(&adev->srbm_mutex);
2648 soc21_grbm_select(adev, 0, 0, 0, 0);
2650 /* Trigger an invalidation of the MEC data cache */
2651 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2652 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2653 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2655 /* Wait for invalidation complete */
2656 for (i = 0; i < usec_timeout; i++) {
2657 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2658 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2659 INVALIDATE_DCACHE_COMPLETE))
2660 break;
2661 udelay(1);
2662 }
2664 if (i >= usec_timeout) {
2665 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2666 return -EINVAL;
2667 }
2669 /* Trigger an invalidation of the L1 instruction caches */
2670 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2671 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2672 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2674 /* Wait for invalidation complete */
2675 for (i = 0; i < usec_timeout; i++) {
2676 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2677 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2678 INVALIDATE_CACHE_COMPLETE))
2679 break;
2680 udelay(1);
2681 }
2683 if (i >= usec_timeout) {
2684 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2685 return -EINVAL;
2686 }
2688 return 0;
2689 }
2691 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2692 {
2693 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2694 const struct gfx_firmware_header_v2_0 *me_hdr;
2695 const struct gfx_firmware_header_v2_0 *mec_hdr;
2696 uint32_t pipe_id, tmp;
2698 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2699 adev->gfx.mec_fw->data;
2700 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2701 adev->gfx.me_fw->data;
2702 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2703 adev->gfx.pfp_fw->data;
2705 /* config pfp program start addr */
2706 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2707 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2708 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2709 (pfp_hdr->ucode_start_addr_hi << 30) |
2710 (pfp_hdr->ucode_start_addr_lo >> 2));
2711 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2712 pfp_hdr->ucode_start_addr_hi >> 2);
2713 }
2714 soc21_grbm_select(adev, 0, 0, 0, 0);
2716 /* reset pfp pipe */
2717 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2718 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2719 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2720 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2722 /* clear pfp pipe reset */
2723 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2724 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2725 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2727 /* config me program start addr */
2728 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2729 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2730 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2731 (me_hdr->ucode_start_addr_hi << 30) |
2732 (me_hdr->ucode_start_addr_lo >> 2) );
2733 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2734 me_hdr->ucode_start_addr_hi >> 2);
2735 }
2736 soc21_grbm_select(adev, 0, 0, 0, 0);
2738 /* reset me pipe */
2739 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2740 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2741 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2742 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2744 /* clear me pipe reset */
2745 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2746 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2747 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2749 /* config mec program start addr */
2750 for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2751 soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2752 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2753 mec_hdr->ucode_start_addr_lo >> 2 |
2754 mec_hdr->ucode_start_addr_hi << 30);
2755 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2756 mec_hdr->ucode_start_addr_hi >> 2);
2757 }
2758 soc21_grbm_select(adev, 0, 0, 0, 0);
2759 }
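/*
 * Once the IMU has been started, the function below polls CP_STAT and
 * RLC_RLCS_BOOTLOAD_STATUS.BOOTLOAD_COMPLETE until the RLC reports
 * that the GC firmware images were autoloaded, and then points the CP
 * instruction/data caches at those images in the autoload buffer.
 */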
2761 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2762 {
2763 uint32_t i, cp_status;
2764 uint32_t bootload_status;
2765 int r;
2766 uint64_t addr, addr2;
2768 for (i = 0; i < adev->usec_timeout; i++) {
2769 cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2771 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1))
2772 bootload_status = RREG32_SOC15(GC, 0,
2773 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
2774 else
2775 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2777 if ((cp_status == 0) &&
2778 (REG_GET_FIELD(bootload_status,
2779 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2780 break;
2781 }
2783 udelay(1);
2784 }
2785 if (i >= adev->usec_timeout) {
2786 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2787 return -ETIMEDOUT;
2788 }
2790 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2791 if (adev->gfx.rs64_enable) {
2792 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2793 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2794 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2795 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2796 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2797 if (r)
2798 return r;
2799 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2800 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2801 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2802 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2803 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2804 if (r)
2805 return r;
2806 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2807 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2808 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2809 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2810 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
2811 if (r)
2812 return r;
2813 } else {
2814 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2815 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2816 r = gfx_v11_0_config_me_cache(adev, addr);
2817 if (r)
2818 return r;
2819 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2820 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2821 r = gfx_v11_0_config_pfp_cache(adev, addr);
2822 if (r)
2823 return r;
2824 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2825 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2826 r = gfx_v11_0_config_mec_cache(adev, addr);
2827 if (r)
2828 return r;
2829 }
2830 }
2832 return 0;
2833 }
2835 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2836 {
2837 u32 i;
2838 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2840 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2841 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2842 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
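/* Poll CP_STAT until the CP reports idle after (un)halting ME/PFP. */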
2844 for (i = 0; i < adev->usec_timeout; i++) {
2845 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2846 break;
2847 udelay(1);
2848 }
2850 if (i >= adev->usec_timeout)
2851 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2853 return 0;
2854 }
2856 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2857 {
2858 int r;
2859 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2860 const __le32 *fw_data;
2861 unsigned i, fw_size;
2863 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2864 adev->gfx.pfp_fw->data;
2866 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2868 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2869 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2870 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2872 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2873 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2874 &adev->gfx.pfp.pfp_fw_obj,
2875 &adev->gfx.pfp.pfp_fw_gpu_addr,
2876 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2878 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2879 gfx_v11_0_pfp_fini(adev);
2883 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2885 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2886 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2888 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
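/*
 * For the v1 (non-RS64) image only the jump table is written through
 * the CP_HYP_PFP_UCODE_ADDR/DATA pair; the bulk of the ucode is
 * fetched through the instruction cache configured above.
 */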
2890 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
2892 for (i = 0; i < pfp_hdr->jt_size; i++)
2893 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
2894 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
2896 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
2898 return 0;
2899 }
2901 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
2902 {
2903 int r;
2904 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2905 const __le32 *fw_ucode, *fw_data;
2906 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2907 uint32_t tmp;
2908 uint32_t usec_timeout = 50000; /* wait for 50ms */
2910 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2911 adev->gfx.pfp_fw->data;
2913 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2916 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
2917 le32_to_cpu(pfp_hdr->ucode_offset_bytes));
2918 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
2920 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2921 le32_to_cpu(pfp_hdr->data_offset_bytes));
2922 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
2925 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2926 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2927 &adev->gfx.pfp.pfp_fw_obj,
2928 &adev->gfx.pfp.pfp_fw_gpu_addr,
2929 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2931 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
2932 gfx_v11_0_pfp_fini(adev);
2936 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2937 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
2938 &adev->gfx.pfp.pfp_fw_data_obj,
2939 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
2940 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
2942 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
2943 gfx_v11_0_pfp_fini(adev);
2947 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
2948 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
2950 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2951 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
2952 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2953 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
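/*
 * RS64 firmware ships as separate instruction and data images, hence
 * the two VRAM BOs above: the ucode BO backs the instruction cache
 * (CP_PFP_IC_BASE) and the data BO backs RS64 data-cache base 0
 * (CP_GFX_RS64_DC_BASE0), both programmed below.
 */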
2955 if (amdgpu_emu_mode == 1)
2956 adev->hdp.funcs->flush_hdp(adev, NULL);
2958 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2959 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2960 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2961 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2963 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2964 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2965 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2966 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2967 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2969 /*
2970 * Programming any of the CP_PFP_IC_BASE registers
2971 * forces invalidation of the PFP L1 I$. Wait for the
2972 * invalidation to complete.
2973 */
2974 for (i = 0; i < usec_timeout; i++) {
2975 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2976 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2977 INVALIDATE_CACHE_COMPLETE))
2978 break;
2979 udelay(1);
2980 }
2982 if (i >= usec_timeout) {
2983 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2984 return -EINVAL;
2985 }
2987 /* Prime the L1 instruction caches */
2988 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2989 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2990 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2991 /* Wait for the cache to be primed */
2992 for (i = 0; i < usec_timeout; i++) {
2993 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2994 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2995 ICACHE_PRIMED))
2996 break;
2997 udelay(1);
2998 }
3000 if (i >= usec_timeout) {
3001 dev_err(adev->dev, "failed to prime instruction cache\n");
3002 return -EINVAL;
3003 }
3005 mutex_lock(&adev->srbm_mutex);
3006 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3007 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3008 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
3009 (pfp_hdr->ucode_start_addr_hi << 30) |
3010 (pfp_hdr->ucode_start_addr_lo >> 2) );
3011 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
3012 pfp_hdr->ucode_start_addr_hi >> 2);
3013 }
3014 /*
3015 * Program CP_ME_CNTL to reset the given PIPE so that
3016 * CP_PFP_PRGRM_CNTR_START takes effect.
3017 */
3018 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3019 if (pipe_id == 0)
3020 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3021 PFP_PIPE0_RESET, 1);
3022 else
3023 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3024 PFP_PIPE1_RESET, 1);
3025 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3027 /* Clear pfp pipe reset bits. */
3028 if (pipe_id == 0)
3029 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3030 PFP_PIPE0_RESET, 0);
3031 else
3032 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3033 PFP_PIPE1_RESET, 0);
3034 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3036 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
3037 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3038 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
3039 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
3041 soc21_grbm_select(adev, 0, 0, 0, 0);
3042 mutex_unlock(&adev->srbm_mutex);
3044 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3045 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3046 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3047 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3049 /* Invalidate the data caches */
3050 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3051 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3052 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3054 for (i = 0; i < usec_timeout; i++) {
3055 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3056 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3057 INVALIDATE_DCACHE_COMPLETE))
3058 break;
3059 udelay(1);
3060 }
3062 if (i >= usec_timeout) {
3063 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3064 return -EINVAL;
3065 }
3067 return 0;
3068 }
3070 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
3071 {
3072 int r;
3073 const struct gfx_firmware_header_v1_0 *me_hdr;
3074 const __le32 *fw_data;
3075 unsigned i, fw_size;
3077 me_hdr = (const struct gfx_firmware_header_v1_0 *)
3078 adev->gfx.me_fw->data;
3080 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3082 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3083 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3084 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
3086 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
3087 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3088 &adev->gfx.me.me_fw_obj,
3089 &adev->gfx.me.me_fw_gpu_addr,
3090 (void **)&adev->gfx.me.me_fw_ptr);
3092 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
3093 gfx_v11_0_me_fini(adev);
3097 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
3099 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3100 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3102 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
3104 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
3106 for (i = 0; i < me_hdr->jt_size; i++)
3107 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
3108 le32_to_cpup(fw_data + me_hdr->jt_offset + i));
3110 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
3112 return 0;
3113 }
3115 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
3116 {
3117 int r;
3118 const struct gfx_firmware_header_v2_0 *me_hdr;
3119 const __le32 *fw_ucode, *fw_data;
3120 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
3121 uint32_t tmp;
3122 uint32_t usec_timeout = 50000; /* wait for 50ms */
3124 me_hdr = (const struct gfx_firmware_header_v2_0 *)
3125 adev->gfx.me_fw->data;
3127 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
3130 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
3131 le32_to_cpu(me_hdr->ucode_offset_bytes));
3132 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
3134 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
3135 le32_to_cpu(me_hdr->data_offset_bytes));
3136 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
3139 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3140 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
3141 &adev->gfx.me.me_fw_obj,
3142 &adev->gfx.me.me_fw_gpu_addr,
3143 (void **)&adev->gfx.me.me_fw_ptr);
3145 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
3146 gfx_v11_0_me_fini(adev);
3150 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3151 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
3152 &adev->gfx.me.me_fw_data_obj,
3153 &adev->gfx.me.me_fw_data_gpu_addr,
3154 (void **)&adev->gfx.me.me_fw_data_ptr);
3156 dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
3157 gfx_v11_0_pfp_fini(adev);
3161 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
3162 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
3164 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
3165 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
3166 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
3167 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
3169 if (amdgpu_emu_mode == 1)
3170 adev->hdp.funcs->flush_hdp(adev, NULL);
3172 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
3173 lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
3174 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
3175 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
3177 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
3178 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
3179 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
3180 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
3181 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
3183 /*
3184 * Programming any of the CP_ME_IC_BASE registers
3185 * forces invalidation of the ME L1 I$. Wait for the
3186 * invalidation to complete.
3187 */
3188 for (i = 0; i < usec_timeout; i++) {
3189 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3190 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3191 INVALIDATE_CACHE_COMPLETE))
3192 break;
3193 udelay(1);
3194 }
3196 if (i >= usec_timeout) {
3197 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3198 return -EINVAL;
3199 }
3201 /* Prime the instruction caches */
3202 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3203 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3204 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3206 /* Wait for the instruction cache to be primed */
3207 for (i = 0; i < usec_timeout; i++) {
3208 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3209 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
3210 ICACHE_PRIMED))
3211 break;
3212 udelay(1);
3213 }
3215 if (i >= usec_timeout) {
3216 dev_err(adev->dev, "failed to prime instruction cache\n");
3217 return -EINVAL;
3218 }
3220 mutex_lock(&adev->srbm_mutex);
3221 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3222 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3223 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3224 (me_hdr->ucode_start_addr_hi << 30) |
3225 (me_hdr->ucode_start_addr_lo >> 2) );
3226 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3227 me_hdr->ucode_start_addr_hi >> 2);
3228 }
3229 /*
3230 * Program CP_ME_CNTL to reset the given PIPE so that
3231 * CP_ME_PRGRM_CNTR_START takes effect.
3232 */
3233 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
3234 if (pipe_id == 0)
3235 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3236 ME_PIPE0_RESET, 1);
3237 else
3238 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3239 ME_PIPE1_RESET, 1);
3240 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3242 /* Clear me pipe reset bits. */
3243 if (pipe_id == 0)
3244 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3245 ME_PIPE0_RESET, 0);
3246 else
3247 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
3248 ME_PIPE1_RESET, 0);
3249 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3251 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3252 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3253 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3254 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3256 soc21_grbm_select(adev, 0, 0, 0, 0);
3257 mutex_unlock(&adev->srbm_mutex);
3259 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3260 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3261 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3262 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3264 /* Invalidate the data caches */
3265 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3266 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3267 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3269 for (i = 0; i < usec_timeout; i++) {
3270 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3271 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3272 INVALIDATE_DCACHE_COMPLETE))
3273 break;
3274 udelay(1);
3275 }
3277 if (i >= usec_timeout) {
3278 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3279 return -EINVAL;
3280 }
3282 return 0;
3283 }
3285 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3286 {
3287 int r;
3289 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
3290 return -EINVAL;
3292 gfx_v11_0_cp_gfx_enable(adev, false);
3294 if (adev->gfx.rs64_enable)
3295 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3296 else
3297 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3298 if (r) {
3299 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3300 return r;
3301 }
3303 if (adev->gfx.rs64_enable)
3304 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3305 else
3306 r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3307 if (r) {
3308 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3309 return r;
3310 }
3312 return 0;
3313 }
3315 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3316 {
3317 struct amdgpu_ring *ring;
3318 const struct cs_section_def *sect = NULL;
3319 const struct cs_extent_def *ext = NULL;
3320 int r, i;
3321 u32 ctx_reg_offset;
3324 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3325 adev->gfx.config.max_hw_contexts - 1);
3326 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3328 if (!amdgpu_async_gfx_ring)
3329 gfx_v11_0_cp_gfx_enable(adev, true);
3331 ring = &adev->gfx.gfx_ring[0];
3332 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3334 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3338 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3339 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3341 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3342 amdgpu_ring_write(ring, 0x80000000);
3343 amdgpu_ring_write(ring, 0x80000000);
3345 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3346 for (ext = sect->section; ext->extent != NULL; ++ext) {
3347 if (sect->id == SECT_CONTEXT) {
3348 amdgpu_ring_write(ring,
3349 PACKET3(PACKET3_SET_CONTEXT_REG,
3351 amdgpu_ring_write(ring, ext->reg_index -
3352 PACKET3_SET_CONTEXT_REG_START);
3353 for (i = 0; i < ext->reg_count; i++)
3354 amdgpu_ring_write(ring, ext->extent[i]);
3355 }
3356 }
3357 }
3359 ctx_reg_offset =
3360 SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3361 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3362 amdgpu_ring_write(ring, ctx_reg_offset);
3363 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3365 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3366 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3368 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3369 amdgpu_ring_write(ring, 0);
3371 amdgpu_ring_commit(ring);
3373 /* submit cs packet to copy state 0 to next available state */
3374 if (adev->gfx.num_gfx_rings > 1) {
3375 /* maximum supported gfx ring is 2 */
3376 ring = &adev->gfx.gfx_ring[1];
3377 r = amdgpu_ring_alloc(ring, 2);
3379 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3383 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3384 amdgpu_ring_write(ring, 0);
3386 amdgpu_ring_commit(ring);
3387 }
3389 return 0;
3390 }
3391 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev,
3392 uint32_t pipe)
3393 {
3394 u32 tmp;
3396 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3397 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3399 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3400 }
3402 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3403 struct amdgpu_ring *ring)
3404 {
3405 u32 tmp;
3407 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3408 if (ring->use_doorbell) {
3409 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3410 DOORBELL_OFFSET, ring->doorbell_index);
3411 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3412 DOORBELL_EN, 1);
3413 } else {
3414 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3415 DOORBELL_EN, 0);
3416 }
3417 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3419 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3420 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3421 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3423 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3424 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3425 }
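/*
 * The RANGE_LOWER/UPPER pair bounds the doorbell offsets the CP
 * accepts for the gfx ring block; doorbells outside this window are
 * not treated as gfx ring submissions.
 */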
3427 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3428 {
3429 struct amdgpu_ring *ring;
3432 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3435 /* Set the write pointer delay */
3436 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3438 /* set the RB to use vmid 0 */
3439 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3441 /* Init gfx ring 0 for pipe 0 */
3442 mutex_lock(&adev->srbm_mutex);
3443 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3445 /* Set ring buffer size */
3446 ring = &adev->gfx.gfx_ring[0];
3447 rb_bufsz = order_base_2(ring->ring_size / 8);
3448 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3449 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3450 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3452 /* Initialize the ring buffer's write pointers */
3453 ring->wptr = 0;
3454 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3455 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3457 /* set the wb address whether it's enabled or not */
3458 rptr_addr = ring->rptr_gpu_addr;
3459 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3460 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3461 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3463 wptr_gpu_addr = ring->wptr_gpu_addr;
3464 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3465 lower_32_bits(wptr_gpu_addr));
3466 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3467 upper_32_bits(wptr_gpu_addr));
3470 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3472 rb_addr = ring->gpu_addr >> 8;
3473 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3474 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3476 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3478 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3479 mutex_unlock(&adev->srbm_mutex);
3481 /* Init gfx ring 1 for pipe 1 */
3482 if (adev->gfx.num_gfx_rings > 1) {
3483 mutex_lock(&adev->srbm_mutex);
3484 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3485 /* maximum supported gfx ring is 2 */
3486 ring = &adev->gfx.gfx_ring[1];
3487 rb_bufsz = order_base_2(ring->ring_size / 8);
3488 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3489 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3490 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3491 /* Initialize the ring buffer's write pointers */
3492 ring->wptr = 0;
3493 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3494 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3495 /* Set the wb address whether it's enabled or not */
3496 rptr_addr = ring->rptr_gpu_addr;
3497 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3498 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3499 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3500 wptr_gpu_addr = ring->wptr_gpu_addr;
3501 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3502 lower_32_bits(wptr_gpu_addr));
3503 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3504 upper_32_bits(wptr_gpu_addr));
3507 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3509 rb_addr = ring->gpu_addr >> 8;
3510 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3511 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3512 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3514 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3515 mutex_unlock(&adev->srbm_mutex);
3516 }
3517 /* Switch to pipe 0 */
3518 mutex_lock(&adev->srbm_mutex);
3519 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3520 mutex_unlock(&adev->srbm_mutex);
3522 /* start the ring */
3523 gfx_v11_0_cp_gfx_start(adev);
3525 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3526 ring = &adev->gfx.gfx_ring[i];
3527 ring->sched.ready = true;
3528 }
3530 return 0;
3531 }
3533 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3534 {
3535 u32 data;
3537 if (adev->gfx.rs64_enable) {
3538 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3539 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE,
3540 enable ? 0 : 1);
3541 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET,
3542 enable ? 0 : 1);
3543 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET,
3544 enable ? 0 : 1);
3545 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET,
3546 enable ? 0 : 1);
3547 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET,
3548 enable ? 0 : 1);
3549 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE,
3550 enable ? 1 : 0);
3551 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE,
3552 enable ? 1 : 0);
3553 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE,
3554 enable ? 1 : 0);
3555 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE,
3556 enable ? 1 : 0);
3557 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT,
3558 enable ? 0 : 1);
3559 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3560 } else {
3561 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
3563 if (enable) {
3564 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3565 if (!adev->enable_mes_kiq)
3566 data = REG_SET_FIELD(data, CP_MEC_CNTL,
3567 MEC_ME2_HALT, 0);
3568 } else {
3569 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3570 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
3571 }
3572 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3573 }
3575 adev->gfx.kiq.ring.sched.ready = enable;
3577 udelay(50);
3578 }
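/*
 * Enabling here only takes the MEC out of halt/reset; the individual
 * compute rings are still brought up through their MQDs, and the KIQ
 * ring's sched.ready flag is kept in step with the MEC halt state.
 */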
3580 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3582 const struct gfx_firmware_header_v1_0 *mec_hdr;
3583 const __le32 *fw_data;
3584 unsigned i, fw_size;
3585 u32 *fw = NULL;
3586 int r;
3588 if (!adev->gfx.mec_fw)
3589 return -EINVAL;
3591 gfx_v11_0_cp_compute_enable(adev, false);
3593 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3594 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3596 fw_data = (const __le32 *)
3597 (adev->gfx.mec_fw->data +
3598 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3599 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3601 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3602 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3603 &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);
3614 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3615 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3617 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3620 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3622 for (i = 0; i < mec_hdr->jt_size; i++)
3623 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3624 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
	WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);

	return 0;
}
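/*
 * RS64 MEC microcode load: the v2 firmware image carries separate
 * instruction and data sections, each copied into its own VRAM buffer
 * object.  After programming the instruction/data bases for every MEC
 * pipe, both L1 caches are invalidated and polled for completion.
 */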
static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v2_0 *mec_hdr;
	const __le32 *fw_ucode, *fw_data;
	u32 tmp, fw_ucode_size, fw_data_size;
	u32 i, usec_timeout = 50000; /* Wait for 50 ms */
	u32 *fw_ucode_ptr, *fw_data_ptr;
	int r;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v11_0_cp_compute_enable(adev, false);
3645 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3646 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3648 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3649 le32_to_cpu(mec_hdr->ucode_offset_bytes));
3650 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3652 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3653 le32_to_cpu(mec_hdr->data_offset_bytes));
3654 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3656 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3657 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
3658 &adev->gfx.mec.mec_fw_obj,
3659 &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw_ucode_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	r = amdgpu_bo_create_reserved(adev, fw_data_size,
3668 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
3669 &adev->gfx.mec.mec_fw_data_obj,
3670 &adev->gfx.mec.mec_fw_data_gpu_addr,
				      (void **)&fw_data_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mec fw data bo\n", r);
		gfx_v11_0_mec_fini(adev);
		return r;
	}

	memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3679 memcpy(fw_data_ptr, fw_data, fw_data_size);
3681 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3682 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3683 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3684 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3686 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3687 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3688 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3689 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3690 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3692 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3693 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3694 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3695 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3697 mutex_lock(&adev->srbm_mutex);
3698 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3699 soc21_grbm_select(adev, 1, i, 0, 0);
3701 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3702 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3703 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3705 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3706 mec_hdr->ucode_start_addr_lo >> 2 |
3707 mec_hdr->ucode_start_addr_hi << 30);
3708 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3709 mec_hdr->ucode_start_addr_hi >> 2);
3711 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
3712 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3713 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	/* Trigger an invalidation of the L1 data caches */
	tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
	WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);

	/* Wait for the invalidation to complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
				  INVALIDATE_DCACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate MEC data cache\n");
		return -EINVAL;
	}
	/* Trigger an invalidation of the L1 instruction caches */
	tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);

	/* Wait for the invalidation to complete */
	for (i = 0; i < usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE))
			break;
		udelay(1);
	}

	if (i >= usec_timeout) {
		dev_err(adev->dev, "failed to invalidate instruction cache\n");
		return -EINVAL;
	}

	return 0;
}
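/*
 * The low byte of RLC_CP_SCHEDULERS identifies the KIQ to the RLC:
 * me/pipe/queue are packed into the low bits, and the 0x80 OR below
 * marks the entry valid, which is why the register is written twice
 * (queue id first, then with the enable bit set).
 */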
static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
	tmp |= 0x80;
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
}
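/*
 * Note on the "(index * 2) << 2" scaling below: doorbell_index values
 * are kept in 32-bit doorbell slots, while gfx11 uses 64-bit doorbells
 * and these range registers appear to take byte offsets, hence the
 * multiply by 2 (dwords per doorbell) and the shift by 2 (bytes per
 * dword).
 */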
static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
{
	/* set graphics engine doorbell range */
3776 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3777 (adev->doorbell_index.gfx_ring0 * 2) << 2);
3778 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3779 (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3781 /* set compute engine doorbell range */
3782 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3783 (adev->doorbell_index.kiq * 2) << 2);
3784 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
		     (adev->doorbell_index.userqueue_end * 2) << 2);
}
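/*
 * Build the gfx MQD (memory queue descriptor) from the generic queue
 * properties.  Everything the CP needs to (re)create the gfx HQD, such
 * as the ring base/size, rptr/wptr write-back addresses, doorbell and
 * priority, is captured here so the queue can later be mapped by the KIQ.
 */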
3788 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
				  struct amdgpu_mqd_prop *prop)
{
	struct v11_gfx_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr;
	uint32_t tmp;
	uint32_t rb_bufsz;
3796 /* set up gfx hqd wptr */
3797 mqd->cp_gfx_hqd_wptr = 0;
3798 mqd->cp_gfx_hqd_wptr_hi = 0;
3800 /* set the pointer to the MQD */
3801 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3802 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3804 /* set up mqd control */
3805 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3806 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3807 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3808 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3809 mqd->cp_gfx_mqd_control = tmp;
	/* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3812 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
3813 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3814 mqd->cp_gfx_hqd_vmid = 0;
3816 /* set up default queue priority level
3817 * 0x0 = low priority, 0x1 = high priority */
3818 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3819 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3820 mqd->cp_gfx_hqd_queue_priority = tmp;
3822 /* set up time quantum */
3823 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
3824 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3825 mqd->cp_gfx_hqd_quantum = tmp;
3827 /* set up gfx hqd base. this is similar as CP_RB_BASE */
3828 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3829 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3830 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3832 /* set up hqd_rptr_addr/_hi, similar as CP_RB_RPTR */
3833 wb_gpu_addr = prop->rptr_gpu_addr;
3834 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3835 mqd->cp_gfx_hqd_rptr_addr_hi =
3836 upper_32_bits(wb_gpu_addr) & 0xffff;
3838 /* set up rb_wptr_poll addr */
3839 wb_gpu_addr = prop->wptr_gpu_addr;
3840 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3841 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3843 /* set up the gfx_hqd_control, similar as CP_RB0_CNTL */
3844 rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
3845 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
3846 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3847 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
	tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
	mqd->cp_gfx_hqd_cntl = tmp;
3853 /* set up cp_doorbell_control */
3854 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
	} else
		tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	mqd->cp_rb_doorbell_control = tmp;
3865 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3866 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
	/* activate the queue */
	mqd->cp_gfx_hqd_active = 1;

	return 0;
}
3874 #ifdef BRING_UP_DEBUG
static int gfx_v11_0_gfx_queue_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
3878 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3880 /* set mmCP_GFX_HQD_WPTR/_HI to 0 */
3881 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR, mqd->cp_gfx_hqd_wptr);
3882 WREG32_SOC15(GC, 0, regCP_GFX_HQD_WPTR_HI, mqd->cp_gfx_hqd_wptr_hi);
3884 /* set GFX_MQD_BASE */
3885 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr);
3886 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);
3888 /* set GFX_MQD_CONTROL */
3889 WREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL, mqd->cp_gfx_mqd_control);
3891 /* set GFX_HQD_VMID to 0 */
3892 WREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID, mqd->cp_gfx_hqd_vmid);
3894 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY,
3895 mqd->cp_gfx_hqd_queue_priority);
3896 WREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM, mqd->cp_gfx_hqd_quantum);
3898 /* set GFX_HQD_BASE, similar as CP_RB_BASE */
3899 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE, mqd->cp_gfx_hqd_base);
3900 WREG32_SOC15(GC, 0, regCP_GFX_HQD_BASE_HI, mqd->cp_gfx_hqd_base_hi);
3902 /* set GFX_HQD_RPTR_ADDR, similar as CP_RB_RPTR */
3903 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR, mqd->cp_gfx_hqd_rptr_addr);
3904 WREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR_ADDR_HI, mqd->cp_gfx_hqd_rptr_addr_hi);
3906 /* set GFX_HQD_CNTL, similar as CP_RB_CNTL */
3907 WREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL, mqd->cp_gfx_hqd_cntl);
3909 /* set RB_WPTR_POLL_ADDR */
3910 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO, mqd->cp_rb_wptr_poll_addr_lo);
3911 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI, mqd->cp_rb_wptr_poll_addr_hi);
3913 /* set RB_DOORBELL_CONTROL */
3914 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, mqd->cp_rb_doorbell_control);
	/* activate the queue */
	WREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE, mqd->cp_gfx_hqd_active);

	return 0;
}
#endif
static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
3926 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3927 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3929 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3930 memset((void *)mqd, 0, sizeof(*mqd));
3931 mutex_lock(&adev->srbm_mutex);
3932 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3933 amdgpu_ring_init_mqd(ring);
#ifdef BRING_UP_DEBUG
		gfx_v11_0_gfx_queue_init_register(ring);
#endif
		soc21_grbm_select(adev, 0, 0, 0, 0);
3938 mutex_unlock(&adev->srbm_mutex);
3939 if (adev->gfx.me.mqd_backup[mqd_idx])
3940 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
3941 } else if (amdgpu_in_reset(adev)) {
3942 /* reset mqd with the backup copy */
3943 if (adev->gfx.me.mqd_backup[mqd_idx])
3944 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset the ring */
		ring->wptr = 0;
		*ring->wptr_cpu_addr = 0;
3948 amdgpu_ring_clear_ring(ring);
3949 #ifdef BRING_UP_DEBUG
3950 mutex_lock(&adev->srbm_mutex);
3951 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3952 gfx_v11_0_gfx_queue_init_register(ring);
3953 soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
#endif
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}
3963 #ifndef BRING_UP_DEBUG
static int gfx_v11_0_kiq_enable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_gfx_rings);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}
3980 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
3981 kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
	return amdgpu_ring_test_helper(kiq_ring);
}
#endif
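/*
 * Restore the gfx ring MQDs and, outside of bring-up debugging, ask the
 * KIQ to map the kernel gfx queues on our behalf instead of programming
 * the HQD registers directly.
 */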
static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;
3992 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3993 ring = &adev->gfx.gfx_ring[i];
3995 r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;

		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v11_0_gfx_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}
#ifndef BRING_UP_DEBUG
	r = gfx_v11_0_kiq_enable_kgq(adev);
	if (r)
		goto done;
#endif

	r = gfx_v11_0_cp_gfx_start(adev);
	if (r)
		goto done;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->sched.ready = true;
	}
done:
	return r;
}
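/*
 * Compute MQD init: mirrors the gfx variant above, but additionally
 * programs the EOP buffer and, when a doorbell is used, the full
 * CP_HQD_PQ_DOORBELL_CONTROL word.
 */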
4026 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
				      struct amdgpu_mqd_prop *prop)
{
	struct v11_compute_mqd *mqd = m;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;
4033 mqd->header = 0xC0310800;
4034 mqd->compute_pipelinestat_enable = 0x00000001;
4035 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
4036 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
4037 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
4038 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
4039 mqd->compute_misc_reserved = 0x00000007;
4041 eop_base_addr = prop->eop_gpu_addr >> 8;
4042 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
4043 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
4045 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4046 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
4047 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
4048 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
4050 mqd->cp_hqd_eop_control = tmp;
4052 /* enable doorbell? */
4053 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	if (prop->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;
4071 /* disable the queue if it's active */
4072 mqd->cp_hqd_dequeue_request = 0;
4073 mqd->cp_hqd_pq_rptr = 0;
4074 mqd->cp_hqd_pq_wptr_lo = 0;
4075 mqd->cp_hqd_pq_wptr_hi = 0;
4077 /* set the pointer to the MQD */
4078 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
4079 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
4081 /* set MQD vmid to 0 */
4082 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
4083 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
4084 mqd->cp_mqd_control = tmp;
4086 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
4087 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
4088 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
4089 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
4091 /* set up the HQD, this is similar to CP_RB0_CNTL */
4092 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
4093 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
4094 (order_base_2(prop->queue_size / 4) - 1));
4095 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
4096 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
4097 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
4098 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
4099 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
4100 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
4101 mqd->cp_hqd_pq_control = tmp;
4103 /* set the wb address whether it's enabled or not */
4104 wb_gpu_addr = prop->rptr_gpu_addr;
4105 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
4106 mqd->cp_hqd_pq_rptr_report_addr_hi =
4107 upper_32_bits(wb_gpu_addr) & 0xffff;
4109 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4110 wb_gpu_addr = prop->wptr_gpu_addr;
4111 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
4112 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
	tmp = 0;
	/* enable the doorbell if requested */
	if (prop->use_doorbell) {
		tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, prop->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	}

	mqd->cp_hqd_pq_doorbell_control = tmp;
4131 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4132 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
4134 /* set the vmid for the queue */
4135 mqd->cp_hqd_vmid = 0;
4137 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
4138 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
4139 mqd->cp_hqd_persistent_state = tmp;
4141 /* set MIN_IB_AVAIL_SIZE */
4142 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
4143 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
4144 mqd->cp_hqd_ib_control = tmp;
4146 /* set static priority for a compute queue/ring */
4147 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
4148 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
	mqd->cp_hqd_active = prop->hqd_active;

	return 0;
}
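/*
 * Program the KIQ HQD registers directly over MMIO from its MQD.  Only
 * the KIQ is brought up this way; regular compute queues are mapped by
 * the KIQ itself via its map_queues packet interface.
 */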
static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct v11_compute_mqd *mqd = ring->mqd_ptr;
	int j;
4161 /* inactivate the queue */
4162 if (amdgpu_sriov_vf(adev))
4163 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
4165 /* disable wptr polling */
4166 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
4168 /* write the EOP addr */
4169 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
4170 mqd->cp_hqd_eop_base_addr_lo);
4171 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
4172 mqd->cp_hqd_eop_base_addr_hi);
4174 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4175 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
4176 mqd->cp_hqd_eop_control);
4178 /* enable doorbell? */
4179 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4180 mqd->cp_hqd_pq_doorbell_control);
4182 /* disable the queue if it's active */
4183 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
4184 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
4185 for (j = 0; j < adev->usec_timeout; j++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
4191 mqd->cp_hqd_dequeue_request);
4192 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
4193 mqd->cp_hqd_pq_rptr);
4194 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4195 mqd->cp_hqd_pq_wptr_lo);
4196 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
			     mqd->cp_hqd_pq_wptr_hi);
	}
4200 /* set the pointer to the MQD */
4201 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
4202 mqd->cp_mqd_base_addr_lo);
4203 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
4204 mqd->cp_mqd_base_addr_hi);
4206 /* set MQD vmid to 0 */
4207 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
4208 mqd->cp_mqd_control);
4210 /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
4211 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
4212 mqd->cp_hqd_pq_base_lo);
4213 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
4214 mqd->cp_hqd_pq_base_hi);
4216 /* set up the HQD, this is similar to CP_RB0_CNTL */
4217 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
4218 mqd->cp_hqd_pq_control);
4220 /* set the wb address whether it's enabled or not */
4221 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
4222 mqd->cp_hqd_pq_rptr_report_addr_lo);
4223 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
4224 mqd->cp_hqd_pq_rptr_report_addr_hi);
4226 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
4227 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
4228 mqd->cp_hqd_pq_wptr_poll_addr_lo);
4229 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
4230 mqd->cp_hqd_pq_wptr_poll_addr_hi);
4232 /* enable the doorbell if requested */
4233 if (ring->use_doorbell) {
4234 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
4235 (adev->doorbell_index.kiq * 2) << 2);
4236 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
			     (adev->doorbell_index.userqueue_end * 2) << 2);
	}

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
4241 mqd->cp_hqd_pq_doorbell_control);
4243 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
4244 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
4245 mqd->cp_hqd_pq_wptr_lo);
4246 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
4247 mqd->cp_hqd_pq_wptr_hi);
4249 /* set the vmid for the queue */
4250 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
4252 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
4253 mqd->cp_hqd_persistent_state);
4255 /* activate the queue */
4256 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
4257 mqd->cp_hqd_active);
4259 if (ring->use_doorbell)
		WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

	return 0;
}
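/*
 * KIQ queue init: on a GPU reset the saved MQD backup is restored and
 * the ring is simply cleared, otherwise a fresh MQD is generated; in
 * both cases the HQD registers are (re)programmed under srbm_mutex.
 */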
static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
4268 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4269 int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
4271 gfx_v11_0_kiq_setting(ring);
4273 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4274 /* reset MQD to a clean status */
4275 if (adev->gfx.mec.mqd_backup[mqd_idx])
4276 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset ring buffer */
		ring->wptr = 0;
		amdgpu_ring_clear_ring(ring);
4282 mutex_lock(&adev->srbm_mutex);
4283 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4284 gfx_v11_0_kiq_init_register(ring);
4285 soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else {
		memset((void *)mqd, 0, sizeof(*mqd));
4289 mutex_lock(&adev->srbm_mutex);
4290 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4291 amdgpu_ring_init_mqd(ring);
4292 gfx_v11_0_kiq_init_register(ring);
4293 soc21_grbm_select(adev, 0, 0, 0, 0);
4294 mutex_unlock(&adev->srbm_mutex);
		if (adev->gfx.mec.mqd_backup[mqd_idx])
			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
	}

	return 0;
}
static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
4306 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4307 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4309 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4310 memset((void *)mqd, 0, sizeof(*mqd));
4311 mutex_lock(&adev->srbm_mutex);
4312 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4313 amdgpu_ring_init_mqd(ring);
4314 soc21_grbm_select(adev, 0, 0, 0, 0);
4315 mutex_unlock(&adev->srbm_mutex);
4317 if (adev->gfx.mec.mqd_backup[mqd_idx])
4318 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
4319 } else if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
4320 /* reset MQD to a clean status */
4321 if (adev->gfx.mec.mqd_backup[mqd_idx])
4322 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
		/* reset ring buffer */
		ring->wptr = 0;
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
		amdgpu_ring_clear_ring(ring);
	} else {
		amdgpu_ring_clear_ring(ring);
	}

	return 0;
}
static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->gfx.kiq.ring;

	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v11_0_kiq_init_queue(ring);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
	ring->sched.ready = true;

	return 0;
}
static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int r = 0, i;

	if (!amdgpu_async_gfx_ring)
		gfx_v11_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];

		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v11_0_kcq_init_queue(ring);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
		if (r)
			goto done;
	}

	r = amdgpu_gfx_enable_kcq(adev);
done:
	return r;
}
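/*
 * Bring up the whole command processor: load (or wait for) microcode,
 * set the doorbell apertures, resume the KIQ/KCQs and gfx rings, then
 * smoke-test every ring before declaring the CP ready.
 */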
static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
{
	int r, i;
	struct amdgpu_ring *ring;

	if (!(adev->flags & AMD_IS_APU))
		gfx_v11_0_enable_gui_idle_interrupt(adev, false);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		/* legacy firmware loading */
		r = gfx_v11_0_cp_gfx_load_microcode(adev);
		if (r)
			return r;

		if (adev->gfx.rs64_enable)
			r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
		else
			r = gfx_v11_0_cp_compute_load_microcode(adev);
		if (r)
			return r;
	}

	gfx_v11_0_cp_set_doorbell_range(adev);

	if (amdgpu_async_gfx_ring) {
		gfx_v11_0_cp_compute_enable(adev, true);
		gfx_v11_0_cp_gfx_enable(adev, true);
	}

	if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
		r = amdgpu_mes_kiq_hw_init(adev);
	else
		r = gfx_v11_0_kiq_resume(adev);
	if (r)
		return r;

	r = gfx_v11_0_kcq_resume(adev);
	if (r)
		return r;

	if (!amdgpu_async_gfx_ring) {
		r = gfx_v11_0_cp_gfx_resume(adev);
		if (r)
			return r;
	} else {
		r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	return 0;
}
static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v11_0_cp_gfx_enable(adev, enable);
	gfx_v11_0_cp_compute_enable(adev, enable);
}
static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	adev->hdp.funcs->flush_hdp(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	adev->gfxhub.funcs->set_fault_enable_default(adev, value);
	amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	return 0;
}
static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
{
	u32 tmp;

	/* select the RS64 engine/ISA mode when RS64 microcode is in use */
	if (adev->gfx.rs64_enable) {
4489 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4490 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4491 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4493 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4494 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
		WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
	}

	if (amdgpu_emu_mode == 1)
		msleep(100);
}
static int get_gb_addr_config(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
	if (gb_addr_config == 0)
		return -EINVAL;
4510 adev->gfx.config.gb_addr_config_fields.num_pkrs =
4511 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4513 adev->gfx.config.gb_addr_config = gb_addr_config;
4515 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4516 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4517 GB_ADDR_CONFIG, NUM_PIPES);
4519 adev->gfx.config.max_tile_pipes =
4520 adev->gfx.config.gb_addr_config_fields.num_pipes;
4522 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4523 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4524 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4525 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4526 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4527 GB_ADDR_CONFIG, NUM_RB_PER_SE);
4528 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4529 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4530 GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4531 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4532 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
			GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));

	return 0;
}
static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
{
	uint32_t data;

	data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4543 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4544 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4546 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4547 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
}
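/*
 * hw_init ordering is significant: RLC/IMU autoload (or direct loading)
 * has to finish before the gfxhub and golden registers are touched, and
 * on gfx11 the SMU firmware must be up before the RLC when loading
 * firmware directly.
 */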
static int gfx_v11_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4556 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4557 if (adev->gfx.imu.funcs) {
4558 /* RLC autoload sequence 1: Program rlc ram */
4559 if (adev->gfx.imu.funcs->program_rlc_ram)
4560 adev->gfx.imu.funcs->program_rlc_ram(adev);
		}
		/* rlc autoload firmware */
		r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
		if (r)
			return r;
	} else {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4568 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4569 if (adev->gfx.imu.funcs->load_microcode)
4570 adev->gfx.imu.funcs->load_microcode(adev);
4571 if (adev->gfx.imu.funcs->setup_imu)
4572 adev->gfx.imu.funcs->setup_imu(adev);
4573 if (adev->gfx.imu.funcs->start_imu)
4574 adev->gfx.imu.funcs->start_imu(adev);
			}

			/* disable gpa mode in backdoor loading */
			gfx_v11_0_disable_gpa_mode(adev);
		}
	}
4582 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4583 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
		if (r) {
			dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
			return r;
		}
	}
4591 adev->gfx.is_poweron = true;
	if (get_gb_addr_config(adev))
		DRM_WARN("Invalid gb_addr_config!\n");
4596 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4597 adev->gfx.rs64_enable)
4598 gfx_v11_0_config_gfx_rs64(adev);
	r = gfx_v11_0_gfxhub_enable(adev);
	if (r)
		return r;
4604 if (!amdgpu_emu_mode)
4605 gfx_v11_0_init_golden_registers(adev);
4607 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4608 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		/*
		 * For gfx 11, RLC firmware loading relies on the SMU firmware
		 * being loaded first, so in the direct loading case the SMC
		 * ucode has to be loaded here before the RLC.
		 */
		if (!(adev->flags & AMD_IS_APU)) {
			r = amdgpu_pm_load_smu_firmware(adev, NULL);
			if (r)
				return r;
		}
	}
4621 gfx_v11_0_constants_init(adev);
4623 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4624 gfx_v11_0_select_cp_fw_arch(adev);
4626 if (adev->nbio.funcs->gc_doorbell_init)
4627 adev->nbio.funcs->gc_doorbell_init(adev);
	r = gfx_v11_0_rlc_resume(adev);
	if (r)
		return r;
	/*
	 * init golden registers and rlc resume may override some registers,
	 * reconfigure them here
	 */
	gfx_v11_0_tcp_harvest(adev);
	r = gfx_v11_0_cp_resume(adev);
	if (r)
		return r;

	return 0;
}
4646 #ifndef BRING_UP_DEBUG
static int gfx_v11_0_kiq_disable_kgq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
			      adev->gfx.num_gfx_rings))
		return -ENOMEM;
4660 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
4661 kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
4662 PREEMPT_QUEUES, 0, 0);
	if (adev->gfx.kiq.ring.sched.ready)
		r = amdgpu_ring_test_helper(kiq_ring);

	return r;
}
#endif
static int gfx_v11_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	uint32_t tmp;
	int r;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4678 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4680 if (!adev->no_hw_access) {
4681 #ifndef BRING_UP_DEBUG
4682 if (amdgpu_async_gfx_ring) {
			r = gfx_v11_0_kiq_disable_kgq(adev);
			if (r)
				DRM_ERROR("KGQ disable failed\n");
		}
#endif
4688 if (amdgpu_gfx_disable_kcq(adev))
4689 DRM_ERROR("KCQ disable failed\n");
		amdgpu_mes_kiq_hw_fini(adev);
	}
4694 if (amdgpu_sriov_vf(adev)) {
4695 gfx_v11_0_cp_gfx_enable(adev, false);
4696 /* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
		tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
		tmp &= 0xffffff00;
		WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);

		return 0;
	}
4703 gfx_v11_0_cp_enable(adev, false);
4704 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4706 adev->gfxhub.funcs->gart_disable(adev);
	adev->gfx.is_poweron = false;

	return 0;
}
static int gfx_v11_0_suspend(void *handle)
{
	return gfx_v11_0_hw_fini(handle);
}
static int gfx_v11_0_resume(void *handle)
{
	return gfx_v11_0_hw_init(handle);
}
static bool gfx_v11_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
			  GRBM_STATUS, GUI_ACTIVE))
		return false;
	else
		return true;
}
static int gfx_v11_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
			GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
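/*
 * Soft reset flow: mask CP interrupts, request dequeue on every compute
 * and gfx HQD, kick all queues off the hardware via CP_VMID_RESET, then
 * pulse the GRBM soft-reset bits and bring the CP back up.
 */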
static int gfx_v11_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0;
	u32 tmp;
	int i, j, k;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4759 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4760 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4761 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4762 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4763 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4764 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4766 gfx_v11_0_set_safe_mode(adev);
4768 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4769 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4770 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4771 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4772 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4773 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4774 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4775 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4777 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
				WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
			}
		}
	}
4782 for (i = 0; i < adev->gfx.me.num_me; ++i) {
4783 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4784 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4785 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4786 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4787 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4788 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4789 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
				WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
			}
		}
	}
4796 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
	/*
	 * Read the CP_VMID_RESET register three times to give
	 * GFX_HQD_ACTIVE sufficient time to reach 0.
	 */
4800 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4801 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4802 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4804 for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
		    !RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "failed to wait for all pipes to become idle\n");
		return -EINVAL;
	}
4815 /********** trigger soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 1);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 1);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4828 /********** exit soft reset ***********/
	grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CP, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_GFX, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPF, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPC, 0);
	grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
					SOFT_RESET_CPG, 0);
	WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4842 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4843 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4844 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4846 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4847 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4849 for (i = 0; i < adev->usec_timeout; i++) {
		if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
			break;
		udelay(1);
	}
	if (i >= adev->usec_timeout) {
		dev_err(adev->dev, "failed to wait for CP_VMID_RESET to clear\n");
		return -EINVAL;
	}
4859 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4860 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4861 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4862 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4863 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4864 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4866 gfx_v11_0_unset_safe_mode(adev);
	return gfx_v11_0_cp_resume(adev);
}
static bool gfx_v11_0_check_soft_reset(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
4876 long tmo = msecs_to_jiffies(1000);
4878 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4879 ring = &adev->gfx.gfx_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		r = amdgpu_ring_test_ib(ring, tmo);
		if (r)
			return true;
	}

	return false;
}
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	amdgpu_gfx_off_ctrl(adev, false);
4900 mutex_lock(&adev->gfx.gpu_clock_mutex);
4901 clock = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER) |
4902 ((uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER) << 32ULL);
4903 mutex_unlock(&adev->gfx.gpu_clock_mutex);
	amdgpu_gfx_off_ctrl(adev, true);

	return clock;
}
static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					   uint32_t vmid,
					   uint32_t gds_base, uint32_t gds_size,
					   uint32_t gws_base, uint32_t gws_size,
					   uint32_t oa_base, uint32_t oa_size)
{
	struct amdgpu_device *adev = ring->adev;

	/* GDS Base */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
				    gds_base);

	/* GDS Size */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
				    gds_size);

	/* GWS */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
				    gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	gfx_v11_0_write_data_to_reg(ring, 0, false,
				    SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
				    (1 << (oa_size + oa_base)) - (1 << oa_base));
}
static int gfx_v11_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4941 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
4942 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4943 AMDGPU_MAX_COMPUTE_RINGS);
4945 gfx_v11_0_set_kiq_pm4_funcs(adev);
4946 gfx_v11_0_set_ring_funcs(adev);
4947 gfx_v11_0_set_irq_funcs(adev);
4948 gfx_v11_0_set_gds_init(adev);
4949 gfx_v11_0_set_rlc_funcs(adev);
4950 gfx_v11_0_set_mqd_funcs(adev);
4951 gfx_v11_0_set_imu_funcs(adev);
	gfx_v11_0_init_rlcg_reg_access_ctrl(adev);

	return 0;
}
static int gfx_v11_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}
static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_cntl;

	/* if RLC is not enabled, do nothing */
	rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
	return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
}
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);

	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
				   RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev)
{
	WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
}
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}
static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}
static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
					   bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
		return;

	def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

	if (enable)
		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
	else
		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;

	if (def != data)
		WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}
static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t data, def;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
		return;

	/* It is disabled by HW by default */
	if (enable) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			/* 1 - RLC_CGTT_MGCG_OVERRIDE */
			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

			data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
				  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);

			if (def != data)
				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
		}
	} else {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
			def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);

			data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
				 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
				 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);

			if (def != data)
				WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
		}
	}
}
static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
							bool enable)
{
	uint32_t def, data;

	if (!(adev->cg_flags &
5105 (AMD_CG_SUPPORT_GFX_CGCG |
5106 AMD_CG_SUPPORT_GFX_CGLS |
5107 AMD_CG_SUPPORT_GFX_3D_CGCG |
	      AMD_CG_SUPPORT_GFX_3D_CGLS)))
		return;
	if (enable) {
		def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5114 /* unset CGCG override */
5115 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5116 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
5117 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5118 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
5119 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
5120 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5121 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
		/* update CGCG override bits */
		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
5127 /* enable cgcg FSM(0x0000363F) */
5128 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5130 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
5131 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
5132 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
		}
5136 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
5137 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
5138 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		}

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5145 /* Program RLC_CGCG_CGLS_CTRL_3D */
5146 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5148 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
5149 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
5150 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
		}
5154 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
5155 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
5156 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
				RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		}

		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5163 /* set IDLE_POLL_COUNT(0x00900100) */
5164 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
5166 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
5167 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
5168 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
		if (def != data)
			WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
5173 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
5174 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
5175 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
5176 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
5177 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
5178 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
5180 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5181 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
5182 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
5184 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
5185 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
		WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
	} else {
		/* Program RLC_CGCG_CGLS_CTRL */
5189 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5191 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
5192 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
5194 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
5195 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
5200 /* Program RLC_CGCG_CGLS_CTRL_3D */
5201 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5203 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
5204 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
5205 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
5206 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
		if (def != data)
			WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
5211 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
5212 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
5213 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
		data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
		data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
		WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
	}
}
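/*
 * Clock gating is layered: coarse-grain (CGCG/CGLS) first, then
 * medium-grain, then the fine-grain repeater/SRAM/perf-clock gates.
 * Everything runs inside RLC safe mode so the RLC cannot race the
 * register updates.
 */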
static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
					     bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev);
5226 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
5228 gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
5230 gfx_v11_0_update_repeater_fgcg(adev, enable);
5232 gfx_v11_0_update_sram_fgcg(adev, enable);
5234 gfx_v11_0_update_perf_clk(adev, enable);
5236 if (adev->cg_flags &
5237 (AMD_CG_SUPPORT_GFX_MGCG |
5238 AMD_CG_SUPPORT_GFX_CGLS |
5239 AMD_CG_SUPPORT_GFX_CGCG |
5240 AMD_CG_SUPPORT_GFX_3D_CGCG |
5241 AMD_CG_SUPPORT_GFX_3D_CGLS))
5242 gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
	amdgpu_gfx_rlc_exit_safe_mode(adev);

	return 0;
}
static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
	u32 reg, data;

	reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(reg);
	else
		data = RREG32(reg);

	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
	else
		WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
}
5268 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5269 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5270 .set_safe_mode = gfx_v11_0_set_safe_mode,
5271 .unset_safe_mode = gfx_v11_0_unset_safe_mode,
5272 .init = gfx_v11_0_rlc_init,
5273 .get_csb_size = gfx_v11_0_get_csb_size,
5274 .get_csb_buffer = gfx_v11_0_get_csb_buffer,
5275 .resume = gfx_v11_0_rlc_resume,
5276 .stop = gfx_v11_0_rlc_stop,
5277 .reset = gfx_v11_0_rlc_reset,
5278 .start = gfx_v11_0_rlc_start,
	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};
5282 static int gfx_v11_0_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5286 bool enable = (state == AMD_PG_STATE_GATE);
	if (amdgpu_sriov_vf(adev))
		return 0;
5291 switch (adev->ip_versions[GC_HWIP][0]) {
5292 case IP_VERSION(11, 0, 0):
5293 case IP_VERSION(11, 0, 2):
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	default:
		break;
	}

	return 0;
}
5303 static int gfx_v11_0_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	if (amdgpu_sriov_vf(adev))
		return 0;
5311 switch (adev->ip_versions[GC_HWIP][0]) {
5312 case IP_VERSION(11, 0, 0):
5313 case IP_VERSION(11, 0, 2):
		gfx_v11_0_update_gfx_clock_gating(adev,
						  state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

	return 0;
}
static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;
5329 /* AMD_CG_SUPPORT_GFX_MGCG */
5330 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5331 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5332 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5334 /* AMD_CG_SUPPORT_REPEATER_FGCG */
5335 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5336 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5338 /* AMD_CG_SUPPORT_GFX_FGCG */
5339 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5340 *flags |= AMD_CG_SUPPORT_GFX_FGCG;
5342 /* AMD_CG_SUPPORT_GFX_PERF_CLK */
5343 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5344 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5346 /* AMD_CG_SUPPORT_GFX_CGCG */
5347 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5348 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5349 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5351 /* AMD_CG_SUPPORT_GFX_CGLS */
5352 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5353 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5355 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5356 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5357 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5358 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5360 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
	if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
}
static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
{
	/* gfx11 is 32bit rptr */
	return *(uint32_t *)ring->rptr_cpu_addr;
}
static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell) {
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	} else {
		wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
		wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
	}

	return wptr;
}
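/*
 * MES-managed queues keep a shadow wptr (plus an "unmapped" flag) right
 * behind the MQD; when the queue is unmapped, the write is also mirrored
 * to the aggregated doorbell so the MES scheduler notices the submission.
 */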
static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
	uint64_t wptr_tmp;
5396 if (ring->is_mes_queue) {
5397 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
5400 aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);
5404 wptr_tmp = ring->wptr & ring->buf_mask;
5405 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5406 *wptr_saved = wptr_tmp;
		/* assume doorbell always being used by mes mapped queue */
		if (*is_queue_unmap) {
			WDOORBELL64(aggregated_db_index, wptr_tmp);
			WDOORBELL64(ring->doorbell_index, wptr_tmp);
		} else {
			WDOORBELL64(ring->doorbell_index, wptr_tmp);

			if (*is_queue_unmap)
				WDOORBELL64(aggregated_db_index, wptr_tmp);
		}
	} else {
		if (ring->use_doorbell) {
5419 /* XXX check if swapping is necessary on BE */
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr);
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		} else {
			WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5425 lower_32_bits(ring->wptr));
5426 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
				     upper_32_bits(ring->wptr));
		}
	}
}
static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	/* gfx11 hardware is 32bit rptr */
	return *(uint32_t *)ring->rptr_cpu_addr;
}
static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	u64 wptr;

	/* XXX check if swapping is necessary on BE */
	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}
static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t *wptr_saved;
	uint32_t *is_queue_unmap;
	uint64_t aggregated_db_index;
	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
	uint64_t wptr_tmp;
5459 if (ring->is_mes_queue) {
5460 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
					      sizeof(uint32_t));
5463 aggregated_db_index =
			amdgpu_mes_get_aggregated_doorbell_index(adev,
								 ring->hw_prio);
5467 wptr_tmp = ring->wptr & ring->buf_mask;
5468 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5469 *wptr_saved = wptr_tmp;
		/* assume doorbell always used by mes mapped queue */
		if (*is_queue_unmap) {
			WDOORBELL64(aggregated_db_index, wptr_tmp);
			WDOORBELL64(ring->doorbell_index, wptr_tmp);
		} else {
			WDOORBELL64(ring->doorbell_index, wptr_tmp);

			if (*is_queue_unmap)
				WDOORBELL64(aggregated_db_index, wptr_tmp);
		}
	} else {
		/* XXX check if swapping is necessary on BE */
		if (ring->use_doorbell) {
			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
				     ring->wptr);
			WDOORBELL64(ring->doorbell_index, ring->wptr);
		} else {
			BUG(); /* only DOORBELL method supported on gfx11 now */
		}
	}
}
static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
5495 u32 ref_and_mask, reg_mem_engine;
5496 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
			break;
		case 2:
			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
			break;
		default:
			return;
		}
		reg_mem_engine = 0;
	} else {
		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
		reg_mem_engine = 1; /* pfp */
	}
5515 gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5516 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5517 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       ref_and_mask, ref_and_mask, 0x20);
}
5521 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5522 struct amdgpu_job *job,
				       struct amdgpu_ib *ib,
				       uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5527 u32 header, control = 0;
5529 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5531 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5533 control |= ib->length_dw | (vmid << 24);
5535 if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5536 control |= INDIRECT_BUFFER_PRE_ENB(1);
5538 if (flags & AMDGPU_IB_PREEMPTED)
5539 control |= INDIRECT_BUFFER_PRE_RESUME(1);
		if (vmid)
			gfx_v11_0_ring_emit_de_meta(ring,
						    (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
	}
5546 if (ring->is_mes_queue)
5547 /* inherit vmid from mqd */
5548 control |= 0x400000;
5550 amdgpu_ring_write(ring, header);
5551 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  lower_32_bits(ib->gpu_addr));
5557 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, control);
}
5561 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5562 struct amdgpu_job *job,
					   struct amdgpu_ib *ib,
					   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5567 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5569 if (ring->is_mes_queue)
5570 /* inherit vmid from mqd */
5571 control |= 0x40000000;
5573 /* Currently, there is a high possibility to get wave ID mismatch
5574 * between ME and GDS, leading to a hw deadlock, because ME generates
5575 * different wave IDs than the GDS expects. This situation happens
5576 * randomly when at least 5 compute pipes use GDS ordered append.
5577 * The wave IDs generated by ME are also wrong after suspend/resume.
5578 * Those are probably bugs somewhere else in the kernel driver.
5580 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5581 * GDS to 0 for this ring (me/pipe).
5583 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5584 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5585 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5586 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5589 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5590 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
5591 amdgpu_ring_write(ring,
5595 lower_32_bits(ib->gpu_addr));
5596 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5597 amdgpu_ring_write(ring, control);
static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				      u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
				 PACKET3_RELEASE_MEM_GCR_GL2_WB |
				 PACKET3_RELEASE_MEM_GCR_GL2_INV |
				 PACKET3_RELEASE_MEM_GCR_GL2_US |
				 PACKET3_RELEASE_MEM_GCR_GL1_INV |
				 PACKET3_RELEASE_MEM_GCR_GLV_INV |
				 PACKET3_RELEASE_MEM_GCR_GLM_INV |
				 PACKET3_RELEASE_MEM_GCR_GLM_WB |
				 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
				 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
	amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
				 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));

	/*
	 * the address should be Qword aligned if 64bit write, Dword
	 * aligned if only send 32bit data low (discard data high)
	 */
	if (write64bit)
		BUG_ON(addr & 0x7);
	else
		BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
	amdgpu_ring_write(ring, ring->is_mes_queue ?
			 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
}
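/*
 * Alignment example for the fence address (illustrative): with
 * DATA_SEL(2) (64-bit data) an addr of 0x1004 trips BUG_ON(addr & 0x7),
 * while the same addr is legal for DATA_SEL(1) (32-bit data), which
 * only needs dword alignment (addr & 0x3 == 0).
 */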
static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
			       upper_32_bits(addr), seq, 0xffffffff, 4);
}
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}
static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	if (ring->is_mes_queue)
		gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
	else
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* compute doesn't have PFP */
	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);
	}
}
static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
					  u64 seq, unsigned int flags)
{
	struct amdgpu_device *adev = ring->adev;

	/* we only allocate 32bit for each seq wb address */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	/* write fence seq to the "addr" */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	if (flags & AMDGPU_FENCE_FLAG_INT) {
		/* set register to trigger INT */
		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
	}
}
static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
					 uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs for GFX */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
{
	unsigned ret;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
	amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
	amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
	ret = ring->wptr & ring->buf_mask;
	amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */

	return ret;
}
static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
{
	unsigned cur;

	BUG_ON(offset > ring->buf_mask);
	BUG_ON(ring->ring[offset] != 0x55aa55aa);

	cur = (ring->wptr - 1) & ring->buf_mask;
	if (likely(cur > offset))
		ring->ring[offset] = cur - offset;
	else
		ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
}
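/*
 * Worked example of the wrap-around case (illustrative): with a
 * 256-dword ring (buf_mask == 0xff), a COND_EXEC patched at offset 250
 * and wptr now at 10, cur == 9 and the patched count is
 * (0xff + 1) - 250 + 9 = 15 dwords, i.e. the distance from the patch
 * slot to the current write position across the wrap.
 */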
static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
{
	int i, r = 0;
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	unsigned long flags;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock_irqsave(&kiq->ring_lock, flags);

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
		spin_unlock_irqrestore(&kiq->ring_lock, flags);
		return -ENOMEM;
	}

	/* assert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, false);

	/* assert IB preemption, emit the trailing fence */
	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
				   ring->trail_fence_gpu_addr,
				   ++ring->trail_seq);
	amdgpu_ring_commit(kiq_ring);

	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* poll the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (ring->trail_seq ==
		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout) {
		r = -EINVAL;
		DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
	}

	/* deassert preemption condition */
	amdgpu_ring_set_preempt_cond_exec(ring, true);
	return r;
}
static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
{
	struct amdgpu_device *adev = ring->adev;
	struct v10_de_ib_state de_payload = {0};
	uint64_t offset, gds_addr, de_payload_gpu_addr;
	void *de_payload_cpu_addr;
	int cnt;

	if (ring->is_mes_queue) {
		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  gfx[0].gfx_meta_data) +
			offsetof(struct v10_gfx_meta_data, de_payload);
		de_payload_gpu_addr =
			amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		de_payload_cpu_addr =
			amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		offset = offsetof(struct amdgpu_mes_ctx_meta_data,
				  gfx[0].gds_backup) +
			offsetof(struct v10_gfx_meta_data, de_payload);
		gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
	} else {
		offset = offsetof(struct v10_gfx_meta_data, de_payload);
		de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
		de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;

		gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
				 AMDGPU_CSA_SIZE - adev->gds.gds_size,
				 PAGE_SIZE);
	}

	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);

	cnt = (sizeof(de_payload) >> 2) + 4 - 2;
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(8) |
				 WR_CONFIRM) |
				 WRITE_DATA_CACHE_POLICY(0));
	amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));

	if (resume)
		amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
					   sizeof(de_payload) >> 2);
	else
		amdgpu_ring_write_multiple(ring, (void *)&de_payload,
					   sizeof(de_payload) >> 2);
}
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
					   bool secure)
{
	uint32_t v = secure ? FRAME_TMZ : 0;

	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
	amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t reg_val_offs)
{
	struct amdgpu_device *adev = ring->adev;

	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
	amdgpu_ring_write(ring, 0 |	/* src: register*/
				(5 << 8) |	/* dst: memory */
				(1 << 20));	/* write confirm */
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
				reg_val_offs * 4));
}
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val)
{
	uint32_t cmd = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_GFX:
		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
		break;
	case AMDGPU_RING_TYPE_KIQ:
		cmd = (1 << 16); /* no inc addr */
		break;
	default:
		cmd = WR_CONFIRM;
		break;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
}
static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
						   uint32_t reg0, uint32_t reg1,
						   uint32_t ref, uint32_t mask)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
			       ref, mask, 0x20);
}
static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
					 unsigned vmid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t value = 0;

	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
	WREG32_SOC15(GC, 0, regSQ_CMD, value);
}
static void
gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
				      uint32_t me, uint32_t pipe,
				      enum amdgpu_interrupt_state state)
{
	uint32_t cp_int_cntl, cp_int_cntl_reg;

	switch (me) {
	case 0:
		switch (pipe) {
		case 0:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
			break;
		case 1:
			cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
		break;
	default:
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 0);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    TIME_STAMP_INT_ENABLE, 1);
		cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
					    GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
		break;
	default:
		break;
	}
}
static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */
	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
			break;
		case 1:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
			break;
		case 2:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
			break;
		case 3:
			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 0);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 0);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     TIME_STAMP_INT_ENABLE, 1);
		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
					     GENERIC0_INT_ENABLE, 1);
		WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
		break;
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
		gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	default:
		break;
	}
	return 0;
}
static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	int i;
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	uint32_t mes_queue_id = entry->src_data[0];

	DRM_DEBUG("IH: CP EOP\n");

	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
		struct amdgpu_mes_queue *queue;

		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;

		spin_lock(&adev->mes.queue_id_lock);
		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
		if (queue) {
			DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
			amdgpu_fence_process(queue->ring);
		}
		spin_unlock(&adev->mes.queue_id_lock);
	} else {
		me_id = (entry->ring_id & 0x0c) >> 2;
		pipe_id = (entry->ring_id & 0x03) >> 0;
		queue_id = (entry->ring_id & 0x70) >> 4;

		switch (me_id) {
		case 0:
			if (pipe_id == 0)
				amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
			else
				amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
			break;
		case 1:
		case 2:
			for (i = 0; i < adev->gfx.num_compute_rings; i++) {
				ring = &adev->gfx.compute_ring[i];
				/* Per-queue interrupt is supported for MEC starting from VI.
				 * The interrupt can only be enabled/disabled per pipe instead
				 * of per queue.
				 */
				if ((ring->me == me_id) &&
				    (ring->pipe == pipe_id) &&
				    (ring->queue == queue_id))
					amdgpu_fence_process(ring);
			}
			break;
		}
	}

	return 0;
}
static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_REG_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					       struct amdgpu_irq_src *source,
					       unsigned type,
					       enum amdgpu_interrupt_state state)
{
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
	case AMDGPU_IRQ_STATE_ENABLE:
		WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
				      PRIV_INSTR_INT_ENABLE,
				      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
		break;
	default:
		break;
	}

	return 0;
}
static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
					struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id, queue_id;
	struct amdgpu_ring *ring;
	int i;

	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	queue_id = (entry->ring_id & 0x70) >> 4;

	switch (me_id) {
	case 0:
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			/* we only enabled 1 gfx queue per pipe for now */
			if (ring->me == me_id && ring->pipe == pipe_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if (ring->me == me_id && ring->pipe == pipe_id &&
			    ring->queue == queue_id)
				drm_sched_fault(&ring->sched);
		}
		break;
	default:
		BUG();
		break;
	}
}
static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}
static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v11_0_handle_priv_fault(adev, entry);
	return 0;
}
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	uint32_t tmp, target;
	struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);

	target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
	target += ring->pipe;

	switch (type) {
	case AMDGPU_CP_KIQ_IRQ_DRIVER0:
		if (state == AMDGPU_IRQ_STATE_DISABLE) {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 0);
			WREG32_SOC15_IP(GC, target, tmp);
		} else {
			tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
			tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);

			tmp = RREG32_SOC15_IP(GC, target);
			tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
					    GENERIC2_INT_ENABLE, 1);
			WREG32_SOC15_IP(GC, target, tmp);
		}
		break;
	default:
		BUG(); /* kiq only support GENERIC2_INT now */
		break;
	}
	return 0;
}
static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
{
	const unsigned int gcr_cntl =
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
			PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);

	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
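/*
 * The writes above request a full-range flush: CP_COHER_SIZE/SIZE_HI of
 * all ones with CP_COHER_BASE at zero covers the whole address space,
 * and gcr_cntl asks for GL2 write-back plus invalidate together with
 * invalidation of the GLM, GL1, GLV, GLK and GLI cache levels.
 */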
static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
	.name = "gfx_v11_0",
	.early_init = gfx_v11_0_early_init,
	.late_init = gfx_v11_0_late_init,
	.sw_init = gfx_v11_0_sw_init,
	.sw_fini = gfx_v11_0_sw_fini,
	.hw_init = gfx_v11_0_hw_init,
	.hw_fini = gfx_v11_0_hw_fini,
	.suspend = gfx_v11_0_suspend,
	.resume = gfx_v11_0_resume,
	.is_idle = gfx_v11_0_is_idle,
	.wait_for_idle = gfx_v11_0_wait_for_idle,
	.soft_reset = gfx_v11_0_soft_reset,
	.check_soft_reset = gfx_v11_0_check_soft_reset,
	.set_clockgating_state = gfx_v11_0_set_clockgating_state,
	.set_powergating_state = gfx_v11_0_set_powergating_state,
	.get_clockgating_state = gfx_v11_0_get_clockgating_state,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v11_0_ring_get_rptr_gfx,
	.get_wptr = gfx_v11_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v11_0_ring_set_wptr_gfx,
	.emit_frame_size = /* totally 242 maximum if 16 IBs */
		5 + /* COND_EXEC */
		7 + /* PIPELINE_SYNC */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* VM_FLUSH */
		8 + /* FENCE for VM_FLUSH */
		20 + /* GDS switch */
		5 + /* COND_EXEC */
		7 + /* HDP_flush */
		4 + /* VGT_flush */
		31 + /* DE_META */
		3 + /* CNTX_CTRL */
		5 + /* HDP_INVL */
		8 + 8 + /* FENCE x2 */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v11_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
	.init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
	.patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
	.preempt_ib = gfx_v11_0_ring_preempt_ib,
	.emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.soft_recovery = gfx_v11_0_ring_soft_recovery,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
		8, /* gfx_v11_0_emit_mem_sync */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
	.emit_mem_sync = gfx_v11_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
	.type = AMDGPU_RING_TYPE_KIQ,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = true,
	.vmhub = AMDGPU_GFXHUB_0,
	.get_rptr = gfx_v11_0_ring_get_rptr_compute,
	.get_wptr = gfx_v11_0_ring_get_wptr_compute,
	.set_wptr = gfx_v11_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v11_0_ring_emit_gds_switch */
		7 + /* gfx_v11_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v11_0_ring_emit_pipeline_sync */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
		2 + /* gfx_v11_0_ring_emit_vm_flush */
		8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
	.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
	.emit_ib = gfx_v11_0_ring_emit_ib_compute,
	.emit_fence = gfx_v11_0_ring_emit_fence_kiq,
	.test_ring = gfx_v11_0_ring_test_ring,
	.test_ib = gfx_v11_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_rreg = gfx_v11_0_ring_emit_rreg,
	.emit_wreg = gfx_v11_0_ring_emit_wreg,
	.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	adev->gfx.kiq.ring.funcs = &gfx_v11_0_ring_funcs_kiq;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
	.set = gfx_v11_0_set_eop_interrupt_state,
	.process = gfx_v11_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
	.set = gfx_v11_0_set_priv_reg_fault_state,
	.process = gfx_v11_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
	.set = gfx_v11_0_set_priv_inst_fault_state,
	.process = gfx_v11_0_priv_inst_irq,
};
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
}
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		adev->gfx.imu.mode = MISSION_MODE;
	else
		adev->gfx.imu.mode = DEBUG_MODE;

	adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
}
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
}
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
{
	unsigned total_cu = adev->gfx.config.max_cu_per_sh *
			    adev->gfx.config.max_sh_per_se *
			    adev->gfx.config.max_shader_engines;

	adev->gds.gds_size = 0x1000;
	adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
	adev->gds.gws_size = 64;
	adev->gds.oa_size = 16;
}
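/*
 * Worked example (hypothetical config, numbers for illustration only):
 * with max_cu_per_sh = 8, max_sh_per_se = 2 and max_shader_engines = 4,
 * total_cu = 8 * 2 * 4 = 64, so gds_compute_max_wave_id becomes
 * 64 * 32 - 1 = 2047 and wave IDs wrap within [0, 2047].
 */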
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
{
	/* set gfx eng mqd */
	adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
		sizeof(struct v11_gfx_mqd);
	adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
		gfx_v11_0_gfx_mqd_init;
	/* set compute eng mqd */
	adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
		sizeof(struct v11_compute_mqd);
	adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
		gfx_v11_0_compute_mqd_init;
}
static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
							  u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;

	WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 data, wgp_bitmask;

	data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;

	wgp_bitmask =
		amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);

	return (~data) & wgp_bitmask;
}
static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
{
	u32 wgp_idx, wgp_active_bitmap;
	u32 cu_bitmap_per_wgp, cu_active_bitmap;

	wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
	cu_active_bitmap = 0;

	for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
		/* if there is one WGP enabled, it means 2 CUs will be enabled */
		cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
		if (wgp_active_bitmap & (1 << wgp_idx))
			cu_active_bitmap |= cu_bitmap_per_wgp;
	}

	return cu_active_bitmap;
}
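/*
 * Worked example: a per-SH WGP bitmap of 0b0101 (WGPs 0 and 2 active)
 * expands to a CU bitmap of 0b00110011 (0x33); WGP 0 contributes CU
 * bits 0-1 and WGP 2 contributes CU bits 4-5, two CUs per active WGP.
 */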
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap;
	unsigned disable_masks[8 * 2];

	if (!adev || !cu_info)
		return -EINVAL;

	amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			counter = 0;
			gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 8 && j < 2)
				gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);

			/**
			 * GFX11 could support more than 4 SEs, while the bitmap
			 * in the cu_info struct is 4x4 and the ioctl interface
			 * struct drm_amdgpu_info_device should stay stable.
			 * So we use the last two columns of the bitmap to store
			 * the CU mask for SEs 4 to 7; the layout of the bitmap
			 * is as below:
			 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
			 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
			 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
			 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
			 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
			 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
			 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
			 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
			 */
			cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
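			/*
			 * Index example: SE5/SH1 (i = 5, j = 1) stores to
			 * bitmap[5 % 4][1 + (5 / 4) * 2] == bitmap[1][3],
			 * matching the SE5 row in the table above.
			 */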
			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask)
					counter++;

				mask <<= 1;
			}
			active_cu_number += counter;
		}
	}
	gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;

	return 0;
}
const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v11_0_ip_funcs,
};