/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_psp.h"
#include "amdgpu_smu.h"
#include "amdgpu_atomfirmware.h"
#include "imu_v11_0.h"
#include "soc21.h"
#include "nvd.h"

#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "smuio/smuio_13_0_6_offset.h"
#include "smuio/smuio_13_0_6_sh_mask.h"
#include "navi10_enum.h"
#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"

#include "soc15.h"
#include "soc15d.h"
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
#define GFX11_NUM_GFX_RINGS		1
#define GFX11_MEC_HPD_SIZE		2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL		0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1	0x4e7e
#define regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1_BASE_IDX	1
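/* Note: the regCGTT_WD_CLK_CTRL and regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1
 * offsets above are defined locally, presumably because they are not
 * provided by the shared gc_11_0_0 register headers; the *_BASE_IDX value
 * selects the instance base used by the SOC15 register accessors.
 */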
MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_1_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_2_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_WD_CLK_CTRL, 0xffff8fff, 0x00000010),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regCPF_GCR_CNTL, 0x0007ffff, 0x0000c200),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL3, 0xffff001b, 0x00f01988),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_CL_ENHANCE, 0xf0ffffff, 0x00880007),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_ENHANCE_3, 0xfffffffd, 0x00000008),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regPA_SC_VRS_SURFACE_CNTL_1, 0xfff891ff, 0x55480100),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xf7f7ffff, 0x01030000),
	SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL2, 0xfcffffff, 0x0000000a)
};
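/* The golden settings above are applied to GC 11.0.1 and 11.0.4 parts by
 * gfx_v11_0_init_golden_registers() below.
 */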
#define DEFAULT_SH_MEM_CONFIG \
	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
	 (3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev);
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev);
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev);
static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
				 struct amdgpu_cu_info *cu_info);
static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev);
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id);
static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev);

static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
				     uint32_t val);
static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub, uint8_t dst_sel);
static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
				      bool enable);
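/*
 * KIQ (kernel interface queue) helpers: instead of programming hardware
 * queue registers directly, the driver submits PM4 packets on the KIQ ring
 * and lets the CP firmware map, unmap and query the queues on its behalf.
 */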
static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));	/* vmid_mask:0 queue_type:0 (KIQ) */
	amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}
static void gfx11_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				 struct amdgpu_ring *ring)
{
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = ring->wptr_gpu_addr;
	uint32_t me = 0, eng_sel = 0;

	switch (ring->funcs->type) {
	case AMDGPU_RING_TYPE_COMPUTE:
		me = 1;
		eng_sel = 0;
		break;
	case AMDGPU_RING_TYPE_GFX:
		me = 0;
		eng_sel = 4;
		break;
	case AMDGPU_RING_TYPE_MES:
		me = 2;
		eng_sel = 5;
		break;
	default:
		WARN_ON(1);
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1 */
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((me)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
static void gfx11_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   enum amdgpu_unmap_queues_action action,
				   u64 gpu_addr, u64 seq)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	if (adev->enable_mes && !adev->gfx.kiq[0].ring.sched.ready) {
		amdgpu_mes_unmap_legacy_queue(adev, ring, action, gpu_addr, seq);
		return;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}
static void gfx11_kiq_query_status(struct amdgpu_ring *kiq_ring,
				   struct amdgpu_ring *ring,
				   u64 addr,
				   u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}
static void gfx11_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
				      uint16_t pasid, uint32_t flush_type,
				      bool all_hub)
{
	gfx_v11_0_ring_invalidate_tlbs(kiq_ring, pasid, flush_type, all_hub, 1);
}

static const struct kiq_pm4_funcs gfx_v11_0_kiq_pm4_funcs = {
	.kiq_set_resources = gfx11_kiq_set_resources,
	.kiq_map_queues = gfx11_kiq_map_queues,
	.kiq_unmap_queues = gfx11_kiq_unmap_queues,
	.kiq_query_status = gfx11_kiq_query_status,
	.kiq_invalidate_tlbs = gfx11_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v11_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	adev->gfx.kiq[0].pmf = &gfx_v11_0_kiq_pm4_funcs;
}
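/* The *_size fields above are the PM4 packet lengths in dwords (header plus
 * payload) emitted by the corresponding kiq_* helpers, so callers can
 * reserve the right amount of ring space before building the packets.
 */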
static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		soc15_program_register_sequence(adev,
						golden_settings_gc_11_0_1,
						(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
		break;
	default:
		break;
	}
}
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) | (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
static void gfx_v11_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				   int mem_space, int opt, uint32_t addr0,
				   uint32_t addr1, uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) |  /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}
static int gfx_v11_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) {
		gfx_v11_0_ring_emit_wreg(ring, scratch, 0xDEADBEEF);
	} else {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
		amdgpu_ring_write(ring, scratch -
				  PACKET3_SET_UCONFIG_REG_START);
		amdgpu_ring_write(ring, 0xDEADBEEF);
	}
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		if (amdgpu_emu_mode == 1)
			msleep(1);
		else
			udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}
static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_ptr;
	long r;

	/* MES KIQ fw hasn't indirect buffer support for now */
	if (adev->enable_mes_kiq &&
	    ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		return 0;

	memset(&ib, 0, sizeof(ib));

	if (ring->is_mes_queue) {
		uint32_t padding, offset;

		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
		padding = amdgpu_mes_ctx_get_offs(ring,
						  AMDGPU_MES_CTX_PADDING_OFFS);

		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ib.ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);

		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, padding);
		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, padding);
		*cpu_ptr = cpu_to_le32(0xCAFEDEAD);
	} else {
		r = amdgpu_device_wb_get(adev, &index);
		if (r)
			return r;

		gpu_addr = adev->wb.gpu_addr + (index * 4);
		adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
		cpu_ptr = &adev->wb.wb[index];

		r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
		if (r) {
			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
			goto err1;
		}
	}

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	if (le32_to_cpu(*cpu_ptr) == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err2:
	if (!ring->is_mes_queue)
		amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	if (!ring->is_mes_queue)
		amdgpu_device_wb_free(adev, index);

	return r;
}
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);

	kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
{
	const struct psp_firmware_header_v1_0 *toc_hdr;
	int err = 0;
	char fw_name[40];

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
	if (err)
		goto out;

	toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
	adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
	adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
	adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
	adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
				le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
	return 0;
out:
	amdgpu_ucode_release(&adev->psp.toc_fw);
	return err;
}
static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		if ((adev->gfx.me_fw_version >= 1505) &&
		    (adev->gfx.pfp_fw_version >= 1600) &&
		    (adev->gfx.mec_fw_version >= 512))
			adev->gfx.cp_gfx_shadow = true;
		break;
	default:
		adev->gfx.cp_gfx_shadow = false;
		break;
	}
}
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
	char fw_name[40];
	char ucode_prefix[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	DRM_DEBUG("\n");

	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
	if (err)
		goto out;
	/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
	adev->gfx.rs64_enable = amdgpu_ucode_hdr_version(
				(union amdgpu_firmware_header *)
				adev->gfx.pfp_fw->data, 2, 0);
	if (adev->gfx.rs64_enable) {
		dev_info(adev->dev, "CP RS64 enable\n");
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
	}

	if (!amdgpu_sriov_vf(adev)) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
		if (err)
			goto out;
		rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
		version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
		version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
		err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	if (adev->gfx.rs64_enable) {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK);
	} else {
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
		amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);

	/* only one MEC for gfx 11.0.0. */
	adev->gfx.mec2_fw = NULL;

	gfx_v11_0_check_fw_cp_gfx_shadow(adev);
out:
	if (err) {
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		amdgpu_ucode_release(&adev->gfx.me_fw);
		amdgpu_ucode_release(&adev->gfx.rlc_fw);
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	}

	return err;
}
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}

	/* set PA_SC_TILE_STEERING_OVERRIDE */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
				     volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int ctx_reg_offset;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index -
						PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	ctx_reg_offset =
		SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(ctx_reg_offset);
	buffer[count++] = cpu_to_le32(adev->gfx.config.pa_sc_tile_steering_override);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v11_0_rlc_fini(struct amdgpu_device *adev)
{
	/* clear state block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);

	/* jump table block */
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
			      &adev->gfx.rlc.cp_table_gpu_addr,
			      (void **)&adev->gfx.rlc.cp_table_ptr);
}
static void gfx_v11_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG1);
	reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG2);
	reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG3);
	reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_CNTL);
	reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX);
	reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, regRLC_SPARE_INT_0);
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}
static int gfx_v11_0_rlc_init(struct amdgpu_device *adev)
{
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.cs_data = gfx11_cs_data;

	cs_data = adev->gfx.rlc.cs_data;

	if (cs_data) {
		/* init clear state block */
		r = amdgpu_gfx_rlc_init_csb(adev);
		if (r)
			return r;
	}

	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);

	return 0;
}
static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}

static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
	bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);

	amdgpu_gfx_graphics_queue_acquire(adev);
}
static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size = adev->gfx.num_compute_rings * GFX11_MEC_HPD_SIZE;

	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v11_0_mec_fini(adev);
			return r;
		}

		memset(hpd, 0, mec_hpd_size);

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	return 0;
}
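/* Each enabled compute ring gets one GFX11_MEC_HPD_SIZE (2 KiB) slice of
 * the buffer allocated above as EOP packet storage for its hardware queue;
 * gfx_v11_0_compute_ring_init() points ring->eop_gpu_addr into it.
 */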
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t wave, uint32_t address)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT));
	return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t wave,
			   uint32_t thread, uint32_t regno,
			   uint32_t num, uint32_t *out)
{
	WREG32_SOC15(GC, 0, regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__WORKITEM_ID__SHIFT) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, 0, regSQ_IND_DATA);
}
static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* in gfx11 the SIMD_ID is specified as part of the INSTANCE
	 * field when performing a select_se_sh so it should be
	 * zero here */
	WARN_ON(simd != 0);

	/* type 3 wave data */
	dst[(*no_fields)++] = 3;
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_HW_ID2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
}
static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t start,
				      uint32_t size, uint32_t *dst)
{
	WARN_ON(simd != 0);

	wave_read_regs(
		adev, wave, 0, start + SQIND_WAVE_SGPRS_OFFSET, size,
		dst);
}

static void gfx_v11_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				      uint32_t wave, uint32_t thread,
				      uint32_t start, uint32_t size,
				      uint32_t *dst)
{
	wave_read_regs(
		adev, wave, thread,
		start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v11_0_select_me_pipe_q(struct amdgpu_device *adev,
				       u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc21_grbm_select(adev, me, pipe, q, vm);
}
/* all sizes are in bytes */
#define MQD_SHADOW_BASE_SIZE      73728
#define MQD_SHADOW_BASE_ALIGNMENT 256
#define MQD_FWWORKAREA_SIZE       484
#define MQD_FWWORKAREA_ALIGNMENT  256
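/* The sizes/alignments above describe the shadow and firmware workarea
 * (CSA) regions the CP firmware expects for gfx preemption state; they are
 * handed out through gfx_v11_0_get_gfx_shadow_info() below.
 */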
static int gfx_v11_0_get_gfx_shadow_info(struct amdgpu_device *adev,
					 struct amdgpu_gfx_shadow_info *shadow_info)
{
	if (adev->gfx.cp_gfx_shadow) {
		shadow_info->shadow_size = MQD_SHADOW_BASE_SIZE;
		shadow_info->shadow_alignment = MQD_SHADOW_BASE_ALIGNMENT;
		shadow_info->csa_size = MQD_FWWORKAREA_SIZE;
		shadow_info->csa_alignment = MQD_FWWORKAREA_ALIGNMENT;
		return 0;
	} else {
		memset(shadow_info, 0, sizeof(struct amdgpu_gfx_shadow_info));
		return -ENOTSUPP;
	}
}
static const struct amdgpu_gfx_funcs gfx_v11_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v11_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v11_0_select_se_sh,
	.read_wave_data = &gfx_v11_0_read_wave_data,
	.read_wave_sgprs = &gfx_v11_0_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v11_0_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v11_0_select_me_pipe_q,
	.update_perfmon_mgcg = &gfx_v11_0_update_perf_clk,
	.get_gfx_shadow_info = &gfx_v11_0_get_gfx_shadow_info,
};
static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
{
	adev->gfx.funcs = &gfx_v11_0_gfx_funcs;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 3):
		adev->gfx.ras = &gfx_v11_0_3_ras;
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x80;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x300;
		break;
	default:
		BUG();
		break;
	}

	return 0;
}
static int gfx_v11_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
				   int me, int pipe, int queue)
{
	int r;
	struct amdgpu_ring *ring;
	unsigned int irq_type;

	ring = &adev->gfx.gfx_ring[ring_id];

	ring->me = me;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;

	if (!ring_id)
		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.gfx_ring1 << 1;
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;
	return 0;
}
static int gfx_v11_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				       int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;

	ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
				+ (ring_id * GFX11_MEC_HPD_SIZE);
	ring->vm_hub = AMDGPU_GFXHUB(0);
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
			     hw_prio, NULL);
	if (r)
		return r;

	return 0;
}
static struct {
	SOC21_FIRMWARE_ID	id;
	unsigned int		offset;
	unsigned int		size;
} rlc_autoload_info[SOC21_FIRMWARE_ID_MAX];

static void gfx_v11_0_parse_rlc_toc(struct amdgpu_device *adev, void *rlc_toc)
{
	RLC_TABLE_OF_CONTENT *ucode = rlc_toc;

	while (ucode && (ucode->id > SOC21_FIRMWARE_ID_INVALID) &&
	       (ucode->id < SOC21_FIRMWARE_ID_MAX)) {
		rlc_autoload_info[ucode->id].id = ucode->id;
		rlc_autoload_info[ucode->id].offset = ucode->offset * 4;
		rlc_autoload_info[ucode->id].size = ucode->size * 4;

		ucode++;
	}
}
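/* Note: the TOC stores offset and size in dwords, hence the * 4 in
 * gfx_v11_0_parse_rlc_toc() above to convert them to bytes.
 */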
static uint32_t gfx_v11_0_calc_toc_total_size(struct amdgpu_device *adev)
{
	uint32_t total_size = 0;
	SOC21_FIRMWARE_ID id;

	gfx_v11_0_parse_rlc_toc(adev, adev->psp.toc.start_addr);

	for (id = SOC21_FIRMWARE_ID_RLC_G_UCODE; id < SOC21_FIRMWARE_ID_MAX; id++)
		total_size += rlc_autoload_info[id].size;

	/* In case the offset in rlc toc ucode is aligned */
	if (total_size < rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset)
		total_size = rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].offset +
			rlc_autoload_info[SOC21_FIRMWARE_ID_MAX-1].size;

	return total_size;
}
static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
{
	uint32_t total_size;
	int r;

	total_size = gfx_v11_0_calc_toc_total_size(adev);

	r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM |
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.rlc.rlc_autoload_bo,
				      &adev->gfx.rlc.rlc_autoload_gpu_addr,
				      (void **)&adev->gfx.rlc.rlc_autoload_ptr);

	if (r) {
		dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
		return r;
	}

	return 0;
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *adev,
						       SOC21_FIRMWARE_ID id,
						       const void *fw_data,
						       uint32_t fw_size,
						       uint32_t *fw_autoload_mask)
{
	uint32_t toc_offset;
	uint32_t toc_fw_size;
	char *ptr = adev->gfx.rlc.rlc_autoload_ptr;

	if (id <= SOC21_FIRMWARE_ID_INVALID || id >= SOC21_FIRMWARE_ID_MAX)
		return;

	toc_offset = rlc_autoload_info[id].offset;
	toc_fw_size = rlc_autoload_info[id].size;

	if (fw_size == 0)
		fw_size = toc_fw_size;

	if (fw_size > toc_fw_size)
		fw_size = toc_fw_size;

	memcpy(ptr + toc_offset, fw_data, fw_size);

	if (fw_size < toc_fw_size)
		memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size);

	if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME))
		*(uint64_t *)fw_autoload_mask |= 1ULL << id;
}
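/* Each firmware copied above marks its ID bit in the 64-bit autoload mask
 * (the RS64 PFP/ME bits are deliberately left clear). The accumulated mask
 * is later written into the last qword of the TOC by
 * gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode() below, recording which
 * images are enabled for RLC autoload.
 */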
static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	void *data;
	uint32_t size;
	uint64_t *toc_ptr;

	*(uint64_t *)fw_autoload_mask |= 0x1;

	DRM_DEBUG("rlc autoload enabled fw: 0x%llx\n", *(uint64_t *)fw_autoload_mask);

	data = adev->psp.toc.start_addr;
	size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_TOC].size;

	toc_ptr = (uint64_t *)data + size / 8 - 1;
	*toc_ptr = *(uint64_t *)fw_autoload_mask;

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_TOC,
						   data, size, fw_autoload_mask);
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cpv2_hdr;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	const struct rlc_firmware_header_v2_2 *rlcv22_hdr;
	uint16_t version_major, version_minor;

	if (adev->gfx.rs64_enable) {
		/* pfp ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		/* me ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		/* instruction */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_ME_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		/* mec ucode */
		cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		/* instruction */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->ucode_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC,
						fw_data, fw_size, fw_autoload_mask);
		/* data */
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
			le32_to_cpu(cpv2_hdr->data_offset_bytes));
		fw_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK,
						fw_data, fw_size, fw_autoload_mask);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK,
						fw_data, fw_size, fw_autoload_mask);
	} else {
		/* pfp */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_PFP,
						fw_data, fw_size, fw_autoload_mask);

		/* me */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		fw_data = (const __le32 *)(adev->gfx.me_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_ME,
						fw_data, fw_size, fw_autoload_mask);

		/* mec */
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
				le32_to_cpu(cp_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			cp_hdr->jt_size * 4;
		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_CP_MEC,
						fw_data, fw_size, fw_autoload_mask);
	}

	/* rlc ucode */
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)
		adev->gfx.rlc_fw->data;
	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
			le32_to_cpu(rlc_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(rlc_hdr->header.ucode_size_bytes);
	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLC_G_UCODE,
					fw_data, fw_size, fw_autoload_mask);

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	if (version_major == 2) {
		if (version_minor >= 2) {
			rlcv22_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_iram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_UCODE,
					fw_data, fw_size, fw_autoload_mask);

			fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
					le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_offset_bytes));
			fw_size = le32_to_cpu(rlcv22_hdr->rlc_dram_ucode_size_bytes);
			gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev, SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT,
					fw_data, fw_size, fw_autoload_mask);
		}
	}
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(struct amdgpu_device *adev,
							    uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	uint32_t fw_size;
	const struct sdma_firmware_header_v2_0 *sdma_hdr;

	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
		adev->sdma.instance[0].fw->data;
	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH0, fw_data, fw_size, fw_autoload_mask);

	fw_data = (const __le32 *) (adev->sdma.instance[0].fw->data +
			le32_to_cpu(sdma_hdr->ctl_ucode_offset));
	fw_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);

	gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
			SOC21_FIRMWARE_ID_SDMA_UCODE_TH1, fw_data, fw_size, fw_autoload_mask);
}
static void gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(struct amdgpu_device *adev,
							   uint32_t *fw_autoload_mask)
{
	const __le32 *fw_data;
	unsigned fw_size;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	int pipe, ucode_id, data_id;

	for (pipe = 0; pipe < 2; pipe++) {
		if (pipe == 0) {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P0;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P0_STACK;
		} else {
			ucode_id = SOC21_FIRMWARE_ID_RS64_MES_P1;
			data_id  = SOC21_FIRMWARE_ID_RS64_MES_P1_STACK;
		}

		mes_hdr = (const struct mes_firmware_header_v1_0 *)
			adev->mes.fw[pipe]->data;

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				ucode_id, fw_data, fw_size, fw_autoload_mask);

		fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
		fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

		gfx_v11_0_rlc_backdoor_autoload_copy_ucode(adev,
				data_id, fw_data, fw_size, fw_autoload_mask);
	}
}
static int gfx_v11_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev)
{
	uint32_t rlc_g_offset, rlc_g_size;
	uint64_t gpu_addr;
	uint32_t autoload_fw_id[2];

	memset(autoload_fw_id, 0, sizeof(uint32_t) * 2);

	/* RLC autoload sequence 2: copy ucode */
	gfx_v11_0_rlc_backdoor_autoload_copy_sdma_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_gfx_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_mes_ucode(adev, autoload_fw_id);
	gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(adev, autoload_fw_id);

	rlc_g_offset = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].offset;
	rlc_g_size = rlc_autoload_info[SOC21_FIRMWARE_ID_RLC_G_UCODE].size;
	gpu_addr = adev->gfx.rlc.rlc_autoload_gpu_addr + rlc_g_offset;

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_HI, upper_32_bits(gpu_addr));
	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_ADDR_LO, lower_32_bits(gpu_addr));

	WREG32_SOC15(GC, 0, regGFX_IMU_RLC_BOOTLOADER_SIZE, rlc_g_size);

	/* RLC autoload sequence 3: load IMU fw */
	if (adev->gfx.imu.funcs->load_microcode)
		adev->gfx.imu.funcs->load_microcode(adev);
	/* RLC autoload sequence 4 init IMU fw */
	if (adev->gfx.imu.funcs->setup_imu)
		adev->gfx.imu.funcs->setup_imu(adev);
	if (adev->gfx.imu.funcs->start_imu)
		adev->gfx.imu.funcs->start_imu(adev);

	/* RLC autoload sequence 5 disable gpa mode */
	gfx_v11_0_disable_gpa_mode(adev);

	return 0;
}
static int gfx_v11_0_sw_init(void *handle)
{
	int i, j, k, r, ring_id = 0;
	struct amdgpu_kiq *kiq;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 4;
		break;
	default:
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 1;
		adev->gfx.mec.num_pipe_per_mec = 4;
		adev->gfx.mec.num_queue_per_pipe = 8;
		break;
	}

	/* Enable CG flag in one VF mode for enabling RLC safe mode enter/exit */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3) &&
	    amdgpu_sriov_is_pp_one_vf(adev))
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_EOP_INTERRUPT,
			      &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
			      GFX_11_0_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	/* FED error */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
			      GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
			      &adev->gfx.rlc_gc_fed_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	if (adev->gfx.imu.funcs) {
		if (adev->gfx.imu.funcs->init_microcode) {
			r = adev->gfx.imu.funcs->init_microcode(adev);
			if (r)
				DRM_ERROR("Failed to load imu firmware!\n");
		}
	}

	gfx_v11_0_me_init(adev);

	r = gfx_v11_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v11_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the gfx ring */
	for (i = 0; i < adev->gfx.me.num_me; i++) {
		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v11_0_gfx_ring_init(adev, ring_id,
							    i, k, j);
				if (r)
					return r;
				ring_id++;
			}
		}
	}

	ring_id = 0;
	/* set up the compute queues - allocate horizontally across pipes */
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, 0, i,
								     k, j))
					continue;

				r = gfx_v11_0_compute_ring_init(adev, ring_id,
								i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	if (!adev->enable_mes_kiq) {
		r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		kiq = &adev->gfx.kiq[0];
		r = amdgpu_gfx_kiq_init_ring(adev, &kiq->ring, &kiq->irq, 0);
		if (r)
			return r;
	}

	r = amdgpu_gfx_mqd_sw_init(adev, sizeof(struct v11_compute_mqd), 0);
	if (r)
		return r;

	/* allocate visible FB for rlc auto-loading fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		r = gfx_v11_0_rlc_autoload_buffer_init(adev);
		if (r)
			return r;
	}

	r = gfx_v11_0_gpu_early_init(adev);
	if (r)
		return r;

	if (amdgpu_gfx_ras_sw_init(adev)) {
		dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
		return -EINVAL;
	}

	return 0;
}
static void gfx_v11_0_pfp_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_obj,
			      &adev->gfx.pfp.pfp_fw_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.pfp.pfp_fw_data_obj,
			      &adev->gfx.pfp.pfp_fw_data_gpu_addr,
			      (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
}

static void gfx_v11_0_me_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_obj,
			      &adev->gfx.me.me_fw_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_ptr);

	amdgpu_bo_free_kernel(&adev->gfx.me.me_fw_data_obj,
			      &adev->gfx.me.me_fw_data_gpu_addr,
			      (void **)&adev->gfx.me.me_fw_data_ptr);
}

static void gfx_v11_0_rlc_autoload_buffer_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.rlc_autoload_bo,
			      &adev->gfx.rlc.rlc_autoload_gpu_addr,
			      (void **)&adev->gfx.rlc.rlc_autoload_ptr);
}
static int gfx_v11_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_mqd_sw_fini(adev, 0);

	if (!adev->enable_mes_kiq) {
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
		amdgpu_gfx_kiq_fini(adev, 0);
	}

	gfx_v11_0_pfp_fini(adev);
	gfx_v11_0_me_fini(adev);
	gfx_v11_0_rlc_fini(adev);
	gfx_v11_0_mec_fini(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
		gfx_v11_0_rlc_autoload_buffer_fini(adev);

	gfx_v11_0_free_microcode(adev);

	return 0;
}
static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				   u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
				     instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_BROADCAST_WRITES,
				     1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SA_INDEX, sh_num);

	WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;

	gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
	gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
					    CC_GC_SA_UNIT_DISABLE,
					    SA_DISABLE);
	gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
	gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
						 GC_USER_SA_UNIT_DISABLE,
						 SA_DISABLE);
	sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
					    adev->gfx.config.max_shader_engines);

	return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
}

static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
	u32 rb_mask;

	gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
	gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
					    CC_RB_BACKEND_DISABLE,
					    BACKEND_DISABLE);
	gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
	gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
						 GC_USER_RB_BACKEND_DISABLE,
						 BACKEND_DISABLE);
	rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
					    adev->gfx.config.max_shader_engines);

	return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
	u32 rb_bitmap_width_per_sa;
	u32 max_sa;
	u32 active_sa_bitmap;
	u32 global_active_rb_bitmap;
	u32 active_rb_bitmap = 0;
	u32 i;

	/* query sa bitmap from SA_UNIT_DISABLE registers */
	active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
	/* query rb bitmap from RB_BACKEND_DISABLE registers */
	global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);

	/* generate active rb bitmap according to active sa bitmap */
	max_sa = adev->gfx.config.max_shader_engines *
		 adev->gfx.config.max_sh_per_se;
	rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
				 adev->gfx.config.max_sh_per_se;
	for (i = 0; i < max_sa; i++) {
		if (active_sa_bitmap & (1 << i))
			active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
	}

	active_rb_bitmap |= global_active_rb_bitmap;
	adev->gfx.config.backend_enable_mask = active_rb_bitmap;
	adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
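/* Note: the 0x3 above sets two RB bits per active SA, which appears to
 * assume up to two render backends per shader array on gfx11 parts.
 */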
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define LDS_APP_BASE		0x1
#define SCRATCH_APP_BASE	0x2

static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = (LDS_APP_BASE << SH_MEM_BASES__SHARED_BASE__SHIFT) |
			SCRATCH_APP_BASE;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	   access. These should be enabled by FW for target VMIDs. */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, i, 0);
	}
}
static void gfx_v11_0_init_gds_vmid(struct amdgpu_device *adev)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < 16; vmid++) {
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, 0, regGDS_OA_VMID0, vmid, 0);
	}
}
static void gfx_v11_0_tcp_harvest(struct amdgpu_device *adev)
{
	/* TODO: harvest feature to be added later. */
}

static void gfx_v11_0_get_tcc_info(struct amdgpu_device *adev)
{
	/* TCCs are global (not instanced). */
	uint32_t tcc_disable = RREG32_SOC15(GC, 0, regCGTS_TCC_DISABLE) |
			       RREG32_SOC15(GC, 0, regCGTS_USER_TCC_DISABLE);

	adev->gfx.config.tcc_disabled_mask =
		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
}
static void gfx_v11_0_constants_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int i;

	if (!amdgpu_sriov_vf(adev))
		WREG32_FIELD15_PREREG(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);

	gfx_v11_0_setup_rb(adev);
	gfx_v11_0_get_cu_info(adev, &adev->gfx.cu_info);
	gfx_v11_0_get_tcc_info(adev);
	adev->gfx.config.pa_sc_tile_steering_override = 0;

	/* Set whether texture coordinate truncation is conformant. */
	tmp = RREG32_SOC15(GC, 0, regTA_CNTL2);
	adev->gfx.config.ta_cntl2_truncate_coord_mode =
		REG_GET_FIELD(tmp, TA_CNTL2, TRUNCATE_COORD_MODE);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc21_grbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32_SOC15(GC, 0, regSH_MEM_CONFIG, DEFAULT_SH_MEM_CONFIG);
		if (i != 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
				(adev->gmc.private_aperture_start >> 48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
				(adev->gmc.shared_aperture_start >> 48));
			WREG32_SOC15(GC, 0, regSH_MEM_BASES, tmp);
		}
	}
	soc21_grbm_select(adev, 0, 0, 0, 0);

	mutex_unlock(&adev->srbm_mutex);

	gfx_v11_0_init_compute_vmid(adev);
	gfx_v11_0_init_gds_vmid(adev);
}
static void gfx_v11_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	u32 tmp;

	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE,
			    enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE,
			    enable ? 1 : 0);

	WREG32_SOC15(GC, 0, regCP_INT_CNTL_RING0, tmp);
}
static int gfx_v11_0_init_csb(struct amdgpu_device *adev)
{
	adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);

	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_HI,
			adev->gfx.rlc.clear_state_gpu_addr >> 32);
	WREG32_SOC15(GC, 0, regRLC_CSIB_ADDR_LO,
			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
	WREG32_SOC15(GC, 0, regRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);

	return 0;
}
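/* The RLC_CSIB_* registers above point the RLC at the clear state indirect
 * buffer built by gfx_v11_0_get_csb_buffer(), so the RLC can replay the
 * default context state when needed.
 */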
static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL);

	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
	WREG32_SOC15(GC, 0, regRLC_CNTL, tmp);
}

static void gfx_v11_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}
static void gfx_v11_0_rlc_smu_handshake_cntl(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t rlc_pg_cntl;

	rlc_pg_cntl = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (!enable) {
		/* RLC_PG_CNTL[23] = 0 (default)
		 * RLC will wait for handshake acks with SMU
		 * GFXOFF will be enabled
		 * RLC_PG_CNTL[23] = 1
		 * RLC will not issue any message to SMU
		 * hence no handshake between SMU & RLC
		 * GFXOFF will be disabled
		 */
		rlc_pg_cntl |= RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	} else
		rlc_pg_cntl &= ~RLC_PG_CNTL__SMU_HANDSHAKE_DISABLE_MASK;
	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, rlc_pg_cntl);
}
static void gfx_v11_0_rlc_start(struct amdgpu_device *adev)
{
	/* TODO: enable rlc & smu handshake until smu
	 * and gfxoff feature works as expected */
	if (!(amdgpu_pp_feature_mask & PP_GFXOFF_MASK))
		gfx_v11_0_rlc_smu_handshake_cntl(adev, false);

	WREG32_FIELD15_PREREG(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
	udelay(50);
}

static void gfx_v11_0_rlc_enable_srm(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* enable Save Restore Machine */
	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL));
	tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
	WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_SRM_CNTL), tmp);
}
1826 static void gfx_v11_0_load_rlcg_microcode(struct amdgpu_device *adev)
1828 const struct rlc_firmware_header_v2_0 *hdr;
1829 const __le32 *fw_data;
1830 unsigned i, fw_size;
1832 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1833 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1834 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1835 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1837 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR,
1838 RLCG_UCODE_LOADING_START_ADDRESS);
1840 for (i = 0; i < fw_size; i++)
1841 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_DATA,
1842 le32_to_cpup(fw_data++));
1844 WREG32_SOC15(GC, 0, regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
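/*
 * Note: the RLCG loader above follows the usual amdgpu ADDR/DATA pattern:
 * program the start offset once, stream the ucode dwords through the DATA
 * register (the address auto-increments on DATA writes, the usual behaviour
 * of these UCODE_ADDR/DATA pairs), then write the firmware version to the
 * ADDR register as the final step. The same shape repeats for the LX6,
 * PACE and GPU-IOV cores below.
 */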
1847 static void gfx_v11_0_load_rlc_iram_dram_microcode(struct amdgpu_device *adev)
1849 const struct rlc_firmware_header_v2_2 *hdr;
1850 const __le32 *fw_data;
1851 unsigned i, fw_size;
1854 hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
1856 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1857 le32_to_cpu(hdr->rlc_iram_ucode_offset_bytes));
1858 fw_size = le32_to_cpu(hdr->rlc_iram_ucode_size_bytes) / 4;
1860 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, 0);
1862 for (i = 0; i < fw_size; i++) {
1863 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
msleep(1);
1865 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_DATA,
1866 le32_to_cpup(fw_data++));
1869 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1871 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1872 le32_to_cpu(hdr->rlc_dram_ucode_offset_bytes));
1873 fw_size = le32_to_cpu(hdr->rlc_dram_ucode_size_bytes) / 4;
1875 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_ADDR, 0);
1876 for (i = 0; i < fw_size; i++) {
1877 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
msleep(1);
1879 WREG32_SOC15(GC, 0, regRLC_LX6_DRAM_DATA,
1880 le32_to_cpup(fw_data++));
1883 WREG32_SOC15(GC, 0, regRLC_LX6_IRAM_ADDR, adev->gfx.rlc_fw_version);
1885 tmp = RREG32_SOC15(GC, 0, regRLC_LX6_CNTL);
1886 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, PDEBUG_ENABLE, 1);
1887 tmp = REG_SET_FIELD(tmp, RLC_LX6_CNTL, BRESET, 0);
1888 WREG32_SOC15(GC, 0, regRLC_LX6_CNTL, tmp);
1891 static void gfx_v11_0_load_rlcp_rlcv_microcode(struct amdgpu_device *adev)
1893 const struct rlc_firmware_header_v2_3 *hdr;
1894 const __le32 *fw_data;
1895 unsigned i, fw_size;
1898 hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
1900 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1901 le32_to_cpu(hdr->rlcp_ucode_offset_bytes));
1902 fw_size = le32_to_cpu(hdr->rlcp_ucode_size_bytes) / 4;
1904 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
1906 for (i = 0; i < fw_size; i++) {
1907 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
msleep(1);
1909 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
1910 le32_to_cpup(fw_data++));
1913 WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
1915 tmp = RREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE);
1916 tmp = REG_SET_FIELD(tmp, RLC_GPM_THREAD_ENABLE, THREAD1_ENABLE, 1);
1917 WREG32_SOC15(GC, 0, regRLC_GPM_THREAD_ENABLE, tmp);
1919 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1920 le32_to_cpu(hdr->rlcv_ucode_offset_bytes));
1921 fw_size = le32_to_cpu(hdr->rlcv_ucode_size_bytes) / 4;
1923 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, 0);
1925 for (i = 0; i < fw_size; i++) {
1926 if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
msleep(1);
1928 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_DATA,
1929 le32_to_cpup(fw_data++));
1932 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_UCODE_ADDR, adev->gfx.rlc_fw_version);
1934 tmp = RREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL);
1935 tmp = REG_SET_FIELD(tmp, RLC_GPU_IOV_F32_CNTL, ENABLE, 1);
1936 WREG32_SOC15(GC, 0, regRLC_GPU_IOV_F32_CNTL, tmp);
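/*
 * Illustrative sketch (not part of the original driver): every RLC
 * sub-core loader above streams little-endian dwords through an
 * ADDR/DATA register pair and throttles every 100 writes on emulators.
 * The shape of that loop, distilled for the PACE core and using only
 * macros and module parameters already used in this file:
 */
static void __maybe_unused
gfx_v11_0_stream_ucode_sketch(struct amdgpu_device *adev,
			      const __le32 *fw_data, unsigned int fw_size)
{
	unsigned int i;

	/* rewind the write pointer to the start of the ucode region */
	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++) {
		/* emulation runs are slow; yield periodically */
		if ((amdgpu_emu_mode == 1) && (i % 100 == 99))
			msleep(1);
		WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_DATA,
			     le32_to_cpup(fw_data++));
	}
	/* final ADDR write carries the fw version, mirroring the loaders above */
	WREG32_SOC15(GC, 0, regRLC_PACE_UCODE_ADDR, adev->gfx.rlc_fw_version);
}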
1939 static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
1941 const struct rlc_firmware_header_v2_0 *hdr;
1942 uint16_t version_major;
1943 uint16_t version_minor;
1945 if (!adev->gfx.rlc_fw)
return -EINVAL;
1948 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1949 amdgpu_ucode_print_rlc_hdr(&hdr->header);
1951 version_major = le16_to_cpu(hdr->header.header_version_major);
1952 version_minor = le16_to_cpu(hdr->header.header_version_minor);
1954 if (version_major == 2) {
1955 gfx_v11_0_load_rlcg_microcode(adev);
1956 if (amdgpu_dpm == 1) {
1957 if (version_minor >= 2)
1958 gfx_v11_0_load_rlc_iram_dram_microcode(adev);
1959 if (version_minor == 3)
1960 gfx_v11_0_load_rlcp_rlcv_microcode(adev);
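/*
 * Note: the v2_x RLC firmware header is layered - later minor versions
 * append optional sections to the v2_0 base - so the minor-version checks
 * above gate the optional sub-core images: >= 2 carries the LX6 IRAM/DRAM
 * images and == 3 the RLCP/RLCV images, loaded only when amdgpu_dpm is
 * enabled.
 */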
1969 static int gfx_v11_0_rlc_resume(struct amdgpu_device *adev)
1973 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1974 gfx_v11_0_init_csb(adev);
1976 if (!amdgpu_sriov_vf(adev)) /* enable RLC SRM */
1977 gfx_v11_0_rlc_enable_srm(adev);
} else {
1979 if (amdgpu_sriov_vf(adev)) {
1980 gfx_v11_0_init_csb(adev);
return 0;
}
1984 adev->gfx.rlc.funcs->stop(adev);
1987 WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, 0);
1990 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, 0);
1992 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
1993 /* legacy rlc firmware loading */
1994 r = gfx_v11_0_rlc_load_microcode(adev);
1999 gfx_v11_0_init_csb(adev);
2001 adev->gfx.rlc.funcs->start(adev);
2006 static int gfx_v11_0_config_me_cache(struct amdgpu_device *adev, uint64_t addr)
2008 uint32_t usec_timeout = 50000; /* wait for 50ms */
2012 /* Trigger an invalidation of the L1 instruction caches */
2013 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2014 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2015 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
2017 /* Wait for invalidation complete */
2018 for (i = 0; i < usec_timeout; i++) {
2019 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2020 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2021 INVALIDATE_CACHE_COMPLETE))
2026 if (i >= usec_timeout) {
2027 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2031 if (amdgpu_emu_mode == 1)
2032 adev->hdp.funcs->flush_hdp(adev, NULL);
2034 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2035 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2036 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2037 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2038 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2039 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2041 /* Program me ucode address into instruction cache address register */
2042 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2043 lower_32_bits(addr) & 0xFFFFF000);
2044 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2045 upper_32_bits(addr));
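/*
 * Illustrative sketch (not from the original source): the ME, PFP and MEC
 * cache-config helpers in this block all share one pattern - kick an
 * invalidation, then busy-poll a completion bit with a ~50ms budget.
 * Distilled for the ME instruction cache, using only macros already used
 * in this file:
 */
static int __maybe_unused
gfx_v11_0_poll_me_ic_invalidate_sketch(struct amdgpu_device *adev)
{
	u32 tmp, i;

	/* request the invalidation */
	tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, INVALIDATE_CACHE, 1);
	WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);

	/* poll for completion: 1us per iteration, ~50ms budget */
	for (i = 0; i < 50000; i++) {
		tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
		if (REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
				  INVALIDATE_CACHE_COMPLETE))
			return 0;
		udelay(1);
	}
	return -EINVAL;
}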
2050 static int gfx_v11_0_config_pfp_cache(struct amdgpu_device *adev, uint64_t addr)
2052 uint32_t usec_timeout = 50000; /* wait for 50ms */
2056 /* Trigger an invalidation of the L1 instruction caches */
2057 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2058 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2059 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2061 /* Wait for invalidation complete */
2062 for (i = 0; i < usec_timeout; i++) {
2063 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2064 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2065 INVALIDATE_CACHE_COMPLETE))
2070 if (i >= usec_timeout) {
2071 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2075 if (amdgpu_emu_mode == 1)
2076 adev->hdp.funcs->flush_hdp(adev, NULL);
2078 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2079 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2080 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2081 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2082 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2083 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2085 /* Program pfp ucode address into instruction cache address register */
2086 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2087 lower_32_bits(addr) & 0xFFFFF000);
2088 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2089 upper_32_bits(addr));
2094 static int gfx_v11_0_config_mec_cache(struct amdgpu_device *adev, uint64_t addr)
2096 uint32_t usec_timeout = 50000; /* wait for 50ms */
2100 /* Trigger an invalidation of the L1 instruction caches */
2101 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2102 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2104 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2106 /* Wait for invalidation complete */
2107 for (i = 0; i < usec_timeout; i++) {
2108 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2109 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2110 INVALIDATE_CACHE_COMPLETE))
2115 if (i >= usec_timeout) {
2116 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2120 if (amdgpu_emu_mode == 1)
2121 adev->hdp.funcs->flush_hdp(adev, NULL);
2123 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2124 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2125 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2126 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ADDRESS_CLAMP, 1);
2127 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2129 /* Program mec1 ucode address into instruction cache address register */
2130 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO,
2131 lower_32_bits(addr) & 0xFFFFF000);
2132 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2133 upper_32_bits(addr));
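/*
 * Note on the RS64 variants below: CP_*_PRGRM_CNTR_START holds a
 * dword-granular program counter, so the byte-based start address from the
 * v2_0 header is split as (hi << 30) | (lo >> 2) - e.g. a start address of
 * 0x100 bytes becomes 0x40 dwords - and the *_HI register takes the
 * remaining upper bits (hi >> 2). This reading is inferred from the shifts
 * in the code, not from a register spec.
 */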
2138 static int gfx_v11_0_config_pfp_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2140 uint32_t usec_timeout = 50000; /* wait for 50ms */
2142 unsigned i, pipe_id;
2143 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2145 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2146 adev->gfx.pfp_fw->data;
2148 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2149 lower_32_bits(addr));
2150 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2151 upper_32_bits(addr));
2153 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2154 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2155 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2156 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2157 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2160 * Programming any of the CP_PFP_IC_BASE registers
2161 * forces invalidation of the PFP L1 I$. Wait for the
2162 * invalidation to complete.
2164 for (i = 0; i < usec_timeout; i++) {
2165 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2166 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2167 INVALIDATE_CACHE_COMPLETE))
2172 if (i >= usec_timeout) {
2173 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2177 /* Prime the L1 instruction caches */
2178 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2179 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2180 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2181 /* Wait for the cache to be primed */
2182 for (i = 0; i < usec_timeout; i++) {
2183 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2184 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, ICACHE_PRIMED))
break;
2190 if (i >= usec_timeout) {
2191 dev_err(adev->dev, "failed to prime instruction cache\n");
2195 mutex_lock(&adev->srbm_mutex);
2196 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2197 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2198 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2199 (pfp_hdr->ucode_start_addr_hi << 30) |
2200 (pfp_hdr->ucode_start_addr_lo >> 2));
2201 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2202 pfp_hdr->ucode_start_addr_hi >> 2);
2205 * Program CP_ME_CNTL to reset given PIPE to take
2206 * effect of CP_PFP_PRGRM_CNTR_START.
2208 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
2210 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2211 PFP_PIPE0_RESET, 1);
else
2213 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2214 PFP_PIPE1_RESET, 1);
2215 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2217 /* Clear pfp pipe reset bit. */
if (pipe_id == 0)
2219 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2220 PFP_PIPE0_RESET, 0);
else
2222 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2223 PFP_PIPE1_RESET, 0);
2224 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2226 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2227 lower_32_bits(addr2));
2228 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2229 upper_32_bits(addr2));
2231 soc21_grbm_select(adev, 0, 0, 0, 0);
2232 mutex_unlock(&adev->srbm_mutex);
2234 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2235 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2236 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2237 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2239 /* Invalidate the data caches */
2240 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2241 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2242 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2244 for (i = 0; i < usec_timeout; i++) {
2245 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2246 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2247 INVALIDATE_DCACHE_COMPLETE))
2252 if (i >= usec_timeout) {
2253 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2260 static int gfx_v11_0_config_me_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2262 uint32_t usec_timeout = 50000; /* wait for 50ms */
2264 unsigned i, pipe_id;
2265 const struct gfx_firmware_header_v2_0 *me_hdr;
2267 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2268 adev->gfx.me_fw->data;
2270 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2271 lower_32_bits(addr));
2272 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2273 upper_32_bits(addr));
2275 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2276 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2277 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2278 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2279 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2282 * Programming any of the CP_ME_IC_BASE registers
2283 * forces invalidation of the ME L1 I$. Wait for the
2284 * invalidation to complete.
2286 for (i = 0; i < usec_timeout; i++) {
2287 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2288 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2289 INVALIDATE_CACHE_COMPLETE))
2294 if (i >= usec_timeout) {
2295 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2299 /* Prime the instruction caches */
2300 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2301 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
2302 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3304 /* Wait for the instruction cache to be primed */
3305 for (i = 0; i < usec_timeout; i++) {
3306 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3307 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, ICACHE_PRIMED))
break;
2313 if (i >= usec_timeout) {
2314 dev_err(adev->dev, "failed to prime instruction cache\n");
2318 mutex_lock(&adev->srbm_mutex);
2319 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2320 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2321 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2322 (me_hdr->ucode_start_addr_hi << 30) |
2323 (me_hdr->ucode_start_addr_lo >> 2));
2324 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2325 me_hdr->ucode_start_addr_hi >> 2);
2328 * Program CP_ME_CNTL to reset given PIPE to take
2329 * effect of CP_ME_PRGRM_CNTR_START.
2331 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
2333 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE0_RESET, 1);
else
2336 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE1_RESET, 1);
2338 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2340 /* Clear me pipe reset bit. */
if (pipe_id == 0)
2342 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE0_RESET, 0);
else
2345 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE1_RESET, 0);
2347 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2349 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
2350 lower_32_bits(addr2));
2351 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
2352 upper_32_bits(addr2));
2354 soc21_grbm_select(adev, 0, 0, 0, 0);
2355 mutex_unlock(&adev->srbm_mutex);
2357 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2358 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2359 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2360 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2362 /* Invalidate the data caches */
2363 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2364 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2365 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2367 for (i = 0; i < usec_timeout; i++) {
2368 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2369 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2370 INVALIDATE_DCACHE_COMPLETE))
2375 if (i >= usec_timeout) {
2376 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2383 static int gfx_v11_0_config_mec_cache_rs64(struct amdgpu_device *adev, uint64_t addr, uint64_t addr2)
2385 uint32_t usec_timeout = 50000; /* wait for 50ms */
2388 const struct gfx_firmware_header_v2_0 *mec_hdr;
2390 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2391 adev->gfx.mec_fw->data;
2393 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
2394 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
2395 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
2396 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
2397 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
2399 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
2400 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
2401 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
2402 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
2404 mutex_lock(&adev->srbm_mutex);
2405 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
2406 soc21_grbm_select(adev, 1, i, 0, 0);
2408 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, addr2);
2409 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
2410 upper_32_bits(addr2));
2412 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2413 mec_hdr->ucode_start_addr_lo >> 2 |
2414 mec_hdr->ucode_start_addr_hi << 30);
2415 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2416 mec_hdr->ucode_start_addr_hi >> 2);
2418 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, addr);
2419 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
2420 upper_32_bits(addr));
2422 mutex_unlock(&adev->srbm_mutex);
2423 soc21_grbm_select(adev, 0, 0, 0, 0);
2425 /* Trigger an invalidation of the L1 data caches */
2426 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2427 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2428 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
2430 /* Wait for invalidation complete */
2431 for (i = 0; i < usec_timeout; i++) {
2432 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
2433 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
2434 INVALIDATE_DCACHE_COMPLETE))
2439 if (i >= usec_timeout) {
2440 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2444 /* Trigger an invalidation of the L1 instruction caches */
2445 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2446 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
2447 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
2449 /* Wait for invalidation complete */
2450 for (i = 0; i < usec_timeout; i++) {
2451 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
2452 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
2453 INVALIDATE_CACHE_COMPLETE))
2458 if (i >= usec_timeout) {
2459 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2466 static void gfx_v11_0_config_gfx_rs64(struct amdgpu_device *adev)
2468 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2469 const struct gfx_firmware_header_v2_0 *me_hdr;
2470 const struct gfx_firmware_header_v2_0 *mec_hdr;
2471 uint32_t pipe_id, tmp;
2473 mec_hdr = (const struct gfx_firmware_header_v2_0 *)
2474 adev->gfx.mec_fw->data;
2475 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2476 adev->gfx.me_fw->data;
2477 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2478 adev->gfx.pfp_fw->data;
2480 /* config pfp program start addr */
2481 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2482 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2483 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2484 (pfp_hdr->ucode_start_addr_hi << 30) |
2485 (pfp_hdr->ucode_start_addr_lo >> 2));
2486 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2487 pfp_hdr->ucode_start_addr_hi >> 2);
2489 soc21_grbm_select(adev, 0, 0, 0, 0);
2491 /* reset pfp pipe */
2492 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2493 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 1);
2494 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 1);
2495 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2497 /* clear pfp pipe reset */
2498 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE0_RESET, 0);
2499 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_PIPE1_RESET, 0);
2500 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2502 /* config me program start addr */
2503 for (pipe_id = 0; pipe_id < 2; pipe_id++) {
2504 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2505 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
2506 (me_hdr->ucode_start_addr_hi << 30) |
2507 (me_hdr->ucode_start_addr_lo >> 2));
2508 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
2509 me_hdr->ucode_start_addr_hi >> 2);
2511 soc21_grbm_select(adev, 0, 0, 0, 0);
2514 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2515 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 1);
2516 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 1);
2517 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2519 /* clear me pipe reset */
2520 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE0_RESET, 0);
2521 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_PIPE1_RESET, 0);
2522 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2524 /* config mec program start addr */
2525 for (pipe_id = 0; pipe_id < 4; pipe_id++) {
2526 soc21_grbm_select(adev, 1, pipe_id, 0, 0);
2527 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
2528 mec_hdr->ucode_start_addr_lo >> 2 |
2529 mec_hdr->ucode_start_addr_hi << 30);
2530 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
2531 mec_hdr->ucode_start_addr_hi >> 2);
2533 soc21_grbm_select(adev, 0, 0, 0, 0);
2535 /* reset mec pipe */
2536 tmp = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
2537 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 1);
2538 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 1);
2539 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 1);
2540 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 1);
2541 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
2543 /* clear mec pipe reset */
2544 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, 0);
2545 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, 0);
2546 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, 0);
2547 tmp = REG_SET_FIELD(tmp, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, 0);
2548 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, tmp);
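/*
 * Note: with AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO the RLC bootloader consumes
 * the autoload buffer on its own; the driver's job below is only to poll
 * BOOTLOAD_COMPLETE (alongside an idle CP_STAT) and then point the CP
 * instruction/data caches at the per-firmware offsets recorded in
 * rlc_autoload_info[].
 */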
2551 static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
2554 uint32_t bootload_status;
2556 uint64_t addr, addr2;
2558 for (i = 0; i < adev->usec_timeout; i++) {
2559 cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
2561 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||
2562 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4))
2563 bootload_status = RREG32_SOC15(GC, 0,
2564 regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
2566 bootload_status = RREG32_SOC15(GC, 0, regRLC_RLCS_BOOTLOAD_STATUS);
2568 if ((cp_status == 0) &&
2569 (REG_GET_FIELD(bootload_status,
2570 RLC_RLCS_BOOTLOAD_STATUS, BOOTLOAD_COMPLETE) == 1)) {
2576 if (i >= adev->usec_timeout) {
2577 dev_err(adev->dev, "rlc autoload: gc ucode autoload timeout\n");
2581 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
2582 if (adev->gfx.rs64_enable) {
2583 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2584 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME].offset;
2585 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2586 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_ME_P0_STACK].offset;
2587 r = gfx_v11_0_config_me_cache_rs64(adev, addr, addr2);
2590 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2591 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP].offset;
2592 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2593 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK].offset;
2594 r = gfx_v11_0_config_pfp_cache_rs64(adev, addr, addr2);
2597 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2598 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC].offset;
2599 addr2 = adev->gfx.rlc.rlc_autoload_gpu_addr +
2600 rlc_autoload_info[SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK].offset;
2601 r = gfx_v11_0_config_mec_cache_rs64(adev, addr, addr2);
if (r)
return r;
} else {
2605 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2606 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_ME].offset;
2607 r = gfx_v11_0_config_me_cache(adev, addr);
2610 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2611 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_PFP].offset;
2612 r = gfx_v11_0_config_pfp_cache(adev, addr);
2615 addr = adev->gfx.rlc.rlc_autoload_gpu_addr +
2616 rlc_autoload_info[SOC21_FIRMWARE_ID_CP_MEC].offset;
2617 r = gfx_v11_0_config_mec_cache(adev, addr);
2626 static int gfx_v11_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
2629 u32 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
2631 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
2632 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
2633 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2635 for (i = 0; i < adev->usec_timeout; i++) {
2636 if (RREG32_SOC15(GC, 0, regCP_STAT) == 0)
2641 if (i >= adev->usec_timeout)
2642 DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
2647 static int gfx_v11_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
2650 const struct gfx_firmware_header_v1_0 *pfp_hdr;
2651 const __le32 *fw_data;
2652 unsigned i, fw_size;
2654 pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
2655 adev->gfx.pfp_fw->data;
2657 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2659 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2660 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
2661 fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes);
2663 r = amdgpu_bo_create_reserved(adev, pfp_hdr->header.ucode_size_bytes,
2664 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2665 &adev->gfx.pfp.pfp_fw_obj,
2666 &adev->gfx.pfp.pfp_fw_gpu_addr,
2667 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2669 dev_err(adev->dev, "(%d) failed to create pfp fw bo\n", r);
2670 gfx_v11_0_pfp_fini(adev);
2674 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_data, fw_size);
2676 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2677 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2679 gfx_v11_0_config_pfp_cache(adev, adev->gfx.pfp.pfp_fw_gpu_addr);
2681 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, 0);
2683 for (i = 0; i < pfp_hdr->jt_size; i++)
2684 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_DATA,
2685 le32_to_cpup(fw_data + pfp_hdr->jt_offset + i));
2687 WREG32_SOC15(GC, 0, regCP_HYP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
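/*
 * Note: the CP_HYP_*_UCODE_ADDR/DATA writes above upload only the jump
 * table portion of the v1_0 image (jt_offset/jt_size); the main ucode body
 * was already copied into the bo and mapped through the instruction cache
 * by config_pfp_cache(). The legacy ME loader below does the same.
 */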
2692 static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
2695 const struct gfx_firmware_header_v2_0 *pfp_hdr;
2696 const __le32 *fw_ucode, *fw_data;
2697 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2699 uint32_t usec_timeout = 50000; /* wait for 50ms */
2701 pfp_hdr = (const struct gfx_firmware_header_v2_0 *)
2702 adev->gfx.pfp_fw->data;
2704 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2707 fw_ucode = (const __le32 *)(adev->gfx.pfp_fw->data +
2708 le32_to_cpu(pfp_hdr->ucode_offset_bytes));
2709 fw_ucode_size = le32_to_cpu(pfp_hdr->ucode_size_bytes);
2711 fw_data = (const __le32 *)(adev->gfx.pfp_fw->data +
2712 le32_to_cpu(pfp_hdr->data_offset_bytes));
2713 fw_data_size = le32_to_cpu(pfp_hdr->data_size_bytes);
2716 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2718 AMDGPU_GEM_DOMAIN_VRAM |
2719 AMDGPU_GEM_DOMAIN_GTT,
2720 &adev->gfx.pfp.pfp_fw_obj,
2721 &adev->gfx.pfp.pfp_fw_gpu_addr,
2722 (void **)&adev->gfx.pfp.pfp_fw_ptr);
2724 dev_err(adev->dev, "(%d) failed to create pfp ucode fw bo\n", r);
2725 gfx_v11_0_pfp_fini(adev);
2729 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2731 AMDGPU_GEM_DOMAIN_VRAM |
2732 AMDGPU_GEM_DOMAIN_GTT,
2733 &adev->gfx.pfp.pfp_fw_data_obj,
2734 &adev->gfx.pfp.pfp_fw_data_gpu_addr,
2735 (void **)&adev->gfx.pfp.pfp_fw_data_ptr);
2737 dev_err(adev->dev, "(%d) failed to create pfp data fw bo\n", r);
2738 gfx_v11_0_pfp_fini(adev);
2742 memcpy(adev->gfx.pfp.pfp_fw_ptr, fw_ucode, fw_ucode_size);
2743 memcpy(adev->gfx.pfp.pfp_fw_data_ptr, fw_data, fw_data_size);
2745 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_obj);
2746 amdgpu_bo_kunmap(adev->gfx.pfp.pfp_fw_data_obj);
2747 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_obj);
2748 amdgpu_bo_unreserve(adev->gfx.pfp.pfp_fw_data_obj);
2750 if (amdgpu_emu_mode == 1)
2751 adev->hdp.funcs->flush_hdp(adev, NULL);
2753 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_LO,
2754 lower_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2755 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_HI,
2756 upper_32_bits(adev->gfx.pfp.pfp_fw_gpu_addr));
2758 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL);
2759 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0);
2760 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, CACHE_POLICY, 0);
2761 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, EXE_DISABLE, 0);
2762 WREG32_SOC15(GC, 0, regCP_PFP_IC_BASE_CNTL, tmp);
2765 * Programming any of the CP_PFP_IC_BASE registers
2766 * forces invalidation of the PFP L1 I$. Wait for the
2767 * invalidation to complete.
2769 for (i = 0; i < usec_timeout; i++) {
2770 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2771 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL,
2772 INVALIDATE_CACHE_COMPLETE))
2777 if (i >= usec_timeout) {
2778 dev_err(adev->dev, "failed to invalidate instruction cache\n");
2782 /* Prime the L1 instruction caches */
2783 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2784 tmp = REG_SET_FIELD(tmp, CP_PFP_IC_OP_CNTL, PRIME_ICACHE, 1);
2785 WREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL, tmp);
2786 /* Wait for the cache to be primed */
2787 for (i = 0; i < usec_timeout; i++) {
2788 tmp = RREG32_SOC15(GC, 0, regCP_PFP_IC_OP_CNTL);
2789 if (1 == REG_GET_FIELD(tmp, CP_PFP_IC_OP_CNTL, ICACHE_PRIMED))
break;
2795 if (i >= usec_timeout) {
2796 dev_err(adev->dev, "failed to prime instruction cache\n");
2800 mutex_lock(&adev->srbm_mutex);
2801 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
2802 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
2803 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START,
2804 (pfp_hdr->ucode_start_addr_hi << 30) |
2805 (pfp_hdr->ucode_start_addr_lo >> 2));
2806 WREG32_SOC15(GC, 0, regCP_PFP_PRGRM_CNTR_START_HI,
2807 pfp_hdr->ucode_start_addr_hi >> 2);
2810 * Program CP_ME_CNTL to reset given PIPE to take
2811 * effect of CP_PFP_PRGRM_CNTR_START.
2813 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
2815 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2816 PFP_PIPE0_RESET, 1);
else
2818 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2819 PFP_PIPE1_RESET, 1);
2820 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2822 /* Clear pfp pipe reset bit. */
if (pipe_id == 0)
2824 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2825 PFP_PIPE0_RESET, 0);
else
2827 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
2828 PFP_PIPE1_RESET, 0);
2829 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
2831 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_LO,
2832 lower_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2833 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE0_HI,
2834 upper_32_bits(adev->gfx.pfp.pfp_fw_data_gpu_addr));
2836 soc21_grbm_select(adev, 0, 0, 0, 0);
2837 mutex_unlock(&adev->srbm_mutex);
2839 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
2840 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
2841 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
2842 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
2844 /* Invalidate the data caches */
2845 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2846 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
2847 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
2849 for (i = 0; i < usec_timeout; i++) {
2850 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
2851 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
2852 INVALIDATE_DCACHE_COMPLETE))
2857 if (i >= usec_timeout) {
2858 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
2865 static int gfx_v11_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev)
2868 const struct gfx_firmware_header_v1_0 *me_hdr;
2869 const __le32 *fw_data;
2870 unsigned i, fw_size;
2872 me_hdr = (const struct gfx_firmware_header_v1_0 *)
2873 adev->gfx.me_fw->data;
2875 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2877 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2878 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
2879 fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes);
2881 r = amdgpu_bo_create_reserved(adev, me_hdr->header.ucode_size_bytes,
2882 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
2883 &adev->gfx.me.me_fw_obj,
2884 &adev->gfx.me.me_fw_gpu_addr,
2885 (void **)&adev->gfx.me.me_fw_ptr);
2887 dev_err(adev->dev, "(%d) failed to create me fw bo\n", r);
2888 gfx_v11_0_me_fini(adev);
2892 memcpy(adev->gfx.me.me_fw_ptr, fw_data, fw_size);
2894 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2895 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2897 gfx_v11_0_config_me_cache(adev, adev->gfx.me.me_fw_gpu_addr);
2899 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, 0);
2901 for (i = 0; i < me_hdr->jt_size; i++)
2902 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_DATA,
2903 le32_to_cpup(fw_data + me_hdr->jt_offset + i));
2905 WREG32_SOC15(GC, 0, regCP_HYP_ME_UCODE_ADDR, adev->gfx.me_fw_version);
2910 static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
2913 const struct gfx_firmware_header_v2_0 *me_hdr;
2914 const __le32 *fw_ucode, *fw_data;
2915 unsigned i, pipe_id, fw_ucode_size, fw_data_size;
2917 uint32_t usec_timeout = 50000; /* wait for 50ms */
2919 me_hdr = (const struct gfx_firmware_header_v2_0 *)
2920 adev->gfx.me_fw->data;
2922 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2925 fw_ucode = (const __le32 *)(adev->gfx.me_fw->data +
2926 le32_to_cpu(me_hdr->ucode_offset_bytes));
2927 fw_ucode_size = le32_to_cpu(me_hdr->ucode_size_bytes);
2929 fw_data = (const __le32 *)(adev->gfx.me_fw->data +
2930 le32_to_cpu(me_hdr->data_offset_bytes));
2931 fw_data_size = le32_to_cpu(me_hdr->data_size_bytes);
2934 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
2936 AMDGPU_GEM_DOMAIN_VRAM |
2937 AMDGPU_GEM_DOMAIN_GTT,
2938 &adev->gfx.me.me_fw_obj,
2939 &adev->gfx.me.me_fw_gpu_addr,
2940 (void **)&adev->gfx.me.me_fw_ptr);
2942 dev_err(adev->dev, "(%d) failed to create me ucode bo\n", r);
2943 gfx_v11_0_me_fini(adev);
2947 r = amdgpu_bo_create_reserved(adev, fw_data_size,
2949 AMDGPU_GEM_DOMAIN_VRAM |
2950 AMDGPU_GEM_DOMAIN_GTT,
2951 &adev->gfx.me.me_fw_data_obj,
2952 &adev->gfx.me.me_fw_data_gpu_addr,
2953 (void **)&adev->gfx.me.me_fw_data_ptr);
2955 dev_err(adev->dev, "(%d) failed to create me data bo\n", r);
2956 gfx_v11_0_me_fini(adev);
2960 memcpy(adev->gfx.me.me_fw_ptr, fw_ucode, fw_ucode_size);
2961 memcpy(adev->gfx.me.me_fw_data_ptr, fw_data, fw_data_size);
2963 amdgpu_bo_kunmap(adev->gfx.me.me_fw_obj);
2964 amdgpu_bo_kunmap(adev->gfx.me.me_fw_data_obj);
2965 amdgpu_bo_unreserve(adev->gfx.me.me_fw_obj);
2966 amdgpu_bo_unreserve(adev->gfx.me.me_fw_data_obj);
2968 if (amdgpu_emu_mode == 1)
2969 adev->hdp.funcs->flush_hdp(adev, NULL);
2971 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_LO,
2972 lower_32_bits(adev->gfx.me.me_fw_gpu_addr));
2973 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_HI,
2974 upper_32_bits(adev->gfx.me.me_fw_gpu_addr));
2976 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL);
2977 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0);
2978 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, CACHE_POLICY, 0);
2979 tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, EXE_DISABLE, 0);
2980 WREG32_SOC15(GC, 0, regCP_ME_IC_BASE_CNTL, tmp);
2983 * Programming any of the CP_ME_IC_BASE registers
2984 * forces invalidation of the ME L1 I$. Wait for the
2985 * invalidation to complete.
2987 for (i = 0; i < usec_timeout; i++) {
2988 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
2989 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL,
2990 INVALIDATE_CACHE_COMPLETE))
2995 if (i >= usec_timeout) {
2996 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3000 /* Prime the instruction caches */
3001 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3002 tmp = REG_SET_FIELD(tmp, CP_ME_IC_OP_CNTL, PRIME_ICACHE, 1);
3003 WREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL, tmp);
3005 /* Wait for the instruction cache to be primed */
3006 for (i = 0; i < usec_timeout; i++) {
3007 tmp = RREG32_SOC15(GC, 0, regCP_ME_IC_OP_CNTL);
3008 if (1 == REG_GET_FIELD(tmp, CP_ME_IC_OP_CNTL, ICACHE_PRIMED))
break;
3014 if (i >= usec_timeout) {
3015 dev_err(adev->dev, "failed to prime instruction cache\n");
3019 mutex_lock(&adev->srbm_mutex);
3020 for (pipe_id = 0; pipe_id < adev->gfx.me.num_pipe_per_me; pipe_id++) {
3021 soc21_grbm_select(adev, 0, pipe_id, 0, 0);
3022 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START,
3023 (me_hdr->ucode_start_addr_hi << 30) |
3024 (me_hdr->ucode_start_addr_lo >> 2));
3025 WREG32_SOC15(GC, 0, regCP_ME_PRGRM_CNTR_START_HI,
3026 me_hdr->ucode_start_addr_hi >> 2);
3029 * Program CP_ME_CNTL to reset given PIPE to take
3030 * effect of CP_ME_PRGRM_CNTR_START.
3032 tmp = RREG32_SOC15(GC, 0, regCP_ME_CNTL);
if (pipe_id == 0)
3034 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE0_RESET, 1);
else
3037 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE1_RESET, 1);
3039 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3041 /* Clear me pipe reset bit. */
if (pipe_id == 0)
3043 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE0_RESET, 0);
else
3046 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL,
ME_PIPE1_RESET, 0);
3048 WREG32_SOC15(GC, 0, regCP_ME_CNTL, tmp);
3050 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_LO,
3051 lower_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3052 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE1_HI,
3053 upper_32_bits(adev->gfx.me.me_fw_data_gpu_addr));
3055 soc21_grbm_select(adev, 0, 0, 0, 0);
3056 mutex_unlock(&adev->srbm_mutex);
3058 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL);
3059 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, VMID, 0);
3060 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_BASE_CNTL, CACHE_POLICY, 0);
3061 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_BASE_CNTL, tmp);
3063 /* Invalidate the data caches */
3064 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3065 tmp = REG_SET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3066 WREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL, tmp);
3068 for (i = 0; i < usec_timeout; i++) {
3069 tmp = RREG32_SOC15(GC, 0, regCP_GFX_RS64_DC_OP_CNTL);
3070 if (1 == REG_GET_FIELD(tmp, CP_GFX_RS64_DC_OP_CNTL,
3071 INVALIDATE_DCACHE_COMPLETE))
3076 if (i >= usec_timeout) {
3077 dev_err(adev->dev, "failed to invalidate RS64 data cache\n");
3084 static int gfx_v11_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
3088 if (!adev->gfx.me_fw || !adev->gfx.pfp_fw)
return -EINVAL;
3091 gfx_v11_0_cp_gfx_enable(adev, false);
3093 if (adev->gfx.rs64_enable)
3094 r = gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(adev);
3096 r = gfx_v11_0_cp_gfx_load_pfp_microcode(adev);
3098 dev_err(adev->dev, "(%d) failed to load pfp fw\n", r);
3102 if (adev->gfx.rs64_enable)
3103 r = gfx_v11_0_cp_gfx_load_me_microcode_rs64(adev);
3105 r = gfx_v11_0_cp_gfx_load_me_microcode(adev);
3107 dev_err(adev->dev, "(%d) failed to load me fw\n", r);
3114 static int gfx_v11_0_cp_gfx_start(struct amdgpu_device *adev)
3116 struct amdgpu_ring *ring;
3117 const struct cs_section_def *sect = NULL;
3118 const struct cs_extent_def *ext = NULL;
3123 WREG32_SOC15(GC, 0, regCP_MAX_CONTEXT,
3124 adev->gfx.config.max_hw_contexts - 1);
3125 WREG32_SOC15(GC, 0, regCP_DEVICE_ID, 1);
3127 if (!amdgpu_async_gfx_ring)
3128 gfx_v11_0_cp_gfx_enable(adev, true);
3130 ring = &adev->gfx.gfx_ring[0];
3131 r = amdgpu_ring_alloc(ring, gfx_v11_0_get_csb_size(adev));
3133 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3137 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3138 amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3140 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
3141 amdgpu_ring_write(ring, 0x80000000);
3142 amdgpu_ring_write(ring, 0x80000000);
3144 for (sect = gfx11_cs_data; sect->section != NULL; ++sect) {
3145 for (ext = sect->section; ext->extent != NULL; ++ext) {
3146 if (sect->id == SECT_CONTEXT) {
3147 amdgpu_ring_write(ring,
3148 PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
3150 amdgpu_ring_write(ring, ext->reg_index -
3151 PACKET3_SET_CONTEXT_REG_START);
3152 for (i = 0; i < ext->reg_count; i++)
3153 amdgpu_ring_write(ring, ext->extent[i]);
3159 ctx_reg_offset = SOC15_REG_OFFSET(GC, 0, regPA_SC_TILE_STEERING_OVERRIDE) - PACKET3_SET_CONTEXT_REG_START;
3160 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
3161 amdgpu_ring_write(ring, ctx_reg_offset);
3162 amdgpu_ring_write(ring, adev->gfx.config.pa_sc_tile_steering_override);
3164 amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3165 amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3167 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3168 amdgpu_ring_write(ring, 0);
3170 amdgpu_ring_commit(ring);
3172 /* submit cs packet to copy state 0 to next available state */
3173 if (adev->gfx.num_gfx_rings > 1) {
3174 /* maximum supported gfx ring is 2 */
3175 ring = &adev->gfx.gfx_ring[1];
3176 r = amdgpu_ring_alloc(ring, 2);
3178 DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
3182 amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3183 amdgpu_ring_write(ring, 0);
3185 amdgpu_ring_commit(ring);
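/*
 * Note: ring 0 above replays the full clear-state preamble (the
 * gfx11_cs_data sections plus PA_SC_TILE_STEERING_OVERRIDE), while a
 * second ring only issues CLEAR_STATE so the CP copies context 0 into the
 * next available context, as the comment above says.
 */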
3190 static void gfx_v11_0_cp_gfx_switch_pipe(struct amdgpu_device *adev, CP_PIPE_ID pipe)
3195 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
3196 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, pipe);
3198 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
3201 static void gfx_v11_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
3202 struct amdgpu_ring *ring)
3206 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3207 if (ring->use_doorbell) {
3208 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3209 DOORBELL_OFFSET, ring->doorbell_index);
3210 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 1);
} else {
3213 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 0);
}
3216 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL, tmp);
3218 tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
3219 DOORBELL_RANGE_LOWER, ring->doorbell_index);
3220 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER, tmp);
3222 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3223 CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK);
3226 static int gfx_v11_0_cp_gfx_resume(struct amdgpu_device *adev)
3228 struct amdgpu_ring *ring;
3231 u64 rb_addr, rptr_addr, wptr_gpu_addr;
3233 /* Set the write pointer delay */
3234 WREG32_SOC15(GC, 0, regCP_RB_WPTR_DELAY, 0);
3236 /* set the RB to use vmid 0 */
3237 WREG32_SOC15(GC, 0, regCP_RB_VMID, 0);
3239 /* Init gfx ring 0 for pipe 0 */
3240 mutex_lock(&adev->srbm_mutex);
3241 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3243 /* Set ring buffer size */
3244 ring = &adev->gfx.gfx_ring[0];
3245 rb_bufsz = order_base_2(ring->ring_size / 8);
3246 tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz);
3247 tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2);
3248 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
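/*
 * Note: ring_size is in bytes, so order_base_2(ring_size / 8) makes
 * RB_BUFSZ the log2 of the ring size in 8-byte units - e.g. a 64KB ring
 * gives RB_BUFSZ = 13. RB_BLKSZ = RB_BUFSZ - 2 then divides the ring into
 * four blocks for rptr reporting; that reading is carried over from older
 * amdgpu parts, not a documented gfx11 detail.
 */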
3250 /* Initialize the ring buffer's write pointers */
ring->wptr = 0;
3252 WREG32_SOC15(GC, 0, regCP_RB0_WPTR, lower_32_bits(ring->wptr));
3253 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI, upper_32_bits(ring->wptr));
3255 /* set the wb address whether it's enabled or not */
3256 rptr_addr = ring->rptr_gpu_addr;
3257 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
3258 WREG32_SOC15(GC, 0, regCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3259 CP_RB_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3261 wptr_gpu_addr = ring->wptr_gpu_addr;
3262 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3263 lower_32_bits(wptr_gpu_addr));
3264 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3265 upper_32_bits(wptr_gpu_addr));
3268 WREG32_SOC15(GC, 0, regCP_RB0_CNTL, tmp);
3270 rb_addr = ring->gpu_addr >> 8;
3271 WREG32_SOC15(GC, 0, regCP_RB0_BASE, rb_addr);
3272 WREG32_SOC15(GC, 0, regCP_RB0_BASE_HI, upper_32_bits(rb_addr));
3274 WREG32_SOC15(GC, 0, regCP_RB_ACTIVE, 1);
3276 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3277 mutex_unlock(&adev->srbm_mutex);
3279 /* Init gfx ring 1 for pipe 1 */
3280 if (adev->gfx.num_gfx_rings > 1) {
3281 mutex_lock(&adev->srbm_mutex);
3282 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
3283 /* maximum supported gfx ring is 2 */
3284 ring = &adev->gfx.gfx_ring[1];
3285 rb_bufsz = order_base_2(ring->ring_size / 8);
3286 tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
3287 tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
3288 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3289 /* Initialize the ring buffer's write pointers */
ring->wptr = 0;
3291 WREG32_SOC15(GC, 0, regCP_RB1_WPTR, lower_32_bits(ring->wptr));
3292 WREG32_SOC15(GC, 0, regCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
3293 /* Set the wb address whether it's enabled or not */
3294 rptr_addr = ring->rptr_gpu_addr;
3295 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
3296 WREG32_SOC15(GC, 0, regCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
3297 CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
3298 wptr_gpu_addr = ring->wptr_gpu_addr;
3299 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_LO,
3300 lower_32_bits(wptr_gpu_addr));
3301 WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_ADDR_HI,
3302 upper_32_bits(wptr_gpu_addr));
3305 WREG32_SOC15(GC, 0, regCP_RB1_CNTL, tmp);
3307 rb_addr = ring->gpu_addr >> 8;
3308 WREG32_SOC15(GC, 0, regCP_RB1_BASE, rb_addr);
3309 WREG32_SOC15(GC, 0, regCP_RB1_BASE_HI, upper_32_bits(rb_addr));
3310 WREG32_SOC15(GC, 0, regCP_RB1_ACTIVE, 1);
3312 gfx_v11_0_cp_gfx_set_doorbell(adev, ring);
3313 mutex_unlock(&adev->srbm_mutex);
3315 /* Switch to pipe 0 */
3316 mutex_lock(&adev->srbm_mutex);
3317 gfx_v11_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
3318 mutex_unlock(&adev->srbm_mutex);
3320 /* start the ring */
3321 gfx_v11_0_cp_gfx_start(adev);
3326 static void gfx_v11_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
3330 if (adev->gfx.rs64_enable) {
3331 data = RREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL);
3332 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_INVALIDATE_ICACHE, enable ? 0 : 1);
3334 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_RESET, enable ? 0 : 1);
3336 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_RESET, enable ? 0 : 1);
3338 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_RESET, enable ? 0 : 1);
3340 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_RESET, enable ? 0 : 1);
3342 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE0_ACTIVE, enable ? 1 : 0);
3344 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE1_ACTIVE, enable ? 1 : 0);
3346 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE2_ACTIVE, enable ? 1 : 0);
3348 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_PIPE3_ACTIVE, enable ? 1 : 0);
3350 data = REG_SET_FIELD(data, CP_MEC_RS64_CNTL, MEC_HALT, enable ? 0 : 1);
3352 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, data);
3354 data = RREG32_SOC15(GC, 0, regCP_MEC_CNTL);
if (enable) {
3357 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 0);
3358 if (!adev->enable_mes_kiq)
3359 data = REG_SET_FIELD(data, CP_MEC_CNTL,
MEC_ME2_HALT, 0);
} else {
3362 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME1_HALT, 1);
3363 data = REG_SET_FIELD(data, CP_MEC_CNTL, MEC_ME2_HALT, 1);
}
3365 WREG32_SOC15(GC, 0, regCP_MEC_CNTL, data);
3371 static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3373 const struct gfx_firmware_header_v1_0 *mec_hdr;
3374 const __le32 *fw_data;
3375 unsigned i, fw_size;
3379 if (!adev->gfx.mec_fw)
return -EINVAL;
3382 gfx_v11_0_cp_compute_enable(adev, false);
3384 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3385 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3387 fw_data = (const __le32 *)
3388 (adev->gfx.mec_fw->data +
3389 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
3390 fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
3392 r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
3393 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
3394 &adev->gfx.mec.mec_fw_obj,
3395 &adev->gfx.mec.mec_fw_gpu_addr, (void **)&fw);
3398 dev_err(adev->dev, "(%d) failed to create mec fw bo\n", r);
3399 gfx_v11_0_mec_fini(adev);
3403 memcpy(fw, fw_data, fw_size);
3405 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3406 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3408 gfx_v11_0_config_mec_cache(adev, adev->gfx.mec.mec_fw_gpu_addr);
3411 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, 0);
3413 for (i = 0; i < mec_hdr->jt_size; i++)
3414 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_DATA,
3415 le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
3417 WREG32_SOC15(GC, 0, regCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
3422 static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
3424 const struct gfx_firmware_header_v2_0 *mec_hdr;
3425 const __le32 *fw_ucode, *fw_data;
3426 u32 tmp, fw_ucode_size, fw_data_size;
3427 u32 i, usec_timeout = 50000; /* Wait for 50 ms */
3428 u32 *fw_ucode_ptr, *fw_data_ptr;
3431 if (!adev->gfx.mec_fw)
return -EINVAL;
3434 gfx_v11_0_cp_compute_enable(adev, false);
3436 mec_hdr = (const struct gfx_firmware_header_v2_0 *)adev->gfx.mec_fw->data;
3437 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3439 fw_ucode = (const __le32 *) (adev->gfx.mec_fw->data +
3440 le32_to_cpu(mec_hdr->ucode_offset_bytes));
3441 fw_ucode_size = le32_to_cpu(mec_hdr->ucode_size_bytes);
3443 fw_data = (const __le32 *) (adev->gfx.mec_fw->data +
3444 le32_to_cpu(mec_hdr->data_offset_bytes));
3445 fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
3447 r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
3449 AMDGPU_GEM_DOMAIN_VRAM |
3450 AMDGPU_GEM_DOMAIN_GTT,
3451 &adev->gfx.mec.mec_fw_obj,
3452 &adev->gfx.mec.mec_fw_gpu_addr,
3453 (void **)&fw_ucode_ptr);
3455 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3456 gfx_v11_0_mec_fini(adev);
3460 r = amdgpu_bo_create_reserved(adev, fw_data_size,
3462 AMDGPU_GEM_DOMAIN_VRAM |
3463 AMDGPU_GEM_DOMAIN_GTT,
3464 &adev->gfx.mec.mec_fw_data_obj,
3465 &adev->gfx.mec.mec_fw_data_gpu_addr,
3466 (void **)&fw_data_ptr);
3468 dev_err(adev->dev, "(%d) failed to create mec fw ucode bo\n", r);
3469 gfx_v11_0_mec_fini(adev);
3473 memcpy(fw_ucode_ptr, fw_ucode, fw_ucode_size);
3474 memcpy(fw_data_ptr, fw_data, fw_data_size);
3476 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
3477 amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_data_obj);
3478 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
3479 amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_data_obj);
3481 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL);
3482 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
3483 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, EXE_DISABLE, 0);
3484 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
3485 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_CNTL, tmp);
3487 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL);
3488 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, VMID, 0);
3489 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_BASE_CNTL, CACHE_POLICY, 0);
3490 WREG32_SOC15(GC, 0, regCP_MEC_DC_BASE_CNTL, tmp);
3492 mutex_lock(&adev->srbm_mutex);
3493 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3494 soc21_grbm_select(adev, 1, i, 0, 0);
3496 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_LO, adev->gfx.mec.mec_fw_data_gpu_addr);
3497 WREG32_SOC15(GC, 0, regCP_MEC_MDBASE_HI,
3498 upper_32_bits(adev->gfx.mec.mec_fw_data_gpu_addr));
3500 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START,
3501 mec_hdr->ucode_start_addr_lo >> 2 |
3502 mec_hdr->ucode_start_addr_hi << 30);
3503 WREG32_SOC15(GC, 0, regCP_MEC_RS64_PRGRM_CNTR_START_HI,
3504 mec_hdr->ucode_start_addr_hi >> 2);
3506 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_LO, adev->gfx.mec.mec_fw_gpu_addr);
3507 WREG32_SOC15(GC, 0, regCP_CPC_IC_BASE_HI,
3508 upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
3510 mutex_unlock(&adev->srbm_mutex);
3511 soc21_grbm_select(adev, 0, 0, 0, 0);
3513 /* Trigger an invalidation of the L1 data caches */
3514 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3515 tmp = REG_SET_FIELD(tmp, CP_MEC_DC_OP_CNTL, INVALIDATE_DCACHE, 1);
3516 WREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL, tmp);
3518 /* Wait for invalidation complete */
3519 for (i = 0; i < usec_timeout; i++) {
3520 tmp = RREG32_SOC15(GC, 0, regCP_MEC_DC_OP_CNTL);
3521 if (1 == REG_GET_FIELD(tmp, CP_MEC_DC_OP_CNTL,
3522 INVALIDATE_DCACHE_COMPLETE))
3527 if (i >= usec_timeout) {
3528 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3532 /* Trigger an invalidation of the L1 instruction caches */
3533 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3534 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_OP_CNTL, INVALIDATE_CACHE, 1);
3535 WREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL, tmp);
3537 /* Wait for invalidation complete */
3538 for (i = 0; i < usec_timeout; i++) {
3539 tmp = RREG32_SOC15(GC, 0, regCP_CPC_IC_OP_CNTL);
3540 if (1 == REG_GET_FIELD(tmp, CP_CPC_IC_OP_CNTL,
3541 INVALIDATE_CACHE_COMPLETE))
3546 if (i >= usec_timeout) {
3547 dev_err(adev->dev, "failed to invalidate instruction cache\n");
3554 static void gfx_v11_0_kiq_setting(struct amdgpu_ring *ring)
3557 struct amdgpu_device *adev = ring->adev;
3559 /* tell RLC which queue is the KIQ */
3560 tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
tmp &= 0xffffff00;
3562 tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
3563 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
tmp |= 0x80;
3565 WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp);
3568 static void gfx_v11_0_cp_set_doorbell_range(struct amdgpu_device *adev)
3570 /* set graphics engine doorbell range */
3571 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_LOWER,
3572 (adev->doorbell_index.gfx_ring0 * 2) << 2);
3573 WREG32_SOC15(GC, 0, regCP_RB_DOORBELL_RANGE_UPPER,
3574 (adev->doorbell_index.gfx_userqueue_end * 2) << 2);
3576 /* set compute engine doorbell range */
3577 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3578 (adev->doorbell_index.kiq * 2) << 2);
3579 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3580 (adev->doorbell_index.userqueue_end * 2) << 2);
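/*
 * Note: doorbell_index values count 64-bit doorbell slots, so the "* 2"
 * converts to 32-bit dword units and the "<< 2" to the byte offset the CP
 * compares against - e.g. index 8 maps to (8 * 2) << 2 = 0x40. This
 * follows the common amdgpu doorbell layout; treat the arithmetic, not
 * the layout, as authoritative here.
 */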
3583 static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
3584 struct amdgpu_mqd_prop *prop)
3586 struct v11_gfx_mqd *mqd = m;
3587 uint64_t hqd_gpu_addr, wb_gpu_addr;
3591 /* set up gfx hqd wptr */
3592 mqd->cp_gfx_hqd_wptr = 0;
3593 mqd->cp_gfx_hqd_wptr_hi = 0;
3595 /* set the pointer to the MQD */
3596 mqd->cp_mqd_base_addr = prop->mqd_gpu_addr & 0xfffffffc;
3597 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3599 /* set up mqd control */
3600 tmp = RREG32_SOC15(GC, 0, regCP_GFX_MQD_CONTROL);
3601 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, VMID, 0);
3602 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, PRIV_STATE, 1);
3603 tmp = REG_SET_FIELD(tmp, CP_GFX_MQD_CONTROL, CACHE_POLICY, 0);
3604 mqd->cp_gfx_mqd_control = tmp;
3606 /* set up gfx_hqd_vmid with 0x0 to indicate the ring buffer's vmid */
3607 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_VMID);
3608 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_VMID, VMID, 0);
3609 mqd->cp_gfx_hqd_vmid = 0;
3611 /* set up default queue priority level
3612 * 0x0 = low priority, 0x1 = high priority */
3613 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUEUE_PRIORITY);
3614 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUEUE_PRIORITY, PRIORITY_LEVEL, 0);
3615 mqd->cp_gfx_hqd_queue_priority = tmp;
3617 /* set up time quantum */
3618 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_QUANTUM);
3619 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_QUANTUM, QUANTUM_EN, 1);
3620 mqd->cp_gfx_hqd_quantum = tmp;
3622 /* set up gfx hqd base. this is similar to CP_RB_BASE */
3623 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3624 mqd->cp_gfx_hqd_base = hqd_gpu_addr;
3625 mqd->cp_gfx_hqd_base_hi = upper_32_bits(hqd_gpu_addr);
3627 /* set up hqd_rptr_addr/_hi, similar to CP_RB_RPTR */
3628 wb_gpu_addr = prop->rptr_gpu_addr;
3629 mqd->cp_gfx_hqd_rptr_addr = wb_gpu_addr & 0xfffffffc;
3630 mqd->cp_gfx_hqd_rptr_addr_hi =
3631 upper_32_bits(wb_gpu_addr) & 0xffff;
3633 /* set up rb_wptr_poll addr */
3634 wb_gpu_addr = prop->wptr_gpu_addr;
3635 mqd->cp_rb_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3636 mqd->cp_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
3638 /* set up the gfx_hqd_control, similar to CP_RB0_CNTL */
3639 rb_bufsz = order_base_2(prop->queue_size / 4) - 1;
3640 tmp = RREG32_SOC15(GC, 0, regCP_GFX_HQD_CNTL);
3641 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BUFSZ, rb_bufsz);
3642 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
3644 tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, BUF_SWAP, 1);
#endif
3646 mqd->cp_gfx_hqd_cntl = tmp;
3648 /* set up cp_doorbell_control */
3649 tmp = RREG32_SOC15(GC, 0, regCP_RB_DOORBELL_CONTROL);
3650 if (prop->use_doorbell) {
3651 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
3652 DOORBELL_OFFSET, prop->doorbell_index);
3653 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 1);
} else
3656 tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL,
DOORBELL_EN, 0);
3658 mqd->cp_rb_doorbell_control = tmp;
3660 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3661 mqd->cp_gfx_hqd_rptr = RREG32_SOC15(GC, 0, regCP_GFX_HQD_RPTR);
3663 /* activate the queue */
3664 mqd->cp_gfx_hqd_active = 1;
3669 static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
3671 struct amdgpu_device *adev = ring->adev;
3672 struct v11_gfx_mqd *mqd = ring->mqd_ptr;
3673 int mqd_idx = ring - &adev->gfx.gfx_ring[0];
3675 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
3676 memset((void *)mqd, 0, sizeof(*mqd));
3677 mutex_lock(&adev->srbm_mutex);
3678 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3679 amdgpu_ring_init_mqd(ring);
3680 soc21_grbm_select(adev, 0, 0, 0, 0);
3681 mutex_unlock(&adev->srbm_mutex);
3682 if (adev->gfx.me.mqd_backup[mqd_idx])
3683 memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else {
/* restore mqd with the backup copy */
3686 if (adev->gfx.me.mqd_backup[mqd_idx])
3687 memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
3688 /* reset the ring */
ring->wptr = 0;
*ring->wptr_cpu_addr = 0;
amdgpu_ring_clear_ring(ring);
}

return 0;
3697 static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
3700 struct amdgpu_ring *ring;
3702 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
3703 ring = &adev->gfx.gfx_ring[i];
3705 r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
3709 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
r = gfx_v11_0_gfx_init_queue(ring);
3712 amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
return r;
}
r = amdgpu_gfx_enable_kgq(adev, 0);
if (r)
return r;
3724 return gfx_v11_0_cp_gfx_start(adev);
3727 static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
3728 struct amdgpu_mqd_prop *prop)
3730 struct v11_compute_mqd *mqd = m;
uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
uint32_t tmp;
3734 mqd->header = 0xC0310800;
3735 mqd->compute_pipelinestat_enable = 0x00000001;
3736 mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
3737 mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
3738 mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
3739 mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
3740 mqd->compute_misc_reserved = 0x00000007;
3742 eop_base_addr = prop->eop_gpu_addr >> 8;
3743 mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
3744 mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
3746 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3747 tmp = RREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL);
3748 tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
3749 (order_base_2(GFX11_MEC_HPD_SIZE / 4) - 1));
3751 mqd->cp_hqd_eop_control = tmp;
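/*
 * With GFX11_MEC_HPD_SIZE = 2048 bytes this works out to
 * 2048 / 4 = 512 dwords, order_base_2(512) - 1 = 8, and the hardware
 * decoding 2^(8 + 1) = 512 dwords, exactly covering the EOP buffer.
 */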
3753 /* enable doorbell? */
3754 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3756 if (prop->use_doorbell) {
3757 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3758 DOORBELL_OFFSET, prop->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
} else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
}
3770 mqd->cp_hqd_pq_doorbell_control = tmp;
3772 /* disable the queue if it's active */
3773 mqd->cp_hqd_dequeue_request = 0;
3774 mqd->cp_hqd_pq_rptr = 0;
3775 mqd->cp_hqd_pq_wptr_lo = 0;
3776 mqd->cp_hqd_pq_wptr_hi = 0;
3778 /* set the pointer to the MQD */
3779 mqd->cp_mqd_base_addr_lo = prop->mqd_gpu_addr & 0xfffffffc;
3780 mqd->cp_mqd_base_addr_hi = upper_32_bits(prop->mqd_gpu_addr);
3782 /* set MQD vmid to 0 */
3783 tmp = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
3784 tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
3785 mqd->cp_mqd_control = tmp;
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3788 hqd_gpu_addr = prop->hqd_base_gpu_addr >> 8;
3789 mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
3790 mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
3792 /* set up the HQD, this is similar to CP_RB0_CNTL */
3793 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL);
3794 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
3795 (order_base_2(prop->queue_size / 4) - 1));
3796 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
3797 (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
3798 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
3799 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
3800 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
3801 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
3802 mqd->cp_hqd_pq_control = tmp;
3804 /* set the wb address whether it's enabled or not */
3805 wb_gpu_addr = prop->rptr_gpu_addr;
3806 mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
3807 mqd->cp_hqd_pq_rptr_report_addr_hi =
3808 upper_32_bits(wb_gpu_addr) & 0xffff;
3810 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3811 wb_gpu_addr = prop->wptr_gpu_addr;
3812 mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
3813 mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
tmp = 0;
/* enable the doorbell if requested */
3817 if (prop->use_doorbell) {
3818 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
3819 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
3820 DOORBELL_OFFSET, prop->doorbell_index);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 1);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
}
3830 mqd->cp_hqd_pq_doorbell_control = tmp;
3832 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3833 mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR);
3835 /* set the vmid for the queue */
3836 mqd->cp_hqd_vmid = 0;
3838 tmp = RREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE);
3839 tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x55);
3840 mqd->cp_hqd_persistent_state = tmp;
3842 /* set MIN_IB_AVAIL_SIZE */
3843 tmp = RREG32_SOC15(GC, 0, regCP_HQD_IB_CONTROL);
3844 tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
3845 mqd->cp_hqd_ib_control = tmp;
3847 /* set static priority for a compute queue/ring */
3848 mqd->cp_hqd_pipe_priority = prop->hqd_pipe_priority;
3849 mqd->cp_hqd_queue_priority = prop->hqd_queue_priority;
mqd->cp_hqd_active = prop->hqd_active;

return 0;
3856 static int gfx_v11_0_kiq_init_register(struct amdgpu_ring *ring)
3858 struct amdgpu_device *adev = ring->adev;
3859 struct v11_compute_mqd *mqd = ring->mqd_ptr;
/* deactivate the queue */
3863 if (amdgpu_sriov_vf(adev))
3864 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, 0);
3866 /* disable wptr polling */
3867 WREG32_FIELD15_PREREG(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
3869 /* write the EOP addr */
3870 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR,
3871 mqd->cp_hqd_eop_base_addr_lo);
3872 WREG32_SOC15(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI,
3873 mqd->cp_hqd_eop_base_addr_hi);
3875 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
3876 WREG32_SOC15(GC, 0, regCP_HQD_EOP_CONTROL,
3877 mqd->cp_hqd_eop_control);
3879 /* enable doorbell? */
3880 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3881 mqd->cp_hqd_pq_doorbell_control);
3883 /* disable the queue if it's active */
3884 if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
3885 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
3886 for (j = 0; j < adev->usec_timeout; j++) {
if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
break;
udelay(1);
}
3891 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST,
3892 mqd->cp_hqd_dequeue_request);
3893 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR,
3894 mqd->cp_hqd_pq_rptr);
3895 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3896 mqd->cp_hqd_pq_wptr_lo);
3897 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3898 mqd->cp_hqd_pq_wptr_hi);
3901 /* set the pointer to the MQD */
3902 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR,
3903 mqd->cp_mqd_base_addr_lo);
3904 WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI,
3905 mqd->cp_mqd_base_addr_hi);
3907 /* set MQD vmid to 0 */
3908 WREG32_SOC15(GC, 0, regCP_MQD_CONTROL,
3909 mqd->cp_mqd_control);
/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
3912 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE,
3913 mqd->cp_hqd_pq_base_lo);
3914 WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI,
3915 mqd->cp_hqd_pq_base_hi);
3917 /* set up the HQD, this is similar to CP_RB0_CNTL */
3918 WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL,
3919 mqd->cp_hqd_pq_control);
3921 /* set the wb address whether it's enabled or not */
3922 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
3923 mqd->cp_hqd_pq_rptr_report_addr_lo);
3924 WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
3925 mqd->cp_hqd_pq_rptr_report_addr_hi);
3927 /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
3928 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
3929 mqd->cp_hqd_pq_wptr_poll_addr_lo);
3930 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
3931 mqd->cp_hqd_pq_wptr_poll_addr_hi);
3933 /* enable the doorbell if requested */
3934 if (ring->use_doorbell) {
3935 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_LOWER,
3936 (adev->doorbell_index.kiq * 2) << 2);
3937 WREG32_SOC15(GC, 0, regCP_MEC_DOORBELL_RANGE_UPPER,
3938 (adev->doorbell_index.userqueue_end * 2) << 2);
3941 WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
3942 mqd->cp_hqd_pq_doorbell_control);
3944 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3945 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO,
3946 mqd->cp_hqd_pq_wptr_lo);
3947 WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI,
3948 mqd->cp_hqd_pq_wptr_hi);
3950 /* set the vmid for the queue */
3951 WREG32_SOC15(GC, 0, regCP_HQD_VMID, mqd->cp_hqd_vmid);
3953 WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE,
3954 mqd->cp_hqd_persistent_state);
3956 /* activate the queue */
3957 WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE,
3958 mqd->cp_hqd_active);
3960 if (ring->use_doorbell)
WREG32_FIELD15_PREREG(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);

return 0;
3966 static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
3968 struct amdgpu_device *adev = ring->adev;
3969 struct v11_compute_mqd *mqd = ring->mqd_ptr;
3971 gfx_v11_0_kiq_setting(ring);
3973 if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
3974 /* reset MQD to a clean status */
3975 if (adev->gfx.kiq[0].mqd_backup)
3976 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
3978 /* reset ring buffer */
ring->wptr = 0;
amdgpu_ring_clear_ring(ring);
3982 mutex_lock(&adev->srbm_mutex);
3983 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3984 gfx_v11_0_kiq_init_register(ring);
3985 soc21_grbm_select(adev, 0, 0, 0, 0);
3986 mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(*mqd));
3989 if (amdgpu_sriov_vf(adev) && adev->in_suspend)
3990 amdgpu_ring_clear_ring(ring);
3991 mutex_lock(&adev->srbm_mutex);
3992 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
3993 amdgpu_ring_init_mqd(ring);
3994 gfx_v11_0_kiq_init_register(ring);
3995 soc21_grbm_select(adev, 0, 0, 0, 0);
3996 mutex_unlock(&adev->srbm_mutex);
3998 if (adev->gfx.kiq[0].mqd_backup)
memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
}

return 0;
4005 static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
4007 struct amdgpu_device *adev = ring->adev;
4008 struct v11_compute_mqd *mqd = ring->mqd_ptr;
4009 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4011 if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
4012 memset((void *)mqd, 0, sizeof(*mqd));
4013 mutex_lock(&adev->srbm_mutex);
4014 soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
4015 amdgpu_ring_init_mqd(ring);
4016 soc21_grbm_select(adev, 0, 0, 0, 0);
4017 mutex_unlock(&adev->srbm_mutex);
4019 if (adev->gfx.mec.mqd_backup[mqd_idx])
4020 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else {
/* restore MQD to a clean status */
4023 if (adev->gfx.mec.mqd_backup[mqd_idx])
4024 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
4025 /* reset ring buffer */
ring->wptr = 0;
atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}

return 0;
4034 static int gfx_v11_0_kiq_resume(struct amdgpu_device *adev)
4036 struct amdgpu_ring *ring;
4039 ring = &adev->gfx.kiq[0].ring;
4041 r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
return r;
4045 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
4046 if (unlikely(r != 0)) {
4047 amdgpu_bo_unreserve(ring->mqd_obj);
4051 gfx_v11_0_kiq_init_queue(ring);
4052 amdgpu_bo_kunmap(ring->mqd_obj);
4053 ring->mqd_ptr = NULL;
4054 amdgpu_bo_unreserve(ring->mqd_obj);
ring->sched.ready = true;

return 0;
4059 static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
4061 struct amdgpu_ring *ring = NULL;
4064 if (!amdgpu_async_gfx_ring)
4065 gfx_v11_0_cp_compute_enable(adev, true);
4067 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4068 ring = &adev->gfx.compute_ring[i];
4070 r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
goto done;
4073 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
r = gfx_v11_0_kcq_init_queue(ring);
4076 amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
amdgpu_bo_unreserve(ring->mqd_obj);
if (r)
goto done;
}
r = amdgpu_gfx_enable_kcq(adev, 0);
done:
return r;
4089 static int gfx_v11_0_cp_resume(struct amdgpu_device *adev)
4092 struct amdgpu_ring *ring;
4094 if (!(adev->flags & AMD_IS_APU))
4095 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4097 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4098 /* legacy firmware loading */
4099 r = gfx_v11_0_cp_gfx_load_microcode(adev);
4103 if (adev->gfx.rs64_enable)
r = gfx_v11_0_cp_compute_load_microcode_rs64(adev);
else
r = gfx_v11_0_cp_compute_load_microcode(adev);
4111 gfx_v11_0_cp_set_doorbell_range(adev);
4113 if (amdgpu_async_gfx_ring) {
4114 gfx_v11_0_cp_compute_enable(adev, true);
4115 gfx_v11_0_cp_gfx_enable(adev, true);
4118 if (adev->enable_mes_kiq && adev->mes.kiq_hw_init)
4119 r = amdgpu_mes_kiq_hw_init(adev);
else
r = gfx_v11_0_kiq_resume(adev);
if (r)
return r;
r = gfx_v11_0_kcq_resume(adev);
if (r)
return r;
4129 if (!amdgpu_async_gfx_ring) {
r = gfx_v11_0_cp_gfx_resume(adev);
if (r)
return r;
} else {
r = gfx_v11_0_cp_async_gfx_ring_resume(adev);
if (r)
return r;
}
4139 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4140 ring = &adev->gfx.gfx_ring[i];
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
}
4146 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4147 ring = &adev->gfx.compute_ring[i];
r = amdgpu_ring_test_helper(ring);
if (r)
return r;
}

return 0;
4156 static void gfx_v11_0_cp_enable(struct amdgpu_device *adev, bool enable)
4158 gfx_v11_0_cp_gfx_enable(adev, enable);
4159 gfx_v11_0_cp_compute_enable(adev, enable);
4162 static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
4167 r = adev->gfxhub.funcs->gart_enable(adev);
4171 adev->hdp.funcs->flush_hdp(adev, NULL);
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
4176 adev->gfxhub.funcs->set_fault_enable_default(adev, value);
4177 amdgpu_gmc_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB(0), 0);
4182 static void gfx_v11_0_select_cp_fw_arch(struct amdgpu_device *adev)
4187 if (adev->gfx.rs64_enable) {
4188 tmp = RREG32_SOC15(GC, 0, regCP_GFX_CNTL);
4189 tmp = REG_SET_FIELD(tmp, CP_GFX_CNTL, ENGINE_SEL, 1);
4190 WREG32_SOC15(GC, 0, regCP_GFX_CNTL, tmp);
4192 tmp = RREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL);
4193 tmp = REG_SET_FIELD(tmp, CP_MEC_ISA_CNTL, ISA_MODE, 1);
4194 WREG32_SOC15(GC, 0, regCP_MEC_ISA_CNTL, tmp);
}

if (amdgpu_emu_mode == 1)
msleep(100);
static int get_gb_addr_config(struct amdgpu_device *adev)
4205 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
if (gb_addr_config == 0)
return -EINVAL;
4209 adev->gfx.config.gb_addr_config_fields.num_pkrs =
4210 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
4212 adev->gfx.config.gb_addr_config = gb_addr_config;
4214 adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
4215 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4216 GB_ADDR_CONFIG, NUM_PIPES);
4218 adev->gfx.config.max_tile_pipes =
4219 adev->gfx.config.gb_addr_config_fields.num_pipes;
4221 adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
4222 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4223 GB_ADDR_CONFIG, MAX_COMPRESSED_FRAGS);
4224 adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
4225 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4226 GB_ADDR_CONFIG, NUM_RB_PER_SE);
4227 adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
4228 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4229 GB_ADDR_CONFIG, NUM_SHADER_ENGINES);
4230 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
4231 REG_GET_FIELD(adev->gfx.config.gb_addr_config,
4232 GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE));
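/*
 * All of the GB_ADDR_CONFIG fields above are log2-encoded, hence the
 * 1 << REG_GET_FIELD() decodes. As an illustrative example, a NUM_PIPES
 * field value of 3 means 2^3 = 8 pipes, and a PIPE_INTERLEAVE_SIZE
 * field value of 0 means 1 << (8 + 0) = 256 bytes of interleave.
 */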
4237 static void gfx_v11_0_disable_gpa_mode(struct amdgpu_device *adev)
4241 data = RREG32_SOC15(GC, 0, regCPC_PSP_DEBUG);
4242 data |= CPC_PSP_DEBUG__GPA_OVERRIDE_MASK;
4243 WREG32_SOC15(GC, 0, regCPC_PSP_DEBUG, data);
4245 data = RREG32_SOC15(GC, 0, regCPG_PSP_DEBUG);
4246 data |= CPG_PSP_DEBUG__GPA_OVERRIDE_MASK;
4247 WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
4250 static int gfx_v11_0_hw_init(void *handle)
4253 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4255 if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
4256 if (adev->gfx.imu.funcs) {
4257 /* RLC autoload sequence 1: Program rlc ram */
4258 if (adev->gfx.imu.funcs->program_rlc_ram)
4259 adev->gfx.imu.funcs->program_rlc_ram(adev);
4261 /* rlc autoload firmware */
4262 r = gfx_v11_0_rlc_backdoor_autoload_enable(adev);
4266 if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
4267 if (adev->gfx.imu.funcs && (amdgpu_dpm > 0)) {
4268 if (adev->gfx.imu.funcs->load_microcode)
4269 adev->gfx.imu.funcs->load_microcode(adev);
4270 if (adev->gfx.imu.funcs->setup_imu)
4271 adev->gfx.imu.funcs->setup_imu(adev);
4272 if (adev->gfx.imu.funcs->start_imu)
4273 adev->gfx.imu.funcs->start_imu(adev);
}

/* disable gpa mode in backdoor loading */
4277 gfx_v11_0_disable_gpa_mode(adev);
4281 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) ||
4282 (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
4283 r = gfx_v11_0_wait_for_rlc_autoload_complete(adev);
if (r) {
dev_err(adev->dev, "(%d) failed to wait rlc autoload complete\n", r);
return r;
}
4290 adev->gfx.is_poweron = true;
if (get_gb_addr_config(adev))
4293 DRM_WARN("Invalid gb_addr_config !\n");
4295 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
4296 adev->gfx.rs64_enable)
4297 gfx_v11_0_config_gfx_rs64(adev);
r = gfx_v11_0_gfxhub_enable(adev);
if (r)
return r;
4303 if (!amdgpu_emu_mode)
4304 gfx_v11_0_init_golden_registers(adev);
4306 if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
4307 (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
/*
 * For gfx 11, RLC firmware loading relies on the SMU firmware being
 * loaded first, so in the direct loading case the SMC ucode has to be
 * loaded here before the RLC.
 */
4313 if (!(adev->flags & AMD_IS_APU)) {
4314 r = amdgpu_pm_load_smu_firmware(adev, NULL);
4320 gfx_v11_0_constants_init(adev);
4322 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
4323 gfx_v11_0_select_cp_fw_arch(adev);
4325 if (adev->nbio.funcs->gc_doorbell_init)
4326 adev->nbio.funcs->gc_doorbell_init(adev);
r = gfx_v11_0_rlc_resume(adev);
if (r)
return r;

/*
4333 * init golden registers and rlc resume may override some registers,
4334 * reconfig them here
 */
gfx_v11_0_tcp_harvest(adev);
r = gfx_v11_0_cp_resume(adev);
if (r)
return r;

return 0;
4345 static int gfx_v11_0_hw_fini(void *handle)
4347 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4349 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4350 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4352 if (!adev->no_hw_access) {
4353 if (amdgpu_async_gfx_ring) {
4354 if (amdgpu_gfx_disable_kgq(adev, 0))
4355 DRM_ERROR("KGQ disable failed\n");
4358 if (amdgpu_gfx_disable_kcq(adev, 0))
4359 DRM_ERROR("KCQ disable failed\n");
4361 amdgpu_mes_kiq_hw_fini(adev);
4364 if (amdgpu_sriov_vf(adev))
4365 /* Remove the steps disabling CPG and clearing KIQ position,
4366 * so that CP could perform IDLE-SAVE during switch. Those
 * steps are necessary to avoid a DMAR error on gfx9, but the error is
 * not reproduced on gfx11.
 */
return 0;
4372 gfx_v11_0_cp_enable(adev, false);
4373 gfx_v11_0_enable_gui_idle_interrupt(adev, false);
4375 adev->gfxhub.funcs->gart_disable(adev);
adev->gfx.is_poweron = false;

return 0;
4382 static int gfx_v11_0_suspend(void *handle)
4384 return gfx_v11_0_hw_fini(handle);
4387 static int gfx_v11_0_resume(void *handle)
4389 return gfx_v11_0_hw_init(handle);
4392 static bool gfx_v11_0_is_idle(void *handle)
4394 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (REG_GET_FIELD(RREG32_SOC15(GC, 0, regGRBM_STATUS),
GRBM_STATUS, GUI_ACTIVE))
return false;
else
return true;
4403 static int gfx_v11_0_wait_for_idle(void *handle)
4407 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4409 for (i = 0; i < adev->usec_timeout; i++) {
/* read GRBM_STATUS */
4411 tmp = RREG32_SOC15(GC, 0, regGRBM_STATUS) &
4412 GRBM_STATUS__GUI_ACTIVE_MASK;
if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
return 0;
udelay(1);
}

return -ETIMEDOUT;
4421 static int gfx_v11_0_soft_reset(void *handle)
4423 u32 grbm_soft_reset = 0;
4426 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4428 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4429 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
4430 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 0);
4431 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 0);
4432 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 0);
4433 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4435 gfx_v11_0_set_safe_mode(adev, 0);
4437 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
4438 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
4439 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
4440 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4441 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4442 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4443 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4444 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4446 WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
4447 WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
4451 for (i = 0; i < adev->gfx.me.num_me; ++i) {
4452 for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
4453 for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
4454 tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL);
4455 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i);
4456 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j);
4457 tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k);
4458 WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp);
4460 WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1);
4465 WREG32_SOC15(GC, 0, regCP_VMID_RESET, 0xfffffffe);
/* Read the CP_VMID_RESET register three times to give
 * GFX_HQD_ACTIVE sufficient time to reach 0.
 */
4469 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4470 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4471 RREG32_SOC15(GC, 0, regCP_VMID_RESET);
4473 for (i = 0; i < adev->usec_timeout; i++) {
4474 if (!RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) &&
!RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE))
break;
udelay(1);
}

if (i >= adev->usec_timeout) {
dev_err(adev->dev, "failed to wait for all pipes to become idle\n");
return -EINVAL;
}
4484 /********** trigger soft reset ***********/
4485 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CP, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_GFX, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPF, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPC, 1);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPG, 1);
WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4497 /********** exit soft reset ***********/
4498 grbm_soft_reset = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CP, 0);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_GFX, 0);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPF, 0);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPC, 0);
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET,
SOFT_RESET_CPG, 0);
WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, grbm_soft_reset);
4511 tmp = RREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL);
4512 tmp = REG_SET_FIELD(tmp, CP_SOFT_RESET_CNTL, CMP_HQD_REG_RESET, 0x1);
4513 WREG32_SOC15(GC, 0, regCP_SOFT_RESET_CNTL, tmp);
4515 WREG32_SOC15(GC, 0, regCP_ME_CNTL, 0x0);
4516 WREG32_SOC15(GC, 0, regCP_MEC_RS64_CNTL, 0x0);
4518 for (i = 0; i < adev->usec_timeout; i++) {
if (!RREG32_SOC15(GC, 0, regCP_VMID_RESET))
break;
udelay(1);
}

if (i >= adev->usec_timeout) {
dev_err(adev->dev, "failed to wait for CP_VMID_RESET to clear\n");
return -EINVAL;
}
4528 tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4529 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4530 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4531 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4532 tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4533 WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
4535 gfx_v11_0_unset_safe_mode(adev, 0);
4537 return gfx_v11_0_cp_resume(adev);
4540 static bool gfx_v11_0_check_soft_reset(void *handle)
4543 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4544 struct amdgpu_ring *ring;
4545 long tmo = msecs_to_jiffies(1000);
4547 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
4548 ring = &adev->gfx.gfx_ring[i];
r = amdgpu_ring_test_ib(ring, tmo);
if (r)
return true;
}
4554 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4555 ring = &adev->gfx.compute_ring[i];
r = amdgpu_ring_test_ib(ring, tmo);
if (r)
return true;
}

return false;
4564 static int gfx_v11_0_post_soft_reset(void *handle)
 * A GFX soft reset will impact MES, so MES needs to be resumed after a GFX soft reset
4569 return amdgpu_mes_resume((struct amdgpu_device *)handle);
4572 static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4575 uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
4577 if (amdgpu_sriov_vf(adev)) {
4578 amdgpu_gfx_off_ctrl(adev, false);
4579 mutex_lock(&adev->gfx.gpu_clock_mutex);
4580 clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4581 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4582 clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
4583 if (clock_counter_hi_pre != clock_counter_hi_after)
4584 clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
4585 mutex_unlock(&adev->gfx.gpu_clock_mutex);
4586 amdgpu_gfx_off_ctrl(adev, true);
} else {
clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4590 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
4591 clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
4592 if (clock_counter_hi_pre != clock_counter_hi_after)
4593 clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
}

clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);

return clock;
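/*
 * Both branches use the classic hi/lo/hi pattern for reading a 64-bit
 * counter through two 32-bit registers: if the high word changed
 * between the two reads, the counter carried over mid-sequence, so the
 * low word is re-read to pair it with the new high word. A single
 * lo-then-hi read could otherwise be off by 2^32.
 */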
4601 static void gfx_v11_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4603 uint32_t gds_base, uint32_t gds_size,
4604 uint32_t gws_base, uint32_t gws_size,
4605 uint32_t oa_base, uint32_t oa_size)
4607 struct amdgpu_device *adev = ring->adev;
/* GDS Base */
gfx_v11_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_BASE) + 2 * vmid,
gds_base);

/* GDS Size */
gfx_v11_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, regGDS_VMID0_SIZE) + 2 * vmid,
gds_size);

/* GWS */
gfx_v11_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, regGDS_GWS_VMID0) + vmid,
gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

/* OA */
gfx_v11_0_write_data_to_reg(ring, 0, false,
SOC15_REG_OFFSET(GC, 0, regGDS_OA_VMID0) + vmid,
(1 << (oa_size + oa_base)) - (1 << oa_base));
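/*
 * The OA expression builds a contiguous mask of oa_size bits starting
 * at bit oa_base: for example, oa_base = 4 and oa_size = 2 give
 * (1 << 6) - (1 << 4) = 0x40 - 0x10 = 0x30, i.e. bits 4 and 5 set.
 */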
4630 static int gfx_v11_0_early_init(void *handle)
4632 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4634 adev->gfx.funcs = &gfx_v11_0_gfx_funcs;
4636 adev->gfx.num_gfx_rings = GFX11_NUM_GFX_RINGS;
4637 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
4638 AMDGPU_MAX_COMPUTE_RINGS);
4640 gfx_v11_0_set_kiq_pm4_funcs(adev);
4641 gfx_v11_0_set_ring_funcs(adev);
4642 gfx_v11_0_set_irq_funcs(adev);
4643 gfx_v11_0_set_gds_init(adev);
4644 gfx_v11_0_set_rlc_funcs(adev);
4645 gfx_v11_0_set_mqd_funcs(adev);
4646 gfx_v11_0_set_imu_funcs(adev);
4648 gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
4650 return gfx_v11_0_init_microcode(adev);
4653 static int gfx_v11_0_ras_late_init(void *handle)
4655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4656 struct ras_common_if *gfx_common_if;
4659 gfx_common_if = kzalloc(sizeof(struct ras_common_if), GFP_KERNEL);
if (!gfx_common_if)
return -ENOMEM;

gfx_common_if->block = AMDGPU_RAS_BLOCK__GFX;
4665 ret = amdgpu_ras_feature_enable(adev, gfx_common_if, true);
if (ret)
dev_warn(adev->dev, "Failed to enable gfx11 ras feature\n");
kfree(gfx_common_if);

return 0;
4673 static int gfx_v11_0_late_init(void *handle)
4675 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
if (r)
return r;
r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
if (r)
return r;
4686 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
r = gfx_v11_0_ras_late_init(handle);
if (r)
return r;
}

return 0;
4695 static bool gfx_v11_0_is_rlc_enabled(struct amdgpu_device *adev)
4699 /* if RLC is not enabled, do nothing */
4700 rlc_cntl = RREG32_SOC15(GC, 0, regRLC_CNTL);
4701 return (REG_GET_FIELD(rlc_cntl, RLC_CNTL, RLC_ENABLE_F32)) ? true : false;
4704 static void gfx_v11_0_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
4709 data = RLC_SAFE_MODE__CMD_MASK;
4710 data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
4712 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, data);
4714 /* wait for RLC_SAFE_MODE */
4715 for (i = 0; i < adev->usec_timeout; i++) {
4716 if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, regRLC_SAFE_MODE),
RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
4723 static void gfx_v11_0_unset_safe_mode(struct amdgpu_device *adev, int xcc_id)
4725 WREG32_SOC15(GC, 0, regRLC_SAFE_MODE, RLC_SAFE_MODE__CMD_MASK);
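/*
 * The safe-mode handshake above: entering writes CMD plus MESSAGE = 1
 * to RLC_SAFE_MODE and then polls until the RLC firmware acknowledges
 * by clearing the CMD field; leaving writes CMD with MESSAGE = 0 and
 * does not wait for an acknowledgement.
 */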
4728 static void gfx_v11_0_update_perf_clk(struct amdgpu_device *adev,
4733 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_PERF_CLK))
4736 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
if (enable)
data &= ~RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;
else
data |= RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK;

if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4747 static void gfx_v11_0_update_sram_fgcg(struct amdgpu_device *adev,
4752 if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
4755 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
if (enable)
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
else
data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;

if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4766 static void gfx_v11_0_update_repeater_fgcg(struct amdgpu_device *adev,
4771 if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
4774 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
if (enable)
data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;
else
data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK;

if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4785 static void gfx_v11_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
4790 if (!(adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)))
4793 /* It is disabled by HW by default */
if (enable) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4796 /* 1 - RLC_CGTT_MGCG_OVERRIDE */
4797 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4799 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4800 RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4801 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}
} else {
4807 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG) {
4808 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4810 data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
4811 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
4812 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK);
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
}
}
4820 static void gfx_v11_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
4825 if (!(adev->cg_flags &
4826 (AMD_CG_SUPPORT_GFX_CGCG |
4827 AMD_CG_SUPPORT_GFX_CGLS |
4828 AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS)))
return;

if (enable) {
4833 def = data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
4835 /* unset CGCG override */
4836 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4837 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
4838 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4839 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
4840 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG ||
4841 adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4842 data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_GFX3D_CG_OVERRIDE_MASK;
4844 /* update CGCG override bits */
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE, data);
4848 /* enable cgcg FSM(0x0000363F) */
4849 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4851 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG) {
4852 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD_MASK;
4853 data |= (0x36 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4854 RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
}

if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
4858 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY_MASK;
4859 data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4860 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
}

if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4866 /* Program RLC_CGCG_CGLS_CTRL_3D */
4867 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4869 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG) {
4870 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD_MASK;
4871 data |= (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
4872 RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
}

if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS) {
4876 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY_MASK;
4877 data |= (0xf << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
4878 RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
}

if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4884 /* set IDLE_POLL_COUNT(0x00900100) */
4885 def = data = RREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL);
4887 data &= ~(CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY_MASK | CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK);
4888 data |= (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
4889 (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
if (def != data)
WREG32_SOC15(GC, 0, regCP_RB_WPTR_POLL_CNTL, data);
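/*
 * The two fields combine to the 0x00900100 named in the comment above:
 * 0x0100 lands in the low half-word (POLL_FREQUENCY) and 0x0090 in the
 * high half-word (IDLE_POLL_COUNT), i.e.
 * (0x0090 << 16) | 0x0100 = 0x00900100.
 */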
4894 data = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
4895 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_BUSY_INT_ENABLE, 1);
4896 data = REG_SET_FIELD(data, CP_INT_CNTL, CNTX_EMPTY_INT_ENABLE, 1);
4897 data = REG_SET_FIELD(data, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 1);
4898 data = REG_SET_FIELD(data, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
4899 WREG32_SOC15(GC, 0, regCP_INT_CNTL, data);
4901 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4902 data = REG_SET_FIELD(data, SDMA0_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4903 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4906 if (adev->sdma.num_instances > 1) {
4907 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4908 data = REG_SET_FIELD(data, SDMA1_RLC_CGCG_CTRL, CGCG_INT_ENABLE, 1);
4909 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
}
} else {
/* Program RLC_CGCG_CGLS_CTRL */
4913 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
4915 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)
4916 data &= ~RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
4918 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
4919 data &= ~RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL, data);
4924 /* Program RLC_CGCG_CGLS_CTRL_3D */
4925 def = data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
4927 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
4928 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
4929 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
4930 data &= ~RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
if (def != data)
WREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D, data);
4935 data = RREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL);
4936 data &= ~SDMA0_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4937 WREG32_SOC15(GC, 0, regSDMA0_RLC_CGCG_CTRL, data);
/* Some ASICs only have one SDMA instance, so there is no need to configure SDMA1 */
4940 if (adev->sdma.num_instances > 1) {
4941 data = RREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL);
4942 data &= ~SDMA1_RLC_CGCG_CTRL__CGCG_INT_ENABLE_MASK;
4943 WREG32_SOC15(GC, 0, regSDMA1_RLC_CGCG_CTRL, data);
4948 static int gfx_v11_0_update_gfx_clock_gating(struct amdgpu_device *adev,
4951 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
4953 gfx_v11_0_update_coarse_grain_clock_gating(adev, enable);
4955 gfx_v11_0_update_medium_grain_clock_gating(adev, enable);
4957 gfx_v11_0_update_repeater_fgcg(adev, enable);
4959 gfx_v11_0_update_sram_fgcg(adev, enable);
4961 gfx_v11_0_update_perf_clk(adev, enable);
4963 if (adev->cg_flags &
4964 (AMD_CG_SUPPORT_GFX_MGCG |
4965 AMD_CG_SUPPORT_GFX_CGLS |
4966 AMD_CG_SUPPORT_GFX_CGCG |
4967 AMD_CG_SUPPORT_GFX_3D_CGCG |
4968 AMD_CG_SUPPORT_GFX_3D_CGLS))
4969 gfx_v11_0_enable_gui_idle_interrupt(adev, enable);
4971 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
4976 static void gfx_v11_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
4980 amdgpu_gfx_off_ctrl(adev, false);
4982 reg = SOC15_REG_OFFSET(GC, 0, regRLC_SPM_MC_CNTL);
4983 if (amdgpu_sriov_is_pp_one_vf(adev))
data = RREG32_NO_KIQ(reg);
else
data = RREG32(reg);
4988 data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
4989 data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
4991 if (amdgpu_sriov_is_pp_one_vf(adev))
WREG32_SOC15_NO_KIQ(GC, 0, regRLC_SPM_MC_CNTL, data);
else
4994 WREG32_SOC15(GC, 0, regRLC_SPM_MC_CNTL, data);
4996 amdgpu_gfx_off_ctrl(adev, true);
4999 static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
5000 .is_rlc_enabled = gfx_v11_0_is_rlc_enabled,
5001 .set_safe_mode = gfx_v11_0_set_safe_mode,
5002 .unset_safe_mode = gfx_v11_0_unset_safe_mode,
5003 .init = gfx_v11_0_rlc_init,
5004 .get_csb_size = gfx_v11_0_get_csb_size,
5005 .get_csb_buffer = gfx_v11_0_get_csb_buffer,
5006 .resume = gfx_v11_0_rlc_resume,
5007 .stop = gfx_v11_0_rlc_stop,
5008 .reset = gfx_v11_0_rlc_reset,
5009 .start = gfx_v11_0_rlc_start,
5010 .update_spm_vmid = gfx_v11_0_update_spm_vmid,
5013 static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
5015 u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);
5017 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5018 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5020 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5022 WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);
/* Program RLC_PG_DELAY3 for CGPG hysteresis */
5025 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
5026 switch (adev->ip_versions[GC_HWIP][0]) {
5027 case IP_VERSION(11, 0, 1):
5028 case IP_VERSION(11, 0, 4):
WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
break;
default:
break;
}
5037 static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
5039 amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
5041 gfx_v11_cntl_power_gating(adev, enable);
5043 amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
5046 static int gfx_v11_0_set_powergating_state(void *handle,
5047 enum amd_powergating_state state)
5049 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5050 bool enable = (state == AMD_PG_STATE_GATE);
if (amdgpu_sriov_vf(adev))
return 0;
5055 switch (adev->ip_versions[GC_HWIP][0]) {
5056 case IP_VERSION(11, 0, 0):
5057 case IP_VERSION(11, 0, 2):
5058 case IP_VERSION(11, 0, 3):
amdgpu_gfx_off_ctrl(adev, enable);
break;
5061 case IP_VERSION(11, 0, 1):
5062 case IP_VERSION(11, 0, 4):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
5066 gfx_v11_cntl_pg(adev, enable);
if (enable)
amdgpu_gfx_off_ctrl(adev, true);
break;
default:
break;
}

return 0;
5079 static int gfx_v11_0_set_clockgating_state(void *handle,
5080 enum amd_clockgating_state state)
5082 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_sriov_vf(adev))
return 0;
5087 switch (adev->ip_versions[GC_HWIP][0]) {
5088 case IP_VERSION(11, 0, 0):
5089 case IP_VERSION(11, 0, 1):
5090 case IP_VERSION(11, 0, 2):
5091 case IP_VERSION(11, 0, 3):
5092 case IP_VERSION(11, 0, 4):
5093 gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
default:
break;
}

return 0;
5103 static void gfx_v11_0_get_clockgating_state(void *handle, u64 *flags)
5105 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5108 /* AMD_CG_SUPPORT_GFX_MGCG */
5109 data = RREG32_SOC15(GC, 0, regRLC_CGTT_MGCG_OVERRIDE);
5110 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
5111 *flags |= AMD_CG_SUPPORT_GFX_MGCG;
5113 /* AMD_CG_SUPPORT_REPEATER_FGCG */
5114 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_REPEATER_FGCG_OVERRIDE_MASK))
5115 *flags |= AMD_CG_SUPPORT_REPEATER_FGCG;
5117 /* AMD_CG_SUPPORT_GFX_FGCG */
5118 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK))
5119 *flags |= AMD_CG_SUPPORT_GFX_FGCG;
5121 /* AMD_CG_SUPPORT_GFX_PERF_CLK */
5122 if (!(data & RLC_CGTT_MGCG_OVERRIDE__PERFMON_CLOCK_STATE_MASK))
5123 *flags |= AMD_CG_SUPPORT_GFX_PERF_CLK;
5125 /* AMD_CG_SUPPORT_GFX_CGCG */
5126 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL);
5127 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
5128 *flags |= AMD_CG_SUPPORT_GFX_CGCG;
5130 /* AMD_CG_SUPPORT_GFX_CGLS */
5131 if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
5132 *flags |= AMD_CG_SUPPORT_GFX_CGLS;
5134 /* AMD_CG_SUPPORT_GFX_3D_CGCG */
5135 data = RREG32_SOC15(GC, 0, regRLC_CGCG_CGLS_CTRL_3D);
5136 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
5137 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG;
5139 /* AMD_CG_SUPPORT_GFX_3D_CGLS */
5140 if (data & RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK)
5141 *flags |= AMD_CG_SUPPORT_GFX_3D_CGLS;
5144 static u64 gfx_v11_0_ring_get_rptr_gfx(struct amdgpu_ring *ring)
/* gfx11 is 32bit rptr */
5147 return *(uint32_t *)ring->rptr_cpu_addr;
5150 static u64 gfx_v11_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
5152 struct amdgpu_device *adev = ring->adev;
5155 /* XXX check if swapping is necessary on BE */
5156 if (ring->use_doorbell) {
5157 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5159 wptr = RREG32_SOC15(GC, 0, regCP_RB0_WPTR);
5160 wptr += (u64)RREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI) << 32;
5166 static void gfx_v11_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
5168 struct amdgpu_device *adev = ring->adev;
5169 uint32_t *wptr_saved;
5170 uint32_t *is_queue_unmap;
5171 uint64_t aggregated_db_index;
5172 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_GFX].mqd_size;
5175 if (ring->is_mes_queue) {
5176 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
sizeof(uint32_t));
5179 aggregated_db_index =
5180 amdgpu_mes_get_aggregated_doorbell_index(adev,
ring->hw_prio);

wptr_tmp = ring->wptr & ring->buf_mask;
5184 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5185 *wptr_saved = wptr_tmp;
/* assume the doorbell is always used by a MES mapped queue */
5187 if (*is_queue_unmap) {
5188 WDOORBELL64(aggregated_db_index, wptr_tmp);
5189 WDOORBELL64(ring->doorbell_index, wptr_tmp);
} else {
WDOORBELL64(ring->doorbell_index, wptr_tmp);

if (*is_queue_unmap)
WDOORBELL64(aggregated_db_index, wptr_tmp);
}
} else {
5197 if (ring->use_doorbell) {
5198 /* XXX check if swapping is necessary on BE */
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr);
5201 WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
WREG32_SOC15(GC, 0, regCP_RB0_WPTR,
5204 lower_32_bits(ring->wptr));
5205 WREG32_SOC15(GC, 0, regCP_RB0_WPTR_HI,
5206 upper_32_bits(ring->wptr));
5211 static u64 gfx_v11_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
5213 /* gfx11 hardware is 32bit rptr */
5214 return *(uint32_t *)ring->rptr_cpu_addr;
5217 static u64 gfx_v11_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
5221 /* XXX check if swapping is necessary on BE */
5222 if (ring->use_doorbell)
5223 wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
5229 static void gfx_v11_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
5231 struct amdgpu_device *adev = ring->adev;
5232 uint32_t *wptr_saved;
5233 uint32_t *is_queue_unmap;
5234 uint64_t aggregated_db_index;
5235 uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size;
5238 if (ring->is_mes_queue) {
5239 wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
sizeof(uint32_t));
5242 aggregated_db_index =
5243 amdgpu_mes_get_aggregated_doorbell_index(adev,
ring->hw_prio);

wptr_tmp = ring->wptr & ring->buf_mask;
5247 atomic64_set((atomic64_t *)ring->wptr_cpu_addr, wptr_tmp);
5248 *wptr_saved = wptr_tmp;
/* assume the doorbell is always used by a MES mapped queue */
5250 if (*is_queue_unmap) {
5251 WDOORBELL64(aggregated_db_index, wptr_tmp);
5252 WDOORBELL64(ring->doorbell_index, wptr_tmp);
} else {
WDOORBELL64(ring->doorbell_index, wptr_tmp);

if (*is_queue_unmap)
WDOORBELL64(aggregated_db_index, wptr_tmp);
}
} else {
5260 /* XXX check if swapping is necessary on BE */
5261 if (ring->use_doorbell) {
atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
ring->wptr);
5264 WDOORBELL64(ring->doorbell_index, ring->wptr);
} else {
BUG(); /* only DOORBELL method supported on gfx11 now */
}
5271 static void gfx_v11_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
5273 struct amdgpu_device *adev = ring->adev;
5274 u32 ref_and_mask, reg_mem_engine;
5275 const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
5277 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
switch (ring->me) {
case 1:
ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
break;
case 2:
ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
break;
default:
return;
}
reg_mem_engine = 0;
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
5291 reg_mem_engine = 1; /* pfp */
}

gfx_v11_0_wait_reg_mem(ring, reg_mem_engine, 0, 1,
5295 adev->nbio.funcs->get_hdp_flush_req_offset(adev),
5296 adev->nbio.funcs->get_hdp_flush_done_offset(adev),
5297 ref_and_mask, ref_and_mask, 0x20);
5300 static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5301 struct amdgpu_job *job,
5302 struct amdgpu_ib *ib,
5305 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5306 u32 header, control = 0;
5308 BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
5310 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
5312 control |= ib->length_dw | (vmid << 24);
5314 if (ring->adev->gfx.mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
5315 control |= INDIRECT_BUFFER_PRE_ENB(1);
5317 if (flags & AMDGPU_IB_PREEMPTED)
5318 control |= INDIRECT_BUFFER_PRE_RESUME(1);
if (vmid)
gfx_v11_0_ring_emit_de_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
5325 if (ring->is_mes_queue)
5326 /* inherit vmid from mqd */
5327 control |= 0x400000;
5329 amdgpu_ring_write(ring, header);
5330 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
lower_32_bits(ib->gpu_addr));
5336 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5337 amdgpu_ring_write(ring, control);
5340 static void gfx_v11_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5341 struct amdgpu_job *job,
5342 struct amdgpu_ib *ib,
5345 unsigned vmid = AMDGPU_JOB_GET_VMID(job);
5346 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
5348 if (ring->is_mes_queue)
5349 /* inherit vmid from mqd */
5350 control |= 0x40000000;
/* Currently, there is a high probability of a wave ID mismatch
5353 * between ME and GDS, leading to a hw deadlock, because ME generates
5354 * different wave IDs than the GDS expects. This situation happens
5355 * randomly when at least 5 compute pipes use GDS ordered append.
5356 * The wave IDs generated by ME are also wrong after suspend/resume.
5357 * Those are probably bugs somewhere else in the kernel driver.
5359 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
5360 * GDS to 0 for this ring (me/pipe).
5362 if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
5363 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
5364 amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
5365 amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
5368 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5369 BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
lower_32_bits(ib->gpu_addr));
5375 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
5376 amdgpu_ring_write(ring, control);
5379 static void gfx_v11_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
5380 u64 seq, unsigned flags)
5382 bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
5383 bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
5385 /* RELEASE_MEM - flush caches, send int */
5386 amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
5387 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_GCR_SEQ |
5388 PACKET3_RELEASE_MEM_GCR_GL2_WB |
5389 PACKET3_RELEASE_MEM_GCR_GL2_INV |
5390 PACKET3_RELEASE_MEM_GCR_GL2_US |
5391 PACKET3_RELEASE_MEM_GCR_GL1_INV |
5392 PACKET3_RELEASE_MEM_GCR_GLV_INV |
5393 PACKET3_RELEASE_MEM_GCR_GLM_INV |
5394 PACKET3_RELEASE_MEM_GCR_GLM_WB |
5395 PACKET3_RELEASE_MEM_CACHE_POLICY(3) |
5396 PACKET3_RELEASE_MEM_EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
5397 PACKET3_RELEASE_MEM_EVENT_INDEX(5)));
5398 amdgpu_ring_write(ring, (PACKET3_RELEASE_MEM_DATA_SEL(write64bit ? 2 : 1) |
5399 PACKET3_RELEASE_MEM_INT_SEL(int_sel ? 2 : 0)));
/*
 * the address should be Qword aligned for a 64-bit write, and Dword
 * aligned if we only send the low 32 bits of data (the high bits are
 * discarded)
 */
if (write64bit)
BUG_ON(addr & 0x7);
else
BUG_ON(addr & 0x3);
5409 amdgpu_ring_write(ring, lower_32_bits(addr));
5410 amdgpu_ring_write(ring, upper_32_bits(addr));
5411 amdgpu_ring_write(ring, lower_32_bits(seq));
5412 amdgpu_ring_write(ring, upper_32_bits(seq));
5413 amdgpu_ring_write(ring, ring->is_mes_queue ?
5414 (ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0);
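/*
 * A reading of the RELEASE_MEM fields used above, per the soc15-family
 * packet conventions: DATA_SEL(2) writes the full 64-bit seq value
 * while DATA_SEL(1) writes only the low 32 bits, and INT_SEL(2) raises
 * the EOP interrupt once the write has been confirmed. The GCR bits
 * flush/invalidate the GL caches so the fence write is coherent with
 * the rest of the GPU.
 */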
5417 static void gfx_v11_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
5419 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5420 uint32_t seq = ring->fence_drv.sync_seq;
5421 uint64_t addr = ring->fence_drv.gpu_addr;
5423 gfx_v11_0_wait_reg_mem(ring, usepfp, 1, 0, lower_32_bits(addr),
5424 upper_32_bits(addr), seq, 0xffffffff, 4);
5427 static void gfx_v11_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
5428 uint16_t pasid, uint32_t flush_type,
5429 bool all_hub, uint8_t dst_sel)
5431 amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
5432 amdgpu_ring_write(ring,
5433 PACKET3_INVALIDATE_TLBS_DST_SEL(dst_sel) |
5434 PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
5435 PACKET3_INVALIDATE_TLBS_PASID(pasid) |
5436 PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
5439 static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
5440 unsigned vmid, uint64_t pd_addr)
5442 if (ring->is_mes_queue)
5443 gfx_v11_0_ring_invalidate_tlbs(ring, 0, 0, false, 0);
else
amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
5447 /* compute doesn't have PFP */
5448 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
5449 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5450 amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5451 amdgpu_ring_write(ring, 0x0);
5455 static void gfx_v11_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
5456 u64 seq, unsigned int flags)
5458 struct amdgpu_device *adev = ring->adev;
5460 /* we only allocate 32bit for each seq wb address */
5461 BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
5463 /* write fence seq to the "addr" */
5464 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5465 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5466 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
5467 amdgpu_ring_write(ring, lower_32_bits(addr));
5468 amdgpu_ring_write(ring, upper_32_bits(addr));
5469 amdgpu_ring_write(ring, lower_32_bits(seq));
5471 if (flags & AMDGPU_FENCE_FLAG_INT) {
5472 /* set register to trigger INT */
5473 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5474 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
5475 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
5476 amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, regCPC_INT_STATUS));
5477 amdgpu_ring_write(ring, 0);
5478 amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
5482 static void gfx_v11_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
uint32_t dw2 = 0;

dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
dw2 |= 0x8001;
/* set load_cs_sh_regs */
dw2 |= 0x01000000;
/* set load_per_context_state & load_gfx_sh_regs for GFX */
dw2 |= 0x10002;
}
5497 amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5498 amdgpu_ring_write(ring, dw2);
5499 amdgpu_ring_write(ring, 0);
5502 static void gfx_v11_0_ring_emit_gfx_shadow(struct amdgpu_ring *ring,
5503 u64 shadow_va, u64 csa_va,
5504 u64 gds_va, bool init_shadow,
5507 struct amdgpu_device *adev = ring->adev;
if (!adev->gfx.cp_gfx_shadow)
return;
5512 amdgpu_ring_write(ring, PACKET3(PACKET3_SET_Q_PREEMPTION_MODE, 7));
5513 amdgpu_ring_write(ring, lower_32_bits(shadow_va));
5514 amdgpu_ring_write(ring, upper_32_bits(shadow_va));
5515 amdgpu_ring_write(ring, lower_32_bits(gds_va));
5516 amdgpu_ring_write(ring, upper_32_bits(gds_va));
5517 amdgpu_ring_write(ring, lower_32_bits(csa_va));
5518 amdgpu_ring_write(ring, upper_32_bits(csa_va));
5519 amdgpu_ring_write(ring, shadow_va ?
5520 PACKET3_SET_Q_PREEMPTION_MODE_IB_VMID(vmid) : 0);
5521 amdgpu_ring_write(ring, init_shadow ?
5522 PACKET3_SET_Q_PREEMPTION_MODE_INIT_SHADOW_MEM : 0);
5525 static unsigned gfx_v11_0_ring_emit_init_cond_exec(struct amdgpu_ring *ring)
5529 amdgpu_ring_write(ring, PACKET3(PACKET3_COND_EXEC, 3));
5530 amdgpu_ring_write(ring, lower_32_bits(ring->cond_exe_gpu_addr));
5531 amdgpu_ring_write(ring, upper_32_bits(ring->cond_exe_gpu_addr));
5532 amdgpu_ring_write(ring, 0); /* discard following DWs if *cond_exec_gpu_addr==0 */
5533 ret = ring->wptr & ring->buf_mask;
5534 amdgpu_ring_write(ring, 0x55aa55aa); /* patch dummy value later */
5539 static void gfx_v11_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigned offset)
5542 BUG_ON(offset > ring->buf_mask);
5543 BUG_ON(ring->ring[offset] != 0x55aa55aa);
5545 cur = (ring->wptr - 1) & ring->buf_mask;
5546 if (likely(cur > offset))
5547 ring->ring[offset] = cur - offset;
5549 ring->ring[offset] = (ring->buf_mask + 1) - offset + cur;
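/*
 * The patch value is the number of dwords emitted after the COND_EXEC
 * placeholder, computed modulo the ring size. As a worked example with
 * buf_mask = 0xfff, offset = 0xffe and cur = 0x2, the write pointer has
 * wrapped, giving (0x1000 - 0xffe) + 0x2 = 4 dwords.
 */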
5552 static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
5555 struct amdgpu_device *adev = ring->adev;
5556 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
5557 struct amdgpu_ring *kiq_ring = &kiq->ring;
5558 unsigned long flags;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
5563 spin_lock_irqsave(&kiq->ring_lock, flags);
5565 if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
}
5570 /* assert preemption condition */
5571 amdgpu_ring_set_preempt_cond_exec(ring, false);
5573 /* assert IB preemption, emit the trailing fence */
5574 kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
5575 ring->trail_fence_gpu_addr,
++ring->trail_seq);
amdgpu_ring_commit(kiq_ring);
5579 spin_unlock_irqrestore(&kiq->ring_lock, flags);
5581 /* poll the trailing fence */
5582 for (i = 0; i < adev->usec_timeout; i++) {
5583 if (ring->trail_seq ==
le32_to_cpu(*(ring->trail_fence_cpu_addr)))
break;
udelay(1);
}
5589 if (i >= adev->usec_timeout) {
r = -EINVAL;
DRM_ERROR("ring %d failed to preempt ib\n", ring->idx);
}
5594 /* deassert preemption condition */
5595 amdgpu_ring_set_preempt_cond_exec(ring, true);
5599 static void gfx_v11_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
5601 struct amdgpu_device *adev = ring->adev;
5602 struct v10_de_ib_state de_payload = {0};
5603 uint64_t offset, gds_addr, de_payload_gpu_addr;
5604 void *de_payload_cpu_addr;
5607 if (ring->is_mes_queue) {
5608 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5609 gfx[0].gfx_meta_data) +
5610 offsetof(struct v10_gfx_meta_data, de_payload);
5611 de_payload_gpu_addr =
5612 amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5613 de_payload_cpu_addr =
5614 amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
5616 offset = offsetof(struct amdgpu_mes_ctx_meta_data,
5617 gfx[0].gds_backup) +
5618 offsetof(struct v10_gfx_meta_data, de_payload);
5619 gds_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
5621 offset = offsetof(struct v10_gfx_meta_data, de_payload);
5622 de_payload_gpu_addr = amdgpu_csa_vaddr(ring->adev) + offset;
5623 de_payload_cpu_addr = adev->virt.csa_cpu_addr + offset;
5625 gds_addr = ALIGN(amdgpu_csa_vaddr(ring->adev) +
5626 AMDGPU_CSA_SIZE - adev->gds.gds_size,
PAGE_SIZE);
}

de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
5631 de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
5633 cnt = (sizeof(de_payload) >> 2) + 4 - 2;
5634 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
5635 amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5636 WRITE_DATA_DST_SEL(8) |
WR_CONFIRM) |
WRITE_DATA_CACHE_POLICY(0));
5639 amdgpu_ring_write(ring, lower_32_bits(de_payload_gpu_addr));
5640 amdgpu_ring_write(ring, upper_32_bits(de_payload_gpu_addr));
if (resume)
amdgpu_ring_write_multiple(ring, de_payload_cpu_addr,
5644 sizeof(de_payload) >> 2);
else
amdgpu_ring_write_multiple(ring, (void *)&de_payload,
5647 sizeof(de_payload) >> 2);
5650 static void gfx_v11_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
5653 uint32_t v = secure ? FRAME_TMZ : 0;
5655 amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
5656 amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
5659 static void gfx_v11_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
5660 uint32_t reg_val_offs)
5661 {
5662 struct amdgpu_device *adev = ring->adev;
5664 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
5665 amdgpu_ring_write(ring, 0 | /* src: register */
5666 (5 << 8) | /* dst: memory */
5667 (1 << 20)); /* write confirm */
5668 amdgpu_ring_write(ring, reg);
5669 amdgpu_ring_write(ring, 0);
5670 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
5671 reg_val_offs * 4));
5672 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
5673 reg_val_offs * 4));
5674 }
5676 static void gfx_v11_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
5677 uint32_t val)
5678 {
5679 uint32_t cmd = 0;
5681 switch (ring->funcs->type) {
5682 case AMDGPU_RING_TYPE_GFX:
5683 cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
5684 break;
5685 case AMDGPU_RING_TYPE_KIQ:
5686 cmd = (1 << 16); /* no inc addr */
5687 break;
5688 default:
5689 cmd = WR_CONFIRM;
5690 break;
5691 }
5692 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5693 amdgpu_ring_write(ring, cmd);
5694 amdgpu_ring_write(ring, reg);
5695 amdgpu_ring_write(ring, 0);
5696 amdgpu_ring_write(ring, val);
5697 }
5699 static void gfx_v11_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
5700 uint32_t val, uint32_t mask)
5701 {
5702 gfx_v11_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
5703 }
5705 static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
5706 uint32_t reg0, uint32_t reg1,
5707 uint32_t ref, uint32_t mask)
5708 {
5709 int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
5711 gfx_v11_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1,
5712 ref, mask, 0x20);
5713 }
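/* Soft recovery: this is presumably a targeted wave kill (SQ_CMD CMD 0x03
 * in broadcast MODE 0x01), restricted by CHECK_VMID to the offending VMID,
 * so a hung shader can be terminated without a full ring reset.
 */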
5715 static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
5716 unsigned vmid)
5717 {
5718 struct amdgpu_device *adev = ring->adev;
5719 uint32_t value = 0;
5721 value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
5722 value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
5723 value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
5724 value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
5725 WREG32_SOC15(GC, 0, regSQ_CMD, value);
5726 }
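/* EOP (end-of-pipe) interrupt enables for the gfx pipes: ME0 pipe 0/1 map
 * to CP_INT_CNTL_RING0/RING1, and the TIME_STAMP and GENERIC0 interrupt
 * sources are always toggled together.
 */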
5728 static void
5729 gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
5730 uint32_t me, uint32_t pipe,
5731 enum amdgpu_interrupt_state state)
5732 {
5733 uint32_t cp_int_cntl, cp_int_cntl_reg;
5735 if (me == 0 && pipe == 0) {
5738 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING0);
5740 } else if (me == 0 && pipe == 1) {
5741 cp_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_INT_CNTL_RING1);
5743 } else if (me == 0) {
5744 DRM_DEBUG("invalid pipe %d\n", pipe);
5745 return;
5747 } else {
5748 DRM_DEBUG("invalid me %d\n", me);
5749 return;
5750 }
5752 switch (state) {
5753 case AMDGPU_IRQ_STATE_DISABLE:
5754 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
5755 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5756 TIME_STAMP_INT_ENABLE, 0);
5757 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5758 GENERIC0_INT_ENABLE, 0);
5759 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
5760 break;
5761 case AMDGPU_IRQ_STATE_ENABLE:
5762 cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
5763 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5764 TIME_STAMP_INT_ENABLE, 1);
5765 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
5766 GENERIC0_INT_ENABLE, 1);
5767 WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
5768 break;
5769 default:
5770 break;
5771 }
5772 }
5774 static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
5775 int me, int pipe,
5776 enum amdgpu_interrupt_state state)
5777 {
5778 u32 mec_int_cntl, mec_int_cntl_reg;
5780 /*
5781 * amdgpu controls only the first MEC. That's why this function only
5782 * handles the setting of interrupts for this specific MEC. All other
5783 * pipes' interrupts are set by amdkfd.
5784 */
5786 if (me == 1) {
5787 switch (pipe) {
5788 case 0:
5789 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
5790 break;
5791 case 1:
5792 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE1_INT_CNTL);
5793 break;
5794 case 2:
5795 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE2_INT_CNTL);
5796 break;
5797 case 3:
5798 mec_int_cntl_reg = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE3_INT_CNTL);
5799 break;
5800 default:
5801 DRM_DEBUG("invalid pipe %d\n", pipe);
5802 return;
5803 }
5804 } else {
5805 DRM_DEBUG("invalid me %d\n", me);
5806 return;
5807 }
5809 switch (state) {
5810 case AMDGPU_IRQ_STATE_DISABLE:
5811 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5812 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5813 TIME_STAMP_INT_ENABLE, 0);
5814 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5815 GENERIC0_INT_ENABLE, 0);
5816 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5817 break;
5818 case AMDGPU_IRQ_STATE_ENABLE:
5819 mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
5820 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5821 TIME_STAMP_INT_ENABLE, 1);
5822 mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
5823 GENERIC0_INT_ENABLE, 1);
5824 WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
5825 break;
5826 default:
5827 break;
5828 }
5829 }
5831 static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
5832 struct amdgpu_irq_src *src,
5833 unsigned type,
5834 enum amdgpu_interrupt_state state)
5835 {
5836 switch (type) {
5837 case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
5838 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 0, state);
5839 break;
5840 case AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP:
5841 gfx_v11_0_set_gfx_eop_interrupt_state(adev, 0, 1, state);
5842 break;
5843 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
5844 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
5845 break;
5846 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
5847 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
5848 break;
5849 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
5850 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
5851 break;
5852 case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
5853 gfx_v11_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
5854 break;
5855 default:
5856 break;
5857 }
5858 return 0;
5859 }
5861 static int gfx_v11_0_eop_irq(struct amdgpu_device *adev,
5862 struct amdgpu_irq_src *source,
5863 struct amdgpu_iv_entry *entry)
5864 {
5865 int i;
5866 u8 me_id, pipe_id, queue_id;
5867 struct amdgpu_ring *ring;
5868 uint32_t mes_queue_id = entry->src_data[0];
5870 DRM_DEBUG("IH: CP EOP\n");
5872 if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
5873 struct amdgpu_mes_queue *queue;
5875 mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
5877 spin_lock(&adev->mes.queue_id_lock);
5878 queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
5879 if (queue) {
5880 DRM_DEBUG("process mes queue id = %d\n", mes_queue_id);
5881 amdgpu_fence_process(queue->ring);
5882 }
5883 spin_unlock(&adev->mes.queue_id_lock);
5884 } else {
5885 me_id = (entry->ring_id & 0x0c) >> 2;
5886 pipe_id = (entry->ring_id & 0x03) >> 0;
5887 queue_id = (entry->ring_id & 0x70) >> 4;
5889 switch (me_id) {
5890 case 0:
5891 if (pipe_id == 0)
5892 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
5893 else
5894 amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
5895 break;
5896 case 1:
5897 case 2:
5898 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5899 ring = &adev->gfx.compute_ring[i];
5900 /* Per-queue interrupt is supported for MEC starting from VI.
5901 * The interrupt can only be enabled/disabled per pipe instead
5902 * of per queue.
5903 */
5904 if ((ring->me == me_id) &&
5905 (ring->pipe == pipe_id) &&
5906 (ring->queue == queue_id))
5907 amdgpu_fence_process(ring);
5908 }
5909 break;
5910 default:
5911 BUG();
5912 }
5913 }
5914 return 0;
5915 }
5916 static int gfx_v11_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
5917 struct amdgpu_irq_src *source,
5918 unsigned type,
5919 enum amdgpu_interrupt_state state)
5920 {
5921 switch (state) {
5922 case AMDGPU_IRQ_STATE_DISABLE:
5923 case AMDGPU_IRQ_STATE_ENABLE:
5924 WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
5925 PRIV_REG_INT_ENABLE,
5926 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5927 break;
5928 default:
5929 break;
5930 }
5932 return 0;
5933 }
5935 static int gfx_v11_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
5936 struct amdgpu_irq_src *source,
5937 unsigned type,
5938 enum amdgpu_interrupt_state state)
5939 {
5940 switch (state) {
5941 case AMDGPU_IRQ_STATE_DISABLE:
5942 case AMDGPU_IRQ_STATE_ENABLE:
5943 WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0,
5944 PRIV_INSTR_INT_ENABLE,
5945 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
5946 break;
5947 default:
5948 break;
5949 }
5951 return 0;
5952 }
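/* ring_id layout in the IV entry, as decoded below: bits [3:2] = ME,
 * bits [1:0] = pipe, bits [6:4] = queue. The fault is forwarded to the
 * scheduler of every ring matching the decoded triple.
 */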
5954 static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev,
5955 struct amdgpu_iv_entry *entry)
5956 {
5957 u8 me_id, pipe_id, queue_id;
5958 struct amdgpu_ring *ring;
5959 int i;
5961 me_id = (entry->ring_id & 0x0c) >> 2;
5962 pipe_id = (entry->ring_id & 0x03) >> 0;
5963 queue_id = (entry->ring_id & 0x70) >> 4;
5965 switch (me_id) {
5966 case 0:
5967 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
5968 ring = &adev->gfx.gfx_ring[i];
5969 /* we only enabled 1 gfx queue per pipe for now */
5970 if (ring->me == me_id && ring->pipe == pipe_id)
5971 drm_sched_fault(&ring->sched);
5972 }
5973 break;
5974 case 1:
5975 case 2:
5976 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5977 ring = &adev->gfx.compute_ring[i];
5978 if (ring->me == me_id && ring->pipe == pipe_id &&
5979 ring->queue == queue_id)
5980 drm_sched_fault(&ring->sched);
5981 }
5982 break;
5983 default:
5984 BUG();
5985 }
5986 }
5989 static int gfx_v11_0_priv_reg_irq(struct amdgpu_device *adev,
5990 struct amdgpu_irq_src *source,
5991 struct amdgpu_iv_entry *entry)
5992 {
5993 DRM_ERROR("Illegal register access in command stream\n");
5994 gfx_v11_0_handle_priv_fault(adev, entry);
5995 return 0;
5996 }
5998 static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
5999 struct amdgpu_irq_src *source,
6000 struct amdgpu_iv_entry *entry)
6001 {
6002 DRM_ERROR("Illegal instruction in command stream\n");
6003 gfx_v11_0_handle_priv_fault(adev, entry);
6004 return 0;
6005 }
6007 static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
6008 struct amdgpu_irq_src *source,
6009 struct amdgpu_iv_entry *entry)
6010 {
6011 if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
6012 return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);
6014 return 0;
6015 }
6018 static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
6019 struct amdgpu_irq_src *src,
6020 unsigned int type,
6021 enum amdgpu_interrupt_state state)
6022 {
6023 uint32_t tmp, target;
6024 struct amdgpu_ring *ring = &(adev->gfx.kiq[0].ring);
6026 target = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
6027 target += ring->pipe;
6029 switch (type) {
6030 case AMDGPU_CP_KIQ_IRQ_DRIVER0:
6031 if (state == AMDGPU_IRQ_STATE_DISABLE) {
6032 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6033 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6034 GENERIC2_INT_ENABLE, 0);
6035 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6037 tmp = RREG32_SOC15_IP(GC, target);
6038 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6039 GENERIC2_INT_ENABLE, 0);
6040 WREG32_SOC15_IP(GC, target, tmp);
6041 } else {
6042 tmp = RREG32_SOC15(GC, 0, regCPC_INT_CNTL);
6043 tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
6044 GENERIC2_INT_ENABLE, 1);
6045 WREG32_SOC15(GC, 0, regCPC_INT_CNTL, tmp);
6047 tmp = RREG32_SOC15_IP(GC, target);
6048 tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL,
6049 GENERIC2_INT_ENABLE, 1);
6050 WREG32_SOC15_IP(GC, target, tmp);
6051 }
6052 break;
6053 default:
6054 BUG(); /* kiq only support GENERIC2_INT now */
6055 break;
6056 }
6058 return 0;
6059 }
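/* Full GCR sync: the ACQUIRE_MEM below covers the whole address range
 * (CP_COHER_SIZE = ~0) and writes back / invalidates GL2, GLM, GL1, GLV,
 * GLK and GLI so that subsequent work sees coherent caches.
 */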
6061 static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
6062 {
6063 const unsigned int gcr_cntl =
6064 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
6065 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
6066 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
6067 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
6068 PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
6069 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
6070 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
6071 PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
6073 /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
6074 amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
6075 amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
6076 amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
6077 amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
6078 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
6079 amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
6080 amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
6081 amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
6082 }
6084 static const struct amd_ip_funcs gfx_v11_0_ip_funcs = {
6085 .name = "gfx_v11_0",
6086 .early_init = gfx_v11_0_early_init,
6087 .late_init = gfx_v11_0_late_init,
6088 .sw_init = gfx_v11_0_sw_init,
6089 .sw_fini = gfx_v11_0_sw_fini,
6090 .hw_init = gfx_v11_0_hw_init,
6091 .hw_fini = gfx_v11_0_hw_fini,
6092 .suspend = gfx_v11_0_suspend,
6093 .resume = gfx_v11_0_resume,
6094 .is_idle = gfx_v11_0_is_idle,
6095 .wait_for_idle = gfx_v11_0_wait_for_idle,
6096 .soft_reset = gfx_v11_0_soft_reset,
6097 .check_soft_reset = gfx_v11_0_check_soft_reset,
6098 .post_soft_reset = gfx_v11_0_post_soft_reset,
6099 .set_clockgating_state = gfx_v11_0_set_clockgating_state,
6100 .set_powergating_state = gfx_v11_0_set_powergating_state,
6101 .get_clockgating_state = gfx_v11_0_get_clockgating_state,
6102 };
6104 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
6105 .type = AMDGPU_RING_TYPE_GFX,
6106 .align_mask = 0xff,
6107 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6108 .support_64bit_ptrs = true,
6109 .secure_submission_supported = true,
6110 .get_rptr = gfx_v11_0_ring_get_rptr_gfx,
6111 .get_wptr = gfx_v11_0_ring_get_wptr_gfx,
6112 .set_wptr = gfx_v11_0_ring_set_wptr_gfx,
6113 .emit_frame_size = /* totally 242 maximum if 16 IBs */
6114 5 + /* COND_EXEC */
6115 9 + /* SET_Q_PREEMPTION_MODE */
6116 7 + /* PIPELINE_SYNC */
6117 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6118 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6119 2 + /* VM_FLUSH */
6120 8 + /* FENCE for VM_FLUSH */
6121 20 + /* GDS switch */
6122 5 + /* COND_EXEC */
6123 7 + /* HDP_flush */
6124 4 + /* VGT_flush */
6125 31 + /* DE_META */
6126 3 + /* CNTX_CTRL */
6127 5 + /* HDP_INVL */
6128 8 + 8 + /* FENCE x2 */
6129 8, /* gfx_v11_0_emit_mem_sync */
6130 .emit_ib_size = 4, /* gfx_v11_0_ring_emit_ib_gfx */
6131 .emit_ib = gfx_v11_0_ring_emit_ib_gfx,
6132 .emit_fence = gfx_v11_0_ring_emit_fence,
6133 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
6134 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
6135 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
6136 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
6137 .test_ring = gfx_v11_0_ring_test_ring,
6138 .test_ib = gfx_v11_0_ring_test_ib,
6139 .insert_nop = amdgpu_ring_insert_nop,
6140 .pad_ib = amdgpu_ring_generic_pad_ib,
6141 .emit_cntxcntl = gfx_v11_0_ring_emit_cntxcntl,
6142 .emit_gfx_shadow = gfx_v11_0_ring_emit_gfx_shadow,
6143 .init_cond_exec = gfx_v11_0_ring_emit_init_cond_exec,
6144 .patch_cond_exec = gfx_v11_0_ring_emit_patch_cond_exec,
6145 .preempt_ib = gfx_v11_0_ring_preempt_ib,
6146 .emit_frame_cntl = gfx_v11_0_ring_emit_frame_cntl,
6147 .emit_wreg = gfx_v11_0_ring_emit_wreg,
6148 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6149 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6150 .soft_recovery = gfx_v11_0_ring_soft_recovery,
6151 .emit_mem_sync = gfx_v11_0_emit_mem_sync,
6152 };
6154 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
6155 .type = AMDGPU_RING_TYPE_COMPUTE,
6156 .align_mask = 0xff,
6157 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6158 .support_64bit_ptrs = true,
6159 .get_rptr = gfx_v11_0_ring_get_rptr_compute,
6160 .get_wptr = gfx_v11_0_ring_get_wptr_compute,
6161 .set_wptr = gfx_v11_0_ring_set_wptr_compute,
6162 .emit_frame_size =
6163 20 + /* gfx_v11_0_ring_emit_gds_switch */
6164 7 + /* gfx_v11_0_ring_emit_hdp_flush */
6165 5 + /* hdp invalidate */
6166 7 + /* gfx_v11_0_ring_emit_pipeline_sync */
6167 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6168 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6169 2 + /* gfx_v11_0_ring_emit_vm_flush */
6170 8 + 8 + 8 + /* gfx_v11_0_ring_emit_fence x3 for user fence, vm fence */
6171 8, /* gfx_v11_0_emit_mem_sync */
6172 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
6173 .emit_ib = gfx_v11_0_ring_emit_ib_compute,
6174 .emit_fence = gfx_v11_0_ring_emit_fence,
6175 .emit_pipeline_sync = gfx_v11_0_ring_emit_pipeline_sync,
6176 .emit_vm_flush = gfx_v11_0_ring_emit_vm_flush,
6177 .emit_gds_switch = gfx_v11_0_ring_emit_gds_switch,
6178 .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
6179 .test_ring = gfx_v11_0_ring_test_ring,
6180 .test_ib = gfx_v11_0_ring_test_ib,
6181 .insert_nop = amdgpu_ring_insert_nop,
6182 .pad_ib = amdgpu_ring_generic_pad_ib,
6183 .emit_wreg = gfx_v11_0_ring_emit_wreg,
6184 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6185 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6186 .emit_mem_sync = gfx_v11_0_emit_mem_sync,
6187 };
6189 static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
6190 .type = AMDGPU_RING_TYPE_KIQ,
6191 .align_mask = 0xff,
6192 .nop = PACKET3(PACKET3_NOP, 0x3FFF),
6193 .support_64bit_ptrs = true,
6194 .get_rptr = gfx_v11_0_ring_get_rptr_compute,
6195 .get_wptr = gfx_v11_0_ring_get_wptr_compute,
6196 .set_wptr = gfx_v11_0_ring_set_wptr_compute,
6197 .emit_frame_size =
6198 20 + /* gfx_v11_0_ring_emit_gds_switch */
6199 7 + /* gfx_v11_0_ring_emit_hdp_flush */
6200 5 + /* hdp invalidate */
6201 7 + /* gfx_v11_0_ring_emit_pipeline_sync */
6202 SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
6203 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
6204 2 + /* gfx_v11_0_ring_emit_vm_flush */
6205 8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
6206 .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
6207 .emit_ib = gfx_v11_0_ring_emit_ib_compute,
6208 .emit_fence = gfx_v11_0_ring_emit_fence_kiq,
6209 .test_ring = gfx_v11_0_ring_test_ring,
6210 .test_ib = gfx_v11_0_ring_test_ib,
6211 .insert_nop = amdgpu_ring_insert_nop,
6212 .pad_ib = amdgpu_ring_generic_pad_ib,
6213 .emit_rreg = gfx_v11_0_ring_emit_rreg,
6214 .emit_wreg = gfx_v11_0_ring_emit_wreg,
6215 .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
6216 .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
6217 };
6219 static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
6220 {
6221 int i;
6223 adev->gfx.kiq[0].ring.funcs = &gfx_v11_0_ring_funcs_kiq;
6225 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
6226 adev->gfx.gfx_ring[i].funcs = &gfx_v11_0_ring_funcs_gfx;
6228 for (i = 0; i < adev->gfx.num_compute_rings; i++)
6229 adev->gfx.compute_ring[i].funcs = &gfx_v11_0_ring_funcs_compute;
6230 }
6232 static const struct amdgpu_irq_src_funcs gfx_v11_0_eop_irq_funcs = {
6233 .set = gfx_v11_0_set_eop_interrupt_state,
6234 .process = gfx_v11_0_eop_irq,
6235 };
6237 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_reg_irq_funcs = {
6238 .set = gfx_v11_0_set_priv_reg_fault_state,
6239 .process = gfx_v11_0_priv_reg_irq,
6240 };
6242 static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
6243 .set = gfx_v11_0_set_priv_inst_fault_state,
6244 .process = gfx_v11_0_priv_inst_irq,
6245 };
6247 static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
6248 .process = gfx_v11_0_rlc_gc_fed_irq,
6249 };
6251 static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
6252 {
6253 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
6254 adev->gfx.eop_irq.funcs = &gfx_v11_0_eop_irq_funcs;
6256 adev->gfx.priv_reg_irq.num_types = 1;
6257 adev->gfx.priv_reg_irq.funcs = &gfx_v11_0_priv_reg_irq_funcs;
6259 adev->gfx.priv_inst_irq.num_types = 1;
6260 adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
6262 adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
6263 adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
6264 }
6267 static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
6268 {
6269 if (adev->flags & AMD_IS_APU)
6270 adev->gfx.imu.mode = MISSION_MODE;
6271 else
6272 adev->gfx.imu.mode = DEBUG_MODE;
6274 adev->gfx.imu.funcs = &gfx_v11_0_imu_funcs;
6275 }
6277 static void gfx_v11_0_set_rlc_funcs(struct amdgpu_device *adev)
6278 {
6279 adev->gfx.rlc.funcs = &gfx_v11_0_rlc_funcs;
6280 }
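/* GDS defaults: gds_compute_max_wave_id is total_cu * 32 - 1, which
 * presumably budgets up to 32 waves per CU across all shader engines.
 */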
6282 static void gfx_v11_0_set_gds_init(struct amdgpu_device *adev)
6283 {
6284 unsigned total_cu = adev->gfx.config.max_cu_per_sh *
6285 adev->gfx.config.max_sh_per_se *
6286 adev->gfx.config.max_shader_engines;
6288 adev->gds.gds_size = 0x1000;
6289 adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1;
6290 adev->gds.gws_size = 64;
6291 adev->gds.oa_size = 16;
6292 }
6294 static void gfx_v11_0_set_mqd_funcs(struct amdgpu_device *adev)
6295 {
6296 /* set gfx eng mqd */
6297 adev->mqds[AMDGPU_HW_IP_GFX].mqd_size =
6298 sizeof(struct v11_gfx_mqd);
6299 adev->mqds[AMDGPU_HW_IP_GFX].init_mqd =
6300 gfx_v11_0_gfx_mqd_init;
6301 /* set compute eng mqd */
6302 adev->mqds[AMDGPU_HW_IP_COMPUTE].mqd_size =
6303 sizeof(struct v11_compute_mqd);
6304 adev->mqds[AMDGPU_HW_IP_COMPUTE].init_mqd =
6305 gfx_v11_0_compute_mqd_init;
6306 }
6308 static void gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(struct amdgpu_device *adev,
6309 u32 bitmap)
6310 {
6311 u32 data;
6313 if (!bitmap)
6314 return;
6316 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
6317 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
6319 WREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG, data);
6320 }
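/* The active-WGP mask for the currently selected shader array is the OR of
 * the fused (CC_GC_SHADER_ARRAY_CONFIG) and user-requested
 * (GC_USER_SHADER_ARRAY_CONFIG) inactive masks, inverted against a bitmask
 * of max_cu_per_sh / 2 WGPs (each WGP hosts two CUs).
 */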
6322 static u32 gfx_v11_0_get_wgp_active_bitmap_per_sh(struct amdgpu_device *adev)
6323 {
6324 u32 data, wgp_bitmask;
6325 data = RREG32_SOC15(GC, 0, regCC_GC_SHADER_ARRAY_CONFIG);
6326 data |= RREG32_SOC15(GC, 0, regGC_USER_SHADER_ARRAY_CONFIG);
6328 data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS_MASK;
6329 data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_WGPS__SHIFT;
6331 wgp_bitmask =
6332 amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh >> 1);
6334 return (~data) & wgp_bitmask;
6335 }
6337 static u32 gfx_v11_0_get_cu_active_bitmap_per_sh(struct amdgpu_device *adev)
6338 {
6339 u32 wgp_idx, wgp_active_bitmap;
6340 u32 cu_bitmap_per_wgp, cu_active_bitmap;
6342 wgp_active_bitmap = gfx_v11_0_get_wgp_active_bitmap_per_sh(adev);
6343 cu_active_bitmap = 0;
6345 for (wgp_idx = 0; wgp_idx < 16; wgp_idx++) {
6346 /* if there is one WGP enabled, it means 2 CUs will be enabled */
6347 cu_bitmap_per_wgp = 3 << (2 * wgp_idx);
6348 if (wgp_active_bitmap & (1 << wgp_idx))
6349 cu_active_bitmap |= cu_bitmap_per_wgp;
6350 }
6352 return cu_active_bitmap;
6353 }
6355 static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
6356 struct amdgpu_cu_info *cu_info)
6357 {
6358 int i, j, k, counter, active_cu_number = 0;
6359 u32 mask, bitmap;
6360 unsigned disable_masks[8 * 2];
6362 if (!adev || !cu_info)
6363 return -EINVAL;
6365 amdgpu_gfx_parse_disable_cu(disable_masks, 8, 2);
6367 mutex_lock(&adev->grbm_idx_mutex);
6368 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6369 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6370 mask = 1;
6371 counter = 0;
6372 gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff, 0);
6373 if (i < 8 && j < 2)
6374 gfx_v11_0_set_user_wgp_inactive_bitmap_per_sh(
6375 adev, disable_masks[i * 2 + j]);
6376 bitmap = gfx_v11_0_get_cu_active_bitmap_per_sh(adev);
6378 /*
6379 * GFX11 could support more than 4 SEs, while the bitmap
6380 * in cu_info struct is 4x4 and ioctl interface struct
6381 * drm_amdgpu_info_device should keep stable.
6382 * So we use last two columns of bitmap to store cu mask for
6383 * SEs 4 to 7, the layout of the bitmap is as below:
6384 * SE0: {SH0,SH1} --> {bitmap[0][0], bitmap[0][1]}
6385 * SE1: {SH0,SH1} --> {bitmap[1][0], bitmap[1][1]}
6386 * SE2: {SH0,SH1} --> {bitmap[2][0], bitmap[2][1]}
6387 * SE3: {SH0,SH1} --> {bitmap[3][0], bitmap[3][1]}
6388 * SE4: {SH0,SH1} --> {bitmap[0][2], bitmap[0][3]}
6389 * SE5: {SH0,SH1} --> {bitmap[1][2], bitmap[1][3]}
6390 * SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
6391 * SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
6392 */
6393 cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
6395 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
6396 if (bitmap & mask)
6397 counter++;
6399 mask <<= 1;
6400 }
6401 active_cu_number += counter;
6402 }
6403 }
6404 gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
6405 mutex_unlock(&adev->grbm_idx_mutex);
6407 cu_info->number = active_cu_number;
6408 cu_info->simd_per_cu = NUM_SIMD_PER_CU;
6410 return 0;
6411 }
6413 const struct amdgpu_ip_block_version gfx_v11_0_ip_block =
6414 {
6415 .type = AMD_IP_BLOCK_TYPE_GFX,
6416 .major = 11,
6417 .minor = 0,
6418 .rev = 0,
6419 .funcs = &gfx_v11_0_ip_funcs,
6420 };