/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200
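
/*
 * Firmware-internal register indices: the VCPU addresses these registers
 * through its own index space rather than the SOC15 MMIO map, so sw_init
 * caches them in adev->vcn.internal.* for the ring packet helpers.
 */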
#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

#define VCN25_MAX_HW_INSTANCES_ARCTURUS				2
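/* VCN 2.5 (Arcturus) exposes two instances; the 2.6 variants below reuse these callbacks */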

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
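		/*
		 * The VF does not sample the harvest fuses; assume a fixed
		 * configuration of two instances with one encode ring each.
		 */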
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

		adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
						 AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;
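
		/*
		 * Doorbell layout: under SR-IOV each instance uses a compact
		 * pair of doorbells (dec + enc0) as expected by the MMSCH,
		 * while bare metal spaces instances eight doorbells apart
		 * (dec at offset 0, enc rings from offset 2).
		 */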
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);

		if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
			ring->vm_hub = AMDGPU_MMHUB_1;
		else
			ring->vm_hub = AMDGPU_MMHUB_0;

		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
				ring->vm_hub = AMDGPU_MMHUB_1;
			else
				ring->vm_hub = AMDGPU_MMHUB_0;

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}
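
/*
 * Same window programming as vcn_v2_5_mc_resume() above, but issued through
 * WREG32_SOC15_DPG_MODE so the writes can be staged in the DPG scratch SRAM
 * when @indirect is set.
 */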
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}
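
/*
 * DPG-mode variant of the SW clock gating setup: the same UVD_CGC_CTRL
 * programming as above, staged through WREG32_SOC15_DPG_MODE.
 */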
static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
				bool indirect)
{
	uint32_t tmp;

	if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(2, 6, 0))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
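
	/*
	 * With indirect programming the register writes below are staged in
	 * the per-instance DPG scratch buffer and only reach the hardware
	 * once psp_update_vcn_sram() submits the buffer further down.
	 */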
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	vcn_v2_6_enable_ras(adev, inst_idx, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
				    (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
					       (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
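
	/*
	 * Flag the decode queue as being reset so the firmware does not pick
	 * up stale ring state; the flag is cleared again once the new
	 * read/write pointers are in place.
	 */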
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
			lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
	 * memory descriptor location
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
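
	/*
	 * 0x10000001 requests initialization; the MMSCH signals completion by
	 * setting 0x10000002 in the response mailbox, which is polled below.
	 */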
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;
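
	/* MMSCH descriptor sizes and offsets are all maintained in dwords */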

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}
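
/*
 * Drain the DPG rings (read pointers catch up with the write pointers) and
 * then drop the instance out of dynamic power gating.
 */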
static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
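
/*
 * Pause or resume the DPG firmware for one instance. While paused, the
 * encode ring registers are re-armed behind the firmware, hence the
 * stall/unstall sequence around the WPTR/RPTR writes.
 */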
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
1573 .type = AMDGPU_RING_TYPE_VCN_DEC,
1575 .secure_submission_supported = true,
1576 .get_rptr = vcn_v2_5_dec_ring_get_rptr,
1577 .get_wptr = vcn_v2_5_dec_ring_get_wptr,
1578 .set_wptr = vcn_v2_5_dec_ring_set_wptr,
1580 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
1581 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
1582 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
1583 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
1585 .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
1586 .emit_ib = vcn_v2_0_dec_ring_emit_ib,
1587 .emit_fence = vcn_v2_0_dec_ring_emit_fence,
1588 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
1589 .test_ring = vcn_v2_0_dec_ring_test_ring,
1590 .test_ib = amdgpu_vcn_dec_ring_test_ib,
1591 .insert_nop = vcn_v2_0_dec_ring_insert_nop,
1592 .insert_start = vcn_v2_0_dec_ring_insert_start,
1593 .insert_end = vcn_v2_0_dec_ring_insert_end,
1594 .pad_ib = amdgpu_ring_generic_pad_ib,
1595 .begin_use = amdgpu_vcn_ring_begin_use,
1596 .end_use = amdgpu_vcn_ring_end_use,
1597 .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
1598 .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
1599 .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	case VCN_2_6__SRCID_UVD_POISON:
		amdgpu_vcn_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
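		/* num_types: one for the dec ring plus one per enc ring */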
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			 instance, sub_block);

	return poison_stat;
}

static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
	},
};

static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[VCN_HWIP][0]) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;
		break;
	default:
		break;
	}
}