/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
#include "vcn_v4_0.h"

#include "vcn/vcn_4_0_0_offset.h"
#include "vcn/vcn_4_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#include <drm/drm_drv.h>

#define mmUVD_DPG_LMA_CTL regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300

#define VCN_HARVEST_MMSCH 0

#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static int vcn_v4_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);

/**
 * vcn_v4_0_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.harvest_config = VCN_HARVEST_MMSCH;
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
				adev->vcn.harvest_config |= 1 << i;
				dev_info(adev->dev, "VCN%d is disabled by hypervisor\n", i);
			}
		}
	}

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_set_unified_ring_funcs(adev);
	vcn_v4_0_set_irq_funcs(adev);
	vcn_v4_0_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v4_0_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
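		/*
		 * Doorbell layout differs between SR-IOV and bare metal: under
		 * SR-IOV each instance gets a compact block of
		 * (num_enc_rings + 1) doorbells, while on bare metal every
		 * instance is spaced 8 doorbells apart with the unified ring
		 * at offset 2. The vcn_ring0_1 index is kept in 64-bit
		 * doorbell units, hence the << 1 to convert it to the 32-bit
		 * units used here.
		 */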
		if (amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + i * (adev->vcn.num_enc_rings + 1) + 1;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + 8 * i;
		ring->vm_hub = AMDGPU_MMHUB_0;
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;
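
		/*
		 * Advertise to the VCN firmware which SMU DPM interface it
		 * should use for clock/power requests; APUs and dGPUs use
		 * different interfaces.
		 */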
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
		fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
			AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;

		if (amdgpu_sriov_vf(adev))
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_pause_dpg_mode;

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v4_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v4_0_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	if (amdgpu_sriov_vf(adev)) {
		r = vcn_v4_0_start_sriov(adev);
		if (r)
			goto done;

		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];
			ring->wptr = 0;
			ring->wptr_old = 0;
			vcn_v4_0_unified_ring_set_wptr(ring);
			ring->sched.ready = true;
		}
	} else {
		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;

			ring = &adev->vcn.inst[i].ring_enc[0];

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
					((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
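
/*
 * "DPG Mode" in the message above refers to dynamic power gating, where an
 * instance is power-gated between jobs under firmware control; "SPG Mode"
 * (static power gating) only gates the block when it is fully idle.
 */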

/**
 * vcn_v4_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
	}

	return 0;
}

/**
 * vcn_v4_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v4_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v4_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_hw_init(adev);

	return r;
}

/**
 * vcn_v4_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
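
	/*
	 * The VCPU sees three cached windows backed by the VCN BO (or by the
	 * PSP TMR when the PSP loads the firmware): window 0 holds the
	 * firmware image, window 1 the stack and window 2 the context,
	 * followed by a non-cached window for the fw_shared structure.
	 */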
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable static power gating for VCN block
 */
static void vcn_v4_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
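
		/*
		 * Program the per-tile power states into UVD_PGFSM_CONFIG,
		 * then poll UVD_PGFSM_STATUS (mask 0x3F3FFFFF covers all the
		 * tile status fields) until the power gating state machine
		 * reports the requested state.
		 */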
		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS,
			UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF);
	} else {
		uint32_t value;

		value = (inst) ? 0x2200800 : 0;
		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);

		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, value, 0x3F3FFFFF);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;

	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}

/**
 * vcn_v4_0_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable static power gating for VCN block
 */
static void vcn_v4_0_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDS_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTC_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDTB_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT
			| 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT);
		WREG32_SOC15(VCN, inst, regUVD_PGFSM_CONFIG, data);

		data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDS_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTC_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDTB_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT
			| 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_PGFSM_STATUS, data, 0x3F3FFFFF);
	}
}

/**
 * vcn_v4_0_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
		int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

static void vcn_v4_0_enable_ras(struct amdgpu_device *adev, int inst_idx,
		bool indirect)
{
	uint32_t tmp;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, regUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

/**
 * vcn_v4_0_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
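
	/*
	 * With @indirect set, the WREG32_SOC15_DPG_MODE() calls below do not
	 * touch the hardware; they append register/value pairs to the DPG
	 * scratch buffer starting at dpg_sram_curr_addr, which is handed to
	 * the PSP via psp_update_vcn_sram() once the whole sequence has been
	 * recorded.
	 */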
	/* disable clock gating */
	vcn_v4_0_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	vcn_v4_0_enable_ras(adev, inst_idx, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);
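
	/*
	 * Ring buffer reset handshake with the firmware: RB1 is disabled and
	 * FW_QUEUE_RING_RESET is raised while the read/write pointers are
	 * zeroed, then RB1 is re-enabled and the reset/hold-off flags are
	 * cleared so the firmware starts fetching from a clean ring.
	 */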
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}

/**
 * vcn_v4_0_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v4_0_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
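
		/*
		 * Boot poll: the firmware reports readiness through the
		 * VCPU_REPORT field of UVD_STATUS (the "status & 2" test
		 * below). Up to 10 attempts are made, pulsing BLK_RST between
		 * attempts to retry the boot if the VCPU does not come up.
		 */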
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				r = -1;
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev, "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
					UVD_VCPU_CNTL__BLK_RST_MASK,
					~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
					~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
	}

	return 0;
}

static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring_enc;
	uint64_t cache_addr;
	uint64_t rb_enc_addr;
	uint64_t ctx_addr;
	uint32_t param, resp, expected;
	uint32_t offset, cache_size;
	uint32_t tmp, timeout;

	struct amdgpu_mm_table *table = &adev->virt.mm_table;
	uint32_t *table_loc;
	uint32_t table_size;
	uint32_t size, size_dw;
	uint32_t init_status;
	uint32_t enabled_vcn;

	struct mmsch_v4_0_cmd_direct_write
		direct_wt = { {0} };
	struct mmsch_v4_0_cmd_direct_read_modify_write
		direct_rd_mod_wt = { {0} };
	struct mmsch_v4_0_cmd_end end = { {0} };
	struct mmsch_v4_0_init_header header;

	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
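
	/*
	 * Under SR-IOV the guest must not program VCN registers directly.
	 * Instead it records the whole init sequence as MMSCH commands in
	 * the mm_table, which the MMSCH firmware replays on its behalf. The
	 * finished table looks roughly like:
	 *
	 *   struct mmsch_v4_0_init_header  (per-instance offset/size/status)
	 *   per-instance command stream:
	 *     DIRECT_READ_MODIFY_WRITE / DIRECT_WRITE packets
	 *     MMSCH_COMMAND__END
	 *
	 * The table address is then passed through MMSCH_VF_CTX_ADDR_LO/HI
	 * and execution is kicked off via MMSCH_VF_MAILBOX_HOST.
	 */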
	direct_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type =
		MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type =
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
	for (i = 0; i < AMDGPU_MAX_VCN_INSTANCES; i++) {
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = 0;
		header.inst[i].table_size = 0;
	}

	table_loc = (uint32_t *)table->cpu_addr;
	table_loc += header.total_size;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		table_size = 0;

		MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);

		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_VCPU_CACHE_OFFSET0),
				0);
		} else {
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = cache_size;
			MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
				regUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE0),
			cache_size);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);

		cache_addr = adev->vcn.inst[i].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE;
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(cache_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		rb_setup = &fw_shared->rb_setup;

		ring_enc = &adev->vcn.inst[i].ring_enc[0];
		rb_enc_addr = ring_enc->gpu_addr;

		rb_setup->is_rb_enabled_flags |= RB_ENABLED;
		rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr);
		rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr);
		rb_setup->rb_size = ring_enc->ring_size / 4;
		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
			regUVD_VCPU_NONCACHE_SIZE0),
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));

		/* add end packet */
		MMSCH_V4_0_INSERT_END();

		/* refine header */
		header.inst[i].init_status = 0;
		header.inst[i].table_offset = header.total_size;
		header.inst[i].table_size = table_size;
		header.total_size += table_size;
	}

	/* Update init table header in memory */
	size = sizeof(struct mmsch_v4_0_init_header);
	table_loc = (uint32_t *)table->cpu_addr;
	memcpy((void *)table_loc, &header, size);

	/* message MMSCH (in VCN[0]) to initialize this client
	 * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
	 * of memory descriptor location
	 */
	ctx_addr = table->gpu_addr;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr));
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr));

	/* 2, update vmid of descriptor */
	tmp = RREG32_SOC15(VCN, 0, regMMSCH_VF_VMID);
	tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, regMMSCH_VF_VMID, tmp);

	/* 3, notify mmsch about the size of this descriptor */
	size = header.total_size;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP, 0);

	/* 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	param = 0x00000001;
	WREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_HOST, param);
	tmp = 0;
	timeout = 1000;
	resp = 0;
	expected = MMSCH_VF_MAILBOX_RESP__OK;
	while (resp != expected) {
		resp = RREG32_SOC15(VCN, 0, regMMSCH_VF_MAILBOX_RESP);
		if (resp != 0)
			break;

		udelay(10);
		tmp = tmp + 10;
		if (tmp >= timeout) {
			DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\
				" waiting for regMMSCH_VF_MAILBOX_RESP "\
				"(expected=0x%08x, readback=0x%08x)\n",
				tmp, expected, resp);
			return -EBUSY;
		}
	}

	enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0;
	init_status = ((struct mmsch_v4_0_init_header *)(table_loc))->inst[enabled_vcn].init_status;
	if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE
	    && init_status != MMSCH_VF_ENGINE_STATUS__PASS)
		DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\
			"status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status);

	return 0;
}

/**
 * vcn_v4_0_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}

/**
 * vcn_v4_0_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__READ_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v4_0_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v4_0_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
		struct dpg_pause_state *new_state)
{
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v4_0_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
		struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&job->base.entity->fence_seq))
		return -EINVAL;

	/* if VCN0 is harvested, we can't support AV1 */
	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
		[AMDGPU_RING_PRIO_0].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);

	return 0;
}

static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
		uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	/* Check length */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v4_0_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}

#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)

#define RADEON_VCN_ENGINE_INFO (0x30000001)
#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16

#define RENCODE_ENCODE_STANDARD_AV1 2
#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64

/* return the offset in ib if id is found, -1 otherwise
 * to speed up the searching we only search up to max_offset
 */
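/* Each IB package appears to be laid out as { size_in_bytes, id, payload... },
 * hence the walk below advances by ptr[i]/4 dwords per package.
 */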
static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
{
	int i;

	for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
		if (ib->ptr[i + 1] == id)
			return i;
	}

	return -1;
}

static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
		struct amdgpu_job *job,
		struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	struct amdgpu_vcn_decode_buffer *decode_buffer;
	uint64_t addr;
	uint32_t val;
	int idx;

	/* The first instance can decode anything */
	if (!ring->me)
		return 0;

	/* RADEON_VCN_ENGINE_INFO is at the top of ib block */
	idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
			RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
	if (idx < 0) /* engine info is missing */
		return 0;

	val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
	if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
		decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];

		if (!(decode_buffer->valid_buf_flag & 0x1))
			return 0;

		addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
			decode_buffer->msg_buffer_address_lo;
		return vcn_v4_0_dec_msg(p, job, addr);
	} else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
		idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
				RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
		if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
			return vcn_v4_0_limit_sched(p, job);
	}

	return 0;
}

static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
	.patch_cs_in_place = vcn_v4_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
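
/*
 * The unified ring reuses the VCN 2.0 encode packet emitters (emit_ib,
 * fence, VM flush, wreg/reg_wait), presumably because the encode packet
 * format is unchanged; only the rptr/wptr handling and the CS patching
 * above are VCN 4.0 specific.
 */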

/**
 * vcn_v4_0_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;

		DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n", i);
	}
}

/**
 * vcn_v4_0_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}
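
/*
 * ret starts at 1 and is ANDed with each unharvested instance's idle
 * status, so a single busy instance reports the whole block as not idle.
 */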

/**
 * vcn_v4_0_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v4_0_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_set_clockgating_state(void *handle, enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

/**
 * vcn_v4_0_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_set_powergating_state(void *handle, enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	/* for SR-IOV, the guest should not control VCN power gating;
	 * MMSCH FW controls both power gating and clock gating, so the
	 * guest should avoid touching CGC and PG
	 */
	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_stop(adev);
	else
		ret = vcn_v4_0_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v4_0_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	unsigned type, enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block RAS interrupt state
 */
static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
	struct amdgpu_irq_src *source,
	unsigned int type,
	enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
	struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
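
/*
 * IH routing summary: the client id selects the VCN instance (VCN vs VCN1)
 * and the source id selects the event. With the single unified ring, only
 * the ENC general-purpose interrupt is expected to signal fences here;
 * anything else is logged as unhandled.
 */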

static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
	.set = vcn_v4_0_set_interrupt_state,
	.process = vcn_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
	.set = vcn_v4_0_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

/**
 * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;

		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
	.name = "vcn_v4_0",
	.early_init = vcn_v4_0_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_sw_init,
	.sw_fini = vcn_v4_0_sw_fini,
	.hw_init = vcn_v4_0_hw_init,
	.hw_fini = vcn_v4_0_hw_fini,
	.suspend = vcn_v4_0_suspend,
	.resume = vcn_v4_0_resume,
	.is_idle = vcn_v4_0_is_idle,
	.wait_for_idle = vcn_v4_0_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vcn_v4_0_ip_funcs,
};

static uint32_t vcn_v4_0_query_poison_by_instance(struct amdgpu_device *adev,
	uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V4_0_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n",
			instance, sub_block);

	return poison_stat;
}

static bool vcn_v4_0_query_ras_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V4_0_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v4_0_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}
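
/*
 * poison_stat accumulates hits across every instance/sub-block pair and is
 * collapsed to a bool, so callers only learn that some instance reported
 * poison; the per-instance dev_info in the helper above carries the detail.
 */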

const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
	.query_poison_status = vcn_v4_0_query_ras_poison_status,
};

static struct amdgpu_vcn_ras vcn_v4_0_ras = {
	.ras_block = {
		.hw_ops = &vcn_v4_0_ras_hw_ops,
		.ras_late_init = amdgpu_vcn_ras_late_init,
	},
};

static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[VCN_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
		adev->vcn.ras = &vcn_v4_0_ras;
		break;
	default:
		break;
	}
}