/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

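/*
 * The shared SOC15 DPG helpers (WREG32_SOC15_DPG_MODE and friends) are
 * written against the legacy mm-prefixed register names, while the VCN
 * 4.0.3 headers only export reg-prefixed symbols; alias the registers the
 * helpers touch so the common code can be reused unchanged.
 */
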
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

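/*
 * Per-instance video aperture base offsets, carried over from earlier VCN
 * generations (hence the _2_0/_3_0 suffixes in the names).
 */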
#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300

static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	/* VCN DEC TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		vcn_inst = GET_INST(VCN, i);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
		ring->doorbell_index =
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
			9 * vcn_inst;
		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = cpu_to_le32(true);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

	return 0;
}

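/*
 * Note: the fw_shared flags written in sw_init above form the
 * driver/firmware handshake for the unified-queue model: present_flag_0
 * advertises that a single unified (enc/dec) queue per instance is in use,
 * and sq.is_enabled marks the shared queue descriptor as valid.
 */
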
/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);
		ring = &adev->vcn.inst[i].ring_enc[0];

		if (ring->use_doorbell) {
			adev->nbio.funcs->vcn_doorbell_range(
				adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					9 * vcn_inst,
				adev->vcn.inst[i].aid_id);

			WREG32_SOC15(
				VCN, GET_INST(VCN, ring->me),
				regVCN_RB1_DB_CTRL,
				ring->doorbell_index
						<< VCN_RB1_DB_CTRL__OFFSET__SHIFT |
					VCN_RB1_DB_CTRL__EN_MASK);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
			     (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

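/*
 * Doorbell note: the NBIO range programmed in hw_init routes doorbell
 * writes to the right VCN instance, while VCN_RB1_DB_CTRL selects the slot
 * the ring itself uses. The "<< 1" converts the 64-bit doorbell index into
 * 32-bit doorbell units, and instances appear to be spaced nine slots
 * apart on this part (the 9 * vcn_inst stride).
 */
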
/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
		vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vcn_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_3_hw_init(adev);

	return r;
}

/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	vcn_inst = GET_INST(VCN, inst_idx);
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_lo));
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst,
			     regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
		     AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
		     AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

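/*
 * Cache window layout sketch: the VCPU sees three cached windows carved out
 * of one BO at inst->gpu_addr. With PSP firmware loading, the fw image
 * lives in the TMR (so offset = 0); otherwise it sits at the start of the
 * BO and offset = size, so the stack and context windows follow it:
 *
 *   [fw image: size][stack: AMDGPU_VCN_STACK_SIZE][context: AMDGPU_VCN_CONTEXT_SIZE]
 */
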
/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

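/*
 * When @indirect is set, the WREG32_SOC15_DPG_MODE writes above are not
 * issued to the registers directly; they are staged as reg/value pairs in
 * the instance's dpg_sram buffer and later handed to the PSP in one batch
 * (see the psp_update_vcn_sram() call in vcn_v4_0_3_start_dpg_mode()).
 */
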
/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

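/*
 * CGC register semantics, roughly: the UVD_CGC_GATE bits force individual
 * sub-blocks' clocks off when set, while the *_MODE fields in UVD_CGC_CTRL
 * choose between hardware-dynamic and software-controlled gating for each
 * sub-block; clearing both leaves the clocks running ungated.
 */
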
/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
						     int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		      UVD_CGC_CTRL__MPEG2_MODE_MASK |
		      UVD_CGC_CTRL__REGS_MODE_MASK |
		      UVD_CGC_CTRL__RBC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		      UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		      UVD_CGC_CTRL__IDCT_MODE_MASK |
		      UVD_CGC_CTRL__MPRD_MODE_MASK |
		      UVD_CGC_CTRL__MPC_MODE_MASK |
		      UVD_CGC_CTRL__LBSI_MODE_MASK |
		      UVD_CGC_CTRL__LRBBM_MODE_MASK |
		      UVD_CGC_CTRL__WCB_MODE_MASK |
		      UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	int vcn_inst;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, inst_idx);
	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

	if (indirect) {
		DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
			      inst_idx, adev->vcn.inst[inst_idx].aid_id);
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
		WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
				      adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
		     upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
		     ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	return 0;
}

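/*
 * Ring-reset handshake: FW_QUEUE_RING_RESET is set in fw_shared while the
 * RB is disabled and its pointers are rewritten, so the firmware ignores
 * the momentarily inconsistent ring state; clearing it together with
 * FW_QUEUE_DPG_HOLD_OFF afterwards releases the firmware to run the queue.
 */
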
/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	int i, j, k, r, vcn_inst;
	uint32_t tmp;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		vcn_inst = GET_INST(VCN, i);
		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
		      UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_3_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__CLK_EN_MASK,
			 ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
			     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
			     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
			     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
			     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_3_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, vcn_inst,
						      regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_DEV_ERROR(adev->dev,
				      "VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
			 UVD_MASTINT_EN__VCPU_EN_MASK,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
			     upper_32_bits(ring->gpu_addr));

		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
			     ring->ring_size / sizeof(uint32_t));

		/* resetting ring, fw should not check RB ring */
		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
		fw_shared->sq.queue_mode &=
			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
	}
	return 0;
}

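/*
 * Boot poll note: the firmware reports boot completion in the low bits of
 * UVD_STATUS, which is what the "status & 2" test above waits for; each of
 * the ten outer attempts pulses BLK_RST to re-run the boot sequence before
 * giving up.
 */
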
/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	int i, r = 0, vcn_inst;
	uint32_t tmp;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_3_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
				       UVD_STATUS__IDLE, 0x7);
		if (r)
			goto Done;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__READ_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* stall UMC channel */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* Unblock VCPU Register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
			 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* put VCPU block into reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* reset LMI UMC/LMI/VCPU */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* clear VCN status */
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_3_enable_clock_gating(adev, i);
	}
Done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

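/*
 * Shutdown ordering in vcn_v4_0_3_stop(): wait for the VCPU to go idle,
 * let the LMI interface drain its outstanding reads/writes, stall the UMC
 * arbiter, and only then assert the soft resets; the usual rationale is
 * that resetting a busy LMI/UMC path can wedge outstanding memory
 * requests.
 */
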
/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
				     struct dpg_pause_state *new_state)
{
	return 0;
}

/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
			    regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	}
}

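/*
 * Two wptr commit paths: with a doorbell, the new wptr is mirrored to the
 * CPU-visible wptr page and then rung through the doorbell aperture so the
 * hardware picks it up without an MMIO register write; without one, the
 * driver falls back to writing UVD_RB_WPTR directly.
 */
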
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		vcn_inst = GET_INST(VCN, i);
		adev->vcn.inst[i].aid_id =
			vcn_inst / adev->vcn.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}

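/*
 * aid_id derivation: GET_INST() maps the logical instance to its physical
 * index, and dividing by num_inst_per_aid identifies which AID that
 * physical VCN instance lives on; the interrupt handler walks the same
 * mapping in reverse.
 */
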
/**
 * vcn_v4_0_3_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
			UVD_STATUS__IDLE);
	}

	return ret;
}

/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (enable) {
			if (RREG32_SOC15(VCN, GET_INST(VCN, i),
					 regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_3_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}

/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_3_stop(adev);
	else
		ret = vcn_v4_0_3_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int type,
					  enum amdgpu_interrupt_state state)
{
	return 0;
}

/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
		if (adev->vcn.inst[inst].aid_id == i)
			break;

	if (inst >= adev->vcn.num_vcn_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown VCN instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

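/*
 * The IH entry carries the node id of the AID that raised the interrupt;
 * node_id_to_phys_map translates it to a physical AID index, and the loop
 * in vcn_v4_0_3_process_interrupt() picks the VCN instance whose aid_id
 * matches, since the fence must be processed on that instance's ring.
 */
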
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
	.set = vcn_v4_0_3_set_interrupt_state,
	.process = vcn_v4_0_3_process_interrupt,
};

/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst->irq.num_types++;
	}
	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}

static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
	.name = "vcn_v4_0_3",
	.early_init = vcn_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_3_sw_init,
	.sw_fini = vcn_v4_0_3_sw_fini,
	.hw_init = vcn_v4_0_3_hw_init,
	.hw_fini = vcn_v4_0_3_hw_fini,
	.suspend = vcn_v4_0_3_suspend,
	.resume = vcn_v4_0_3_resume,
	.is_idle = vcn_v4_0_3_is_idle,
	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &vcn_v4_0_3_ip_funcs,
};