/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX
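/*
 * The shared VCN DPG helpers (the WREG32_SOC15_DPG_MODE() path) still
 * reference the legacy mm-prefixed register names; the aliases above remap
 * them onto this IP's reg-prefixed offsets so the common code works
 * unmodified.
 */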
#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);

	return 0;
}
/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i;
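		/*
		 * doorbell_index values are kept in 64-bit doorbell units, so
		 * the << 1 converts to the 32-bit doorbell space the ring
		 * uses; the 9 * i stride then gives every VCN instance its
		 * own block of doorbells.
		 */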
		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				AMDGPU_RING_PRIO_DEFAULT,
				&adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = cpu_to_le32(true);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

	return 0;
}
/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ring = &adev->vcn.inst[i].ring_enc[0];

		if (ring->use_doorbell) {
			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 9 * i,
				adev->vcn.inst[i].aid_id);

			WREG32_SOC15(VCN, ring->me, regVCN_RB1_DB_CTRL,
				ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
				VCN_RB1_DB_CTRL__EN_MASK);
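			/*
			 * Doorbell routing needs both sides: the NBIO range
			 * programmed above steers the doorbell aperture to
			 * this VCN instance, and VCN_RB1_DB_CTRL enables the
			 * doorbell for ring buffer 1 inside the VCN block
			 * itself.
			 */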
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}
/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block and mark the ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
		vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}
/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vcn_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_3_hw_init(adev);

	return r;
}
/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
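	/*
	 * The VCPU sees three cached windows carved out of one buffer: the
	 * firmware image first, then AMDGPU_VCN_STACK_SIZE of stack, then
	 * AMDGPU_VCN_CONTEXT_SIZE of context; "offset" tracks where the
	 * firmware window ends so the later windows land behind it.
	 */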
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0,
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}
/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_CGC_GATE, 0, 0xFFFFFFFF);
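	/*
	 * The wait above confirms every CGC gate bit has actually read back
	 * as 0 (clocks ungated) before the per-block mode fields are
	 * switched to software control below.
	 */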
	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
		int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst_idx, regUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
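	/*
	 * In indirect mode the WREG32_SOC15_DPG_MODE() calls below do not
	 * touch the hardware directly; they append reg/value pairs at
	 * dpg_sram_curr_addr, and the accumulated list is handed to the PSP
	 * via psp_update_vcn_sram() once programming is complete.
	 */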
	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, inst_idx, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				(uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI,
		upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

	return 0;
}
/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	int i, j, k, r;
	uint32_t tmp;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_3_disable_clock_gating(adev, i);
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_3_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);
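		/*
		 * Firmware boot handshake: poll UVD_STATUS until the firmware
		 * reports up; on timeout, pulse BLK_RST to reset the VCPU and
		 * retry, giving up after ten attempts.
		 */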
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_DEV_ERROR(adev->dev,
				"VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI,
			upper_32_bits(ring->gpu_addr));

		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t));

		/* resetting ring, fw should not check RB ring */
		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
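		/*
		 * With the RB re-enabled, re-sample the hardware WPTR so the
		 * driver's cached copy starts in sync, then clear the reset /
		 * hold-off flags so firmware starts checking the ring again.
		 */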
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);
		fw_shared->sq.queue_mode &=
			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
	}

	return 0;
}
/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
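	/*
	 * Letting RPTR catch up with WPTR ensures the firmware has consumed
	 * everything submitted to the ring before the instance is power
	 * gated.
	 */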
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_3_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* stall UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;
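		/*
		 * Quiesce order matters: VCPU/LMI traffic is drained first,
		 * then the UMC arbiter is stalled, and only then is it safe
		 * to assert the LMI/UMC/VCPU soft resets below.
		 */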
		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* reset LMI UMC/LMI/VCPU */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear VCN status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_3_enable_clock_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
		struct dpg_pause_state *new_state)
{
	return 0;
}
/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}
/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}
/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
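/*
 * Note: the unified ring reuses the VCN 2.0 ENC packet emitters below; the
 * encode packet format is unchanged on 4.0.3, so only the rptr/wptr plumbing
 * above is version specific.
 */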
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		adev->vcn.inst[i].aid_id = i / adev->vcn.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}
/**
 * vcn_v4_0_3_is_idle - check whether VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}
/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}
/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(void *handle,
		enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_3_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_3_disable_clock_gating(adev, i);
		}
	}

	return 0;
}
/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(void *handle,
		enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_3_stop(adev);
	else
		ret = vcn_v4_0_3_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: interrupt state
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		unsigned int type,
		enum amdgpu_interrupt_state state)
{
	return 0;
}
/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry)
{
	uint32_t i;

	i = node_id_to_phys_map[entry->node_id];
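	/*
	 * The IV entry carries the originating node id; map it to the
	 * physical VCN instance so the fence is signalled on the right ring.
	 */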
	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[i].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
	.set = vcn_v4_0_3_set_interrupt_state,
	.process = vcn_v4_0_3_process_interrupt,
};
/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		adev->vcn.inst->irq.num_types++;
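		/*
		 * All instances share the first instance's irq source; each
		 * non-harvested instance just contributes one more type.
		 */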
	}
	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
	.name = "vcn_v4_0_3",
	.early_init = vcn_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_3_sw_init,
	.sw_fini = vcn_v4_0_3_sw_fini,
	.hw_init = vcn_v4_0_3_hw_init,
	.hw_fini = vcn_v4_0_3_hw_fini,
	.suspend = vcn_v4_0_3_suspend,
	.resume = vcn_v4_0_3_resume,
	.is_idle = vcn_v4_0_3_is_idle,
	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &vcn_v4_0_3_ip_funcs,
};