/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
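/*
 * The common DPG helpers (WREG32_SOC15_DPG_MODE and friends) refer to the
 * legacy mm-prefixed UVD_DPG_LMA register names; alias them to the
 * reg-prefixed offsets provided by the VCN 4.0.3 headers.
 */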
#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	0x48300
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
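/*
 * VCN 4.0.3 exposes a single unified queue per instance: the encode-type
 * ring carries both decode and encode submissions, so only one ring and
 * one interrupt source are wired up below.
 */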
/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;

	vcn_v4_0_3_set_unified_ring_funcs(adev);
	vcn_v4_0_3_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}
/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
	if (r)
		return r;
	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		vcn_inst = GET_INST(VCN, i);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
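		/*
		 * Doorbell indices are stored in 64-bit units, hence the
		 * shift to 32-bit units; instances are then spaced a fixed
		 * stride apart (9 doorbell slots here) so every physical
		 * VCN gets its own slot.
		 */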
		ring->doorbell_index =
			(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
			9 * vcn_inst;
		ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
		sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT,
				     &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = cpu_to_le32(true);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

	return 0;
}
/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

	if (drm_dev_enter(&adev->ddev, &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = cpu_to_le32(false);
		}
		drm_dev_exit(idx);
	}

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);
		ring = &adev->vcn.inst[i].ring_enc[0];

		if (ring->use_doorbell) {
			adev->nbio.funcs->vcn_doorbell_range(
				adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					9 * vcn_inst,
				adev->vcn.inst[i].aid_id);

			WREG32_SOC15(
				VCN, GET_INST(VCN, ring->me),
				regVCN_RB1_DB_CTRL,
				ring->doorbell_index
						<< VCN_RB1_DB_CTRL__OFFSET__SHIFT |
					VCN_RB1_DB_CTRL__EN_MASK);

			/* Read DB_CTRL to flush the write DB_CTRL command. */
			RREG32_SOC15(
				VCN, GET_INST(VCN, ring->me),
				regVCN_RB1_DB_CTRL);
		}

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
		     (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return 0;
}
/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
		vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

	return 0;
}
/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = vcn_v4_0_3_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_3_hw_init(adev);

	return r;
}
/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t offset, size, vcn_inst;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	vcn_inst = GET_INST(VCN, inst_idx);
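	/*
	 * The VCPU sees three cached windows laid out back to back: window 0
	 * holds the firmware image (from the PSP TMR when PSP loads the
	 * ucode, otherwise from the driver's VCN BO), window 1 the stack and
	 * window 2 the context, with 'offset' tracking where each window
	 * starts in the BO.
	 */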
	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_lo));
		WREG32_SOC15(
			VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
				 .tmr_mc_addr_hi));
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		WREG32_SOC15(VCN, vcn_inst,
			     regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
			     AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
		     AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
				   AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
		     AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(
		VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}
/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
					inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
			AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
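	/* wait for the cleared gate bits to actually read back as zero
	 * before reprogramming CGC_CTRL */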
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
				int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t data;
	int vcn_inst;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}
/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared =
		adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	int vcn_inst;
	uint32_t tmp;

	vcn_inst = GET_INST(VCN, inst_idx);
	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
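	/*
	 * With indirect programming the DPG register writes below are not
	 * issued to the hardware directly; they are staged in the instance's
	 * dpg_sram buffer and handed off to the PSP in one batch via
	 * psp_update_vcn_sram() at the end of this sequence.
	 */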
	if (indirect) {
		DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
			      inst_idx, adev->vcn.inst[inst_idx].aid_id);
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
		/* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
		WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
				      adev->vcn.inst[inst_idx].aid_id, 0, true);
	}

	/* disable clock gating */
	vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
			(uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
				   (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
		     lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
		     upper_32_bits(ring->gpu_addr));

	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
		     ring->ring_size / sizeof(uint32_t));

	/* resetting ring, fw should not check RB ring */
	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
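	/*
	 * queue_mode lives in the GPU-visible fw_shared buffer; the flag set
	 * above tells the firmware to leave the ring alone until the
	 * reprogramming below is complete.
	 */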
	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
	ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB_EN_MASK;
	WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

	/* resetting done, fw can check RB ring */
	fw_shared->sq.queue_mode &=
		cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));

	return 0;
}
/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	int i, j, k, r, vcn_inst;
	uint32_t tmp;
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		vcn_inst = GET_INST(VCN, i);
		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
		      UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_3_disable_clock_gating(adev, i);
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__CLK_EN_MASK,
			 ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
			 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
			     tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
				   UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
				   UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
				   UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
			     ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
			     ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			      (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			      (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
			     ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			      (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			      (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_3_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);
		WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
			     adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, vcn_inst,
						      regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_DEV_ERROR(adev->dev,
				"VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 UVD_VCPU_CNTL__BLK_RST_MASK,
				 ~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
						  regUVD_VCPU_CNTL),
				 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
			return r;
		}
		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
			 UVD_MASTINT_EN__VCPU_EN_MASK,
			 ~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
			 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
			     lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
			     upper_32_bits(ring->gpu_addr));

		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
			     ring->ring_size / sizeof(uint32_t));

		/* resetting ring, fw should not check RB ring */
		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB_EN_MASK;
		WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

		ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
		fw_shared->sq.queue_mode &=
			cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
	}

	return 0;
}
/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;
	int vcn_inst;

	vcn_inst = GET_INST(VCN, inst_idx);

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
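	/* rptr catching up to wptr means the firmware has drained all
	 * outstanding work, so the block can be power gated safely */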
	SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}
/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	int i, r = 0, vcn_inst;
	uint32_t tmp;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		vcn_inst = GET_INST(VCN, i);

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_3_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
				       UVD_STATUS__IDLE, 0x7);
		if (r)
			goto Done;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__READ_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_MASK |
		      UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* stall UMC channel */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
		      UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
				       tmp);
		if (r)
			goto Done;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
			 UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			 ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* put VCPU into reset */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
			 UVD_VCPU_CNTL__BLK_RST_MASK,
			 ~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
			 ~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* reset LMI UMC/LMI/VCPU */
		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

		/* clear VCN status */
		WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_3_enable_clock_gating(adev, i);
	}
Done:
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}
/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
				     struct dpg_pause_state *new_state)
{
	return 0;
}
/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}
/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;

	return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
			    regUVD_RB_WPTR);
}
/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
			     lower_32_bits(ring->wptr));
	}
}
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_unified_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
	int i, vcn_inst;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
		adev->vcn.inst[i].ring_enc[0].me = i;
		vcn_inst = GET_INST(VCN, i);
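		/*
		 * Each AID carries num_inst_per_aid VCN instances, so the
		 * owning AID of a logical instance falls out of its physical
		 * index.
		 */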
		adev->vcn.inst[i].aid_id =
			vcn_inst / adev->vcn.num_inst_per_aid;
	}
	DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}
/**
 * vcn_v4_0_3_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
			UVD_STATUS__IDLE);
	}

	return ret;
}
/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
					 UVD_STATUS__IDLE, UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}
/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(void *handle,
					    enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (enable) {
			if (RREG32_SOC15(VCN, GET_INST(VCN, i),
					 regUVD_STATUS) != UVD_STATUS__IDLE)
				return -EBUSY;
			vcn_v4_0_3_enable_clock_gating(adev, i);
		} else {
			vcn_v4_0_3_disable_clock_gating(adev, i);
		}
	}
	return 0;
}
/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(void *handle,
					    enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v4_0_3_stop(adev);
	else
		ret = vcn_v4_0_3_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}
/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
					  struct amdgpu_irq_src *source,
					  unsigned int type,
					  enum amdgpu_interrupt_state state)
{
	return 0;
}
/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					struct amdgpu_iv_entry *entry)
{
	uint32_t i, inst;

	i = node_id_to_phys_map[entry->node_id];

	DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
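	/*
	 * The IH cookie only identifies the AID that raised the interrupt;
	 * map it to a physical index, then search for the logical instance
	 * living on that AID.
	 */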
	for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
		if (adev->vcn.inst[inst].aid_id == i)
			break;

	if (inst >= adev->vcn.num_vcn_inst) {
		dev_WARN_ONCE(adev->dev, 1,
			      "Interrupt received for unknown VCN instance %d",
			      entry->node_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			      entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
	.set = vcn_v4_0_3_set_interrupt_state,
	.process = vcn_v4_0_3_process_interrupt,
};
/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
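		/* all instances share one IRQ source; each contributes one
		 * interrupt type */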
		adev->vcn.inst->irq.num_types++;
	}
	adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
	.name = "vcn_v4_0_3",
	.early_init = vcn_v4_0_3_early_init,
	.late_init = NULL,
	.sw_init = vcn_v4_0_3_sw_init,
	.sw_fini = vcn_v4_0_3_sw_fini,
	.hw_init = vcn_v4_0_3_hw_init,
	.hw_fini = vcn_v4_0_3_hw_fini,
	.suspend = vcn_v4_0_3_suspend,
	.resume = vcn_v4_0_3_resume,
	.is_idle = vcn_v4_0_3_is_idle,
	.wait_for_idle = vcn_v4_0_3_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
	.set_powergating_state = vcn_v4_0_3_set_powergating_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 4,
	.minor = 0,
	.rev = 3,
	.funcs = &vcn_v4_0_3_ip_funcs,
};