/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"

#include "vcn/vcn_4_0_3_offset.h"
#include "vcn/vcn_4_0_3_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define mmUVD_DPG_LMA_CTL               regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX      regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA              regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX     regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0         0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0        0x48300

static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_3_set_powergating_state(void *handle,
                enum amd_powergating_state state);
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
                int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
/**
 * vcn_v4_0_3_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v4_0_3_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        /* re-use enc ring as unified ring */
        adev->vcn.num_enc_rings = 1;

        vcn_v4_0_3_set_unified_ring_funcs(adev);
        vcn_v4_0_3_set_irq_funcs(adev);

        return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v4_0_3_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_3_sw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
        int i, r, vcn_inst;

        r = amdgpu_vcn_sw_init(adev);
        if (r)
                return r;

        amdgpu_vcn_setup_ucode(adev);

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        /* VCN UNIFIED TRAP */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
                VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
        if (r)
                return r;

        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                volatile struct amdgpu_vcn4_fw_shared *fw_shared;

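                /*
                 * GET_INST() translates the logical instance index into the
                 * physical VCN instance id used for register access.
                 */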
                vcn_inst = GET_INST(VCN, i);

                ring = &adev->vcn.inst[i].ring_enc[0];
                ring->use_doorbell = true;
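                /* doorbell slots are allocated per physical instance, nine
                 * doorbells apart; hw_init programs the matching offset into
                 * VCN_RB1_DB_CTRL below
                 */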
                ring->doorbell_index =
                        (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
                        9 * vcn_inst;
                ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
                sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
                                     AMDGPU_RING_PRIO_DEFAULT,
                                     &adev->vcn.inst[i].sched_score);
                if (r)
                        return r;

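                /* advertise the unified queue to firmware through the shared
                 * memory area
                 */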
                fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
                fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
                fw_shared->sq.is_enabled = cpu_to_le32(true);

                if (amdgpu_vcnfw_log)
                        amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
        }

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
                adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;

        return 0;
}

/**
 * vcn_v4_0_3_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_3_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, r, idx;

        if (drm_dev_enter(&adev->ddev, &idx)) {
                for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                        volatile struct amdgpu_vcn4_fw_shared *fw_shared;

                        fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
                        fw_shared->present_flag_0 = 0;
                        fw_shared->sq.is_enabled = cpu_to_le32(false);
                }
                drm_dev_exit(idx);
        }

        r = amdgpu_vcn_suspend(adev);
        if (r)
                return r;

        r = amdgpu_vcn_sw_fini(adev);

        return r;
}

/**
 * vcn_v4_0_3_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_3_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring;
        int i, r, vcn_inst;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                vcn_inst = GET_INST(VCN, i);
                ring = &adev->vcn.inst[i].ring_enc[0];

                if (ring->use_doorbell) {
                        adev->nbio.funcs->vcn_doorbell_range(
                                adev, ring->use_doorbell,
                                (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
                                        9 * vcn_inst,
                                adev->vcn.inst[i].aid_id);

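                        /* point the RB1 doorbell at this ring and enable it */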
                        WREG32_SOC15(
                                VCN, GET_INST(VCN, ring->me),
                                regVCN_RB1_DB_CTRL,
                                ring->doorbell_index
                                                << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
                                        VCN_RB1_DB_CTRL__EN_MASK);
                }

                r = amdgpu_ring_test_helper(ring);
                if (r)
                        goto done;
        }

done:
        if (!r)
                DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
                        (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

        return r;
}

/**
 * vcn_v4_0_3_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_3_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        cancel_delayed_work_sync(&adev->vcn.idle_work);

        if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
                vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);

        return 0;
}

/**
 * vcn_v4_0_3_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_3_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = vcn_v4_0_3_hw_fini(adev);
        if (r)
                return r;

        r = amdgpu_vcn_suspend(adev);

        return r;
}

/**
 * vcn_v4_0_3_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_3_resume(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        r = amdgpu_vcn_resume(adev);
        if (r)
                return r;

        r = vcn_v4_0_3_hw_init(adev);

        return r;
}

/**
 * vcn_v4_0_3_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
{
        uint32_t offset, size, vcn_inst;
        const struct common_firmware_header *hdr;

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

        vcn_inst = GET_INST(VCN, inst_idx);
        /* cache window 0: fw */
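        /*
         * With PSP (front door) loading the ucode lives in the TMR, so the
         * stack/context windows start at offset 0 of the instance's VCN BO;
         * otherwise the ucode occupies the start of the BO and the later
         * windows follow it at 'offset'.
         */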
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                WREG32_SOC15(
                        VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
                                 .tmr_mc_addr_lo));
                WREG32_SOC15(
                        VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                        (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
                                 .tmr_mc_addr_hi));
                WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
                offset = 0;
        } else {
                WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                             lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
                WREG32_SOC15(VCN, vcn_inst,
                             regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
                             upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
                offset = size;
                WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
                             AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
        }
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);

        /* cache window 1: stack */
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
                     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
                     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
                     AMDGPU_VCN_STACK_SIZE);

        /* cache window 2: context */
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
                     lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
                                   AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
                     upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
                                   AMDGPU_VCN_STACK_SIZE));
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
                     AMDGPU_VCN_CONTEXT_SIZE);

        /* non-cache window */
        WREG32_SOC15(
                VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
                lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
        WREG32_SOC15(
                VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
                upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
        WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
        WREG32_SOC15(
                VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
                AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
        uint32_t offset, size;
        const struct common_firmware_header *hdr;

        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

        /* cache window 0: fw */
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                if (!indirect) {
                        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
                                        inst_idx].tmr_mc_addr_lo), 0, indirect);
                        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                                (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
                                        inst_idx].tmr_mc_addr_hi), 0, indirect);
                        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                                VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                } else {
                        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                                VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
                        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                                VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
                }
                offset = 0;
        } else {
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
                offset = size;
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
                        AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
        }

        if (!indirect)
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
        else
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

        /* cache window 1: stack */
        if (!indirect) {
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        } else {
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
                WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
        }
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

        /* cache window 2: context */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
                                AMDGPU_VCN_STACK_SIZE), 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

        /* non-cache window */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
                        lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
                        upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                        VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
                        AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

        /* VCN global tiling registers */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
        uint32_t data;
        int vcn_inst;

        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                return;

        vcn_inst = GET_INST(VCN, inst_idx);

        /* VCN disable CGC */
        data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
        data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
        data &= ~(UVD_CGC_GATE__SYS_MASK
                | UVD_CGC_GATE__MPEG2_MASK
                | UVD_CGC_GATE__REGS_MASK
                | UVD_CGC_GATE__RBC_MASK
                | UVD_CGC_GATE__LMI_MC_MASK
                | UVD_CGC_GATE__LMI_UMC_MASK
                | UVD_CGC_GATE__MPC_MASK
                | UVD_CGC_GATE__LBSI_MASK
                | UVD_CGC_GATE__LRBBM_MASK
                | UVD_CGC_GATE__WCB_MASK
                | UVD_CGC_GATE__VCPU_MASK
                | UVD_CGC_GATE__MMSCH_MASK);

        WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
        SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

        data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
        data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK
                | UVD_CGC_CTRL__MMSCH_MODE_MASK);
        WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
        data |= (UVD_SUVD_CGC_GATE__SRE_MASK
                | UVD_SUVD_CGC_GATE__SIT_MASK
                | UVD_SUVD_CGC_GATE__SMP_MASK
                | UVD_SUVD_CGC_GATE__SCM_MASK
                | UVD_SUVD_CGC_GATE__SDB_MASK
                | UVD_SUVD_CGC_GATE__SRE_H264_MASK
                | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SIT_H264_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SCM_H264_MASK
                | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
                | UVD_SUVD_CGC_GATE__SDB_H264_MASK
                | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
                | UVD_SUVD_CGC_GATE__ENT_MASK
                | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
                | UVD_SUVD_CGC_GATE__SITE_MASK
                | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
                | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
                | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
                | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
                | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
        WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);

        data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
        data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
                                int inst_idx, uint8_t indirect)
{
        uint32_t reg_data = 0;

        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                return;

        /* enable sw clock gating control */
        reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
                 UVD_CGC_CTRL__MPEG2_MODE_MASK |
                 UVD_CGC_CTRL__REGS_MODE_MASK |
                 UVD_CGC_CTRL__RBC_MODE_MASK |
                 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                 UVD_CGC_CTRL__IDCT_MODE_MASK |
                 UVD_CGC_CTRL__MPRD_MODE_MASK |
                 UVD_CGC_CTRL__MPC_MODE_MASK |
                 UVD_CGC_CTRL__LBSI_MODE_MASK |
                 UVD_CGC_CTRL__LRBBM_MODE_MASK |
                 UVD_CGC_CTRL__WCB_MODE_MASK |
                 UVD_CGC_CTRL__VCPU_MODE_MASK);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

        /* turn off clock gating */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);

        /* turn on SUVD clock gating */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

        /* turn on sw mode in UVD_SUVD_CGC_CTRL */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
{
        uint32_t data;
        int vcn_inst;

        if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
                return;

        vcn_inst = GET_INST(VCN, inst_idx);

        /* enable VCN CGC */
        data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
        data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
        data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
        data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
        WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
        data |= (UVD_CGC_CTRL__SYS_MODE_MASK
                | UVD_CGC_CTRL__MPEG2_MODE_MASK
                | UVD_CGC_CTRL__REGS_MODE_MASK
                | UVD_CGC_CTRL__RBC_MODE_MASK
                | UVD_CGC_CTRL__LMI_MC_MODE_MASK
                | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
                | UVD_CGC_CTRL__MPC_MODE_MASK
                | UVD_CGC_CTRL__LBSI_MODE_MASK
                | UVD_CGC_CTRL__LRBBM_MODE_MASK
                | UVD_CGC_CTRL__WCB_MODE_MASK
                | UVD_CGC_CTRL__VCPU_MODE_MASK);
        WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);

        data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
        data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
                | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
                | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
                | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
        WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
        volatile struct amdgpu_vcn4_fw_shared *fw_shared =
                                                adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
        struct amdgpu_ring *ring;
        int vcn_inst;
        uint32_t tmp;

        vcn_inst = GET_INST(VCN, inst_idx);
        /* disable register anti-hang mechanism */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
                 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
        /* enable dynamic power gating mode */
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
        tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
        tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
        WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);

        if (indirect) {
                DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
                        inst_idx, adev->vcn.inst[inst_idx].aid_id);
                adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
                                (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
                /* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
                WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
                        adev->vcn.inst[inst_idx].aid_id, 0, true);
        }
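        /*
         * From here on, when 'indirect' is set, the DPG register writes below
         * are staged in the instance's DPG SRAM buffer and handed to PSP via
         * psp_update_vcn_sram() once programming is complete.
         */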

        /* disable clock gating */
        vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

        /* enable VCPU clock */
        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;

        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

        /* disable master interrupt */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);

        /* setup regUVD_LMI_CTRL */
        tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                UVD_LMI_CTRL__REQ_MODE_MASK |
                UVD_LMI_CTRL__CRC_RESET_MASK |
                UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
                (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                0x00100000L);
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);

        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_MPC_CNTL),
                0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_MPC_SET_MUXA0),
                ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_MPC_SET_MUXB0),
                 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_MPC_SET_MUX),
                ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

        vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);

        tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
        tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);

        /* enable LMI MC and UMC channels */
        tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);

        /* enable master interrupt */
        WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
                VCN, 0, regUVD_MASTINT_EN),
                UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

        if (indirect)
                psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
                        (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
                                (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));

        ring = &adev->vcn.inst[inst_idx].ring_enc[0];

        /* program the RB_BASE for ring buffer */
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
                     lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
                     upper_32_bits(ring->gpu_addr));

        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
                     ring->ring_size / sizeof(uint32_t));

        /* resetting ring, fw should not check RB ring */
        tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
        tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
        WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
        fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;

        /* Initialize the ring buffer's read and write pointers */
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
        WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
        ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);

        tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
        tmp |= VCN_RB_ENABLE__RB_EN_MASK;
        WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
        fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

        /* resetting done, fw can check RB ring */
        fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);

        return 0;
}

/**
 * vcn_v4_0_3_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_3_start(struct amdgpu_device *adev)
{
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;
        struct amdgpu_ring *ring;
        int i, j, k, r, vcn_inst;
        uint32_t tmp;

        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_uvd(adev, true);

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                        r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
                        continue;
                }

                vcn_inst = GET_INST(VCN, i);
                /* set VCN status busy */
                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
                      UVD_STATUS__UVD_BUSY;
                WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);

                /* SW clock gating */
                vcn_v4_0_3_disable_clock_gating(adev, i);

                /* enable VCPU clock */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
                         UVD_VCPU_CNTL__CLK_EN_MASK,
                         ~UVD_VCPU_CNTL__CLK_EN_MASK);

                /* disable master interrupt */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
                         ~UVD_MASTINT_EN__VCPU_EN_MASK);

                /* enable LMI MC and UMC channels */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
                         ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
                tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
                tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
                WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

                /* setup regUVD_LMI_CTRL */
                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
                WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
                             tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                                     UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
                                     UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
                                     UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

                /* setup regUVD_MPC_CNTL */
                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
                tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
                tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
                WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);

                /* setup UVD_MPC_SET_MUXA0 */
                WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
                             ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
                              (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
                              (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
                              (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

                /* setup UVD_MPC_SET_MUXB0 */
                WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
                             ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
                              (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
                              (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
                              (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

                /* setup UVD_MPC_SET_MUX */
                WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
                             ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
                              (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
                              (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

                vcn_v4_0_3_mc_resume(adev, i);

                /* VCN global tiling registers */
                WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
                             adev->gfx.config.gb_addr_config);
                WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
                             adev->gfx.config.gb_addr_config);

                /* unblock VCPU register access */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
                         ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

                /* release VCPU reset to boot */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
                         ~UVD_VCPU_CNTL__BLK_RST_MASK);

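                /* poll UVD_STATUS for the VCPU-ready bit; on timeout, pulse
                 * the VCPU block reset and try again, up to ten attempts
                 */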
                for (j = 0; j < 10; ++j) {
                        uint32_t status;

                        for (k = 0; k < 100; ++k) {
                                status = RREG32_SOC15(VCN, vcn_inst,
                                                      regUVD_STATUS);
                                if (status & 2)
                                        break;
                                mdelay(10);
                        }
                        r = 0;
                        if (status & 2)
                                break;

                        DRM_DEV_ERROR(adev->dev,
                                "VCN decode not responding, trying to reset the VCPU!!!\n");
                        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
                                                  regUVD_VCPU_CNTL),
                                 UVD_VCPU_CNTL__BLK_RST_MASK,
                                 ~UVD_VCPU_CNTL__BLK_RST_MASK);
                        mdelay(10);
                        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
                                                  regUVD_VCPU_CNTL),
                                 0, ~UVD_VCPU_CNTL__BLK_RST_MASK);

                        mdelay(10);
                        r = -1;
                }

                if (r) {
                        DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
                        return r;
                }

                /* enable master interrupt */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
                         UVD_MASTINT_EN__VCPU_EN_MASK,
                         ~UVD_MASTINT_EN__VCPU_EN_MASK);

                /* clear the busy bit of VCN_STATUS */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
                         ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

                ring = &adev->vcn.inst[i].ring_enc[0];
                fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

                /* program the RB_BASE for ring buffer */
                WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
                             lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
                             upper_32_bits(ring->gpu_addr));

                WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
                             ring->ring_size / sizeof(uint32_t));

                /* resetting ring, fw should not check RB ring */
                tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
                tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
                WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

                /* Initialize the ring buffer's read and write pointers */
                WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
                WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);

                tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
                tmp |= VCN_RB_ENABLE__RB_EN_MASK;
                WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);

                ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
                fw_shared->sq.queue_mode &=
                        cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
        }

        return 0;
}

/**
 * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
        uint32_t tmp;
        int vcn_inst;

        vcn_inst = GET_INST(VCN, inst_idx);

        /* Wait for power status to be 1 */
        SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
                           UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

        /* wait for read ptr to be equal to write ptr */
        tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
        SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

        SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
                           UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

        /* disable dynamic power gating mode */
        WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
                 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
        return 0;
}

/**
 * vcn_v4_0_3_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
{
        volatile struct amdgpu_vcn4_fw_shared *fw_shared;
        int i, r = 0, vcn_inst;
        uint32_t tmp;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                vcn_inst = GET_INST(VCN, i);

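                /* tell firmware to hold off DPG queue processing while the
                 * block is brought down
                 */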
                fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
                fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

                if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
                        vcn_v4_0_3_stop_dpg_mode(adev, i);
                        continue;
                }

                /* wait for vcn idle */
                r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
                                       UVD_STATUS__IDLE, 0x7);
                if (r)
                        goto Done;

                tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
                        UVD_LMI_STATUS__READ_CLEAN_MASK |
                        UVD_LMI_STATUS__WRITE_CLEAN_MASK |
                        UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
                r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
                                       tmp);
                if (r)
                        goto Done;

                /* stall UMC channel */
                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
                tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
                WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
                tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
                        UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
                r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
                                       tmp);
                if (r)
                        goto Done;

                /* block VCPU register access */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
                         UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
                         ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

                /* put VCPU into reset */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
                         UVD_VCPU_CNTL__BLK_RST_MASK,
                         ~UVD_VCPU_CNTL__BLK_RST_MASK);

                /* disable VCPU clock */
                WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
                         ~(UVD_VCPU_CNTL__CLK_EN_MASK));

                /* reset LMI UMC/LMI/VCPU */
                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
                tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
                WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

                tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
                tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
                WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);

                /* clear VCN status */
                WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);

                /* apply HW clock gating */
                vcn_v4_0_3_enable_clock_gating(adev, i);
        }
Done:
        if (adev->pm.dpm_enabled)
                amdgpu_dpm_enable_uvd(adev, false);

        return 0;
}

/**
 * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
                                struct dpg_pause_state *new_state)
{
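        /* nothing to do for this IP version yet; keep the hook as a no-op */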
        return 0;
}

/**
 * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
                DRM_ERROR("wrong ring id is identified in %s", __func__);

        return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
                DRM_ERROR("wrong ring id is identified in %s", __func__);

        if (ring->use_doorbell)
                return *ring->wptr_cpu_addr;
        else
                return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
                                    regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
                DRM_ERROR("wrong ring id is identified in %s", __func__);

        if (ring->use_doorbell) {
                *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
                WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
        } else {
                WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
                             lower_32_bits(ring->wptr));
        }
}

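/* The unified queue reuses the VCN 2.0 encode packet helpers for IB, fence
 * and VM-flush emission; only the rptr/wptr handling above is 4.0.3 specific.
 */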
static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_ENC,
        .align_mask = 0x3f,
        .nop = VCN_ENC_CMD_NO_OP,
        .get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
        .get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
        .set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
                4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
                5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
                1, /* vcn_v2_0_enc_ring_insert_end */
        .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
        .emit_ib = vcn_v2_0_enc_ring_emit_ib,
        .emit_fence = vcn_v2_0_enc_ring_emit_fence,
        .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
        .test_ring = amdgpu_vcn_enc_ring_test_ring,
        .test_ib = amdgpu_vcn_unified_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .insert_end = vcn_v2_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_vcn_ring_begin_use,
        .end_use = amdgpu_vcn_ring_end_use,
        .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
        .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set unified ring functions
 */
static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
{
        int i, vcn_inst;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
                adev->vcn.inst[i].ring_enc[0].me = i;
                vcn_inst = GET_INST(VCN, i);
                adev->vcn.inst[i].aid_id =
                        vcn_inst / adev->vcn.num_inst_per_aid;
        }
        DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
}

/**
 * vcn_v4_0_3_is_idle - check VCN block is idle
 *
 * @handle: amdgpu_device pointer
 *
 * Check whether VCN block is idle
 */
static bool vcn_v4_0_3_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, ret = 1;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
                        UVD_STATUS__IDLE);
        }

        return ret;
}

/**
 * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Wait for VCN block idle
 */
static int vcn_v4_0_3_wait_for_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i, ret = 0;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
                                         UVD_STATUS__IDLE, UVD_STATUS__IDLE);
                if (ret)
                        return ret;
        }

        return ret;
}

/**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Set VCN block clockgating state
 */
static int vcn_v4_0_3_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);
        int i;

        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
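                /* clock gating may only be enabled while the instance is idle */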
                if (enable) {
                        if (RREG32_SOC15(VCN, GET_INST(VCN, i),
                                         regUVD_STATUS) != UVD_STATUS__IDLE)
                                return -EBUSY;
                        vcn_v4_0_3_enable_clock_gating(adev, i);
                } else {
                        vcn_v4_0_3_disable_clock_gating(adev, i);
                }
        }
        return 0;
}

/**
 * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state
 *
 * Set VCN block powergating state
 */
static int vcn_v4_0_3_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret;

        if (state == adev->vcn.cur_state)
                return 0;

        if (state == AMD_PG_STATE_GATE)
                ret = vcn_v4_0_3_stop(adev);
        else
                ret = vcn_v4_0_3_start(adev);

        if (!ret)
                adev->vcn.cur_state = state;

        return ret;
}

/**
 * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block interrupt state
 */
static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned int type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

/**
 * vcn_v4_0_3_process_interrupt - process VCN block interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @entry: interrupt entry from clients and sources
 *
 * Process VCN block interrupt
 */
static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        uint32_t i, inst;

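        /* map the IH node id to an AID, then find the VCN instance that
         * lives on that AID
         */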
        i = node_id_to_phys_map[entry->node_id];

        DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");

        for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
                if (adev->vcn.inst[inst].aid_id == i)
                        break;

        if (inst >= adev->vcn.num_vcn_inst) {
                dev_WARN_ONCE(adev->dev, 1,
                              "Interrupt received for unknown VCN instance %d",
                              entry->node_id);
                return 0;
        }

        switch (entry->src_id) {
        case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
                amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
                break;
        default:
                DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
        .set = vcn_v4_0_3_set_interrupt_state,
        .process = vcn_v4_0_3_process_interrupt,
};

/**
 * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set VCN block interrupt irq functions
 */
static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
{
        int i;

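        /* all instances share the first instance's IRQ source; each instance
         * contributes one interrupt type
         */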
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
                adev->vcn.inst->irq.num_types++;

        adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
}

static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
        .name = "vcn_v4_0_3",
        .early_init = vcn_v4_0_3_early_init,
        .late_init = NULL,
        .sw_init = vcn_v4_0_3_sw_init,
        .sw_fini = vcn_v4_0_3_sw_fini,
        .hw_init = vcn_v4_0_3_hw_init,
        .hw_fini = vcn_v4_0_3_hw_fini,
        .suspend = vcn_v4_0_3_suspend,
        .resume = vcn_v4_0_3_resume,
        .is_idle = vcn_v4_0_3_is_idle,
        .wait_for_idle = vcn_v4_0_3_wait_for_idle,
        .check_soft_reset = NULL,
        .pre_soft_reset = NULL,
        .soft_reset = NULL,
        .post_soft_reset = NULL,
        .set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
        .set_powergating_state = vcn_v4_0_3_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_VCN,
        .major = 4,
        .minor = 0,
        .rev = 3,
        .funcs = &vcn_v4_0_3_ip_funcs,
};