drm/amdgpu: Add a read after write DB_CTRL for vcn_v4_0_3
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
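This revision adds a read-back of VCN_RB1_DB_CTRL immediately after it is written in vcn_v4_0_3_hw_init(), so the doorbell-control write is flushed to the hardware before the ring is used (see the RREG32_SOC15() call in hw_init below).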
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25 #include <drm/drm_drv.h>
26
27 #include "amdgpu.h"
28 #include "amdgpu_vcn.h"
29 #include "amdgpu_pm.h"
30 #include "soc15.h"
31 #include "soc15d.h"
32 #include "soc15_hw_ip.h"
33 #include "vcn_v2_0.h"
34
35 #include "vcn/vcn_4_0_3_offset.h"
36 #include "vcn/vcn_4_0_3_sh_mask.h"
37 #include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"
38
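/*
 * The VCN 4.0.3 register headers only provide reg-prefixed names; alias them
 * to the mm-prefixed names expected by the shared DPG register access macros.
 */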
39 #define mmUVD_DPG_LMA_CTL               regUVD_DPG_LMA_CTL
40 #define mmUVD_DPG_LMA_CTL_BASE_IDX      regUVD_DPG_LMA_CTL_BASE_IDX
41 #define mmUVD_DPG_LMA_DATA              regUVD_DPG_LMA_DATA
42 #define mmUVD_DPG_LMA_DATA_BASE_IDX     regUVD_DPG_LMA_DATA_BASE_IDX
43
44 #define VCN_VID_SOC_ADDRESS_2_0         0x1fb00
45 #define VCN1_VID_SOC_ADDRESS_3_0        0x48300
46
47 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev);
48 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev);
49 static int vcn_v4_0_3_set_powergating_state(void *handle,
50                 enum amd_powergating_state state);
51 static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev,
52                 int inst_idx, struct dpg_pause_state *new_state);
53 static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
54
55 /**
56  * vcn_v4_0_3_early_init - set function pointers
57  *
58  * @handle: amdgpu_device pointer
59  *
60  * Set ring and irq function pointers
61  */
62 static int vcn_v4_0_3_early_init(void *handle)
63 {
64         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
65
66         /* re-use enc ring as unified ring */
67         adev->vcn.num_enc_rings = 1;
68
69         vcn_v4_0_3_set_unified_ring_funcs(adev);
70         vcn_v4_0_3_set_irq_funcs(adev);
71
72         return amdgpu_vcn_early_init(adev);
73 }
74
75 /**
76  * vcn_v4_0_3_sw_init - sw init for VCN block
77  *
78  * @handle: amdgpu_device pointer
79  *
80  * Load firmware and sw initialization
81  */
82 static int vcn_v4_0_3_sw_init(void *handle)
83 {
84         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
85         struct amdgpu_ring *ring;
86         int i, r, vcn_inst;
87
88         r = amdgpu_vcn_sw_init(adev);
89         if (r)
90                 return r;
91
92         amdgpu_vcn_setup_ucode(adev);
93
94         r = amdgpu_vcn_resume(adev);
95         if (r)
96                 return r;
97
98         /* VCN UNIFIED TRAP */
99         r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
100                 VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq);
101         if (r)
102                 return r;
103
104         for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
105                 volatile struct amdgpu_vcn4_fw_shared *fw_shared;
106
107                 vcn_inst = GET_INST(VCN, i);
108
109                 ring = &adev->vcn.inst[i].ring_enc[0];
110                 ring->use_doorbell = true;
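                /*
                 * vcn_ring0_1 is a 64-bit doorbell slot; shift left by one to
                 * get the 32-bit doorbell index, then space instances 9 slots
                 * apart.
                 */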
111                 ring->doorbell_index =
112                         (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
113                         9 * vcn_inst;
114                 ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
115                 sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
116                 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
117                                      AMDGPU_RING_PRIO_DEFAULT,
118                                      &adev->vcn.inst[i].sched_score);
119                 if (r)
120                         return r;
121
122                 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
123                 fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
124                 fw_shared->sq.is_enabled = cpu_to_le32(true);
125
126                 if (amdgpu_vcnfw_log)
127                         amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
128         }
129
130         if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
131                 adev->vcn.pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
132
133         return 0;
134 }
135
136 /**
137  * vcn_v4_0_3_sw_fini - sw fini for VCN block
138  *
139  * @handle: amdgpu_device pointer
140  *
141  * VCN suspend and free up sw allocation
142  */
143 static int vcn_v4_0_3_sw_fini(void *handle)
144 {
145         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
146         int i, r, idx;
147
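        /*
         * Only touch the CPU mapping of the fw_shared buffer while the DRM
         * device is still present (drm_dev_enter() fails after unplug).
         */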
148         if (drm_dev_enter(&adev->ddev, &idx)) {
149                 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
150                         volatile struct amdgpu_vcn4_fw_shared *fw_shared;
151
152                         fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
153                         fw_shared->present_flag_0 = 0;
154                         fw_shared->sq.is_enabled = cpu_to_le32(false);
155                 }
156                 drm_dev_exit(idx);
157         }
158
159         r = amdgpu_vcn_suspend(adev);
160         if (r)
161                 return r;
162
163         r = amdgpu_vcn_sw_fini(adev);
164
165         return r;
166 }
167
168 /**
169  * vcn_v4_0_3_hw_init - start and test VCN block
170  *
171  * @handle: amdgpu_device pointer
172  *
173  * Initialize the hardware, boot up the VCPU and do some testing
174  */
175 static int vcn_v4_0_3_hw_init(void *handle)
176 {
177         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
178         struct amdgpu_ring *ring;
179         int i, r, vcn_inst;
180
181         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
182                 vcn_inst = GET_INST(VCN, i);
183                 ring = &adev->vcn.inst[i].ring_enc[0];
184
185                 if (ring->use_doorbell) {
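                        /*
                         * Program the NBIO doorbell aperture for this VCN
                         * instance before enabling the ring's doorbell.
                         */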
186                         adev->nbio.funcs->vcn_doorbell_range(
187                                 adev, ring->use_doorbell,
188                                 (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
189                                         9 * vcn_inst,
190                                 adev->vcn.inst[i].aid_id);
191
192                         WREG32_SOC15(
193                                 VCN, GET_INST(VCN, ring->me),
194                                 regVCN_RB1_DB_CTRL,
195                                 ring->doorbell_index
196                                                 << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
197                                         VCN_RB1_DB_CTRL__EN_MASK);
198
199                         /* Read DB_CTRL to flush the write DB_CTRL command. */
200                         RREG32_SOC15(
201                                 VCN, GET_INST(VCN, ring->me),
202                                 regVCN_RB1_DB_CTRL);
203                 }
204
205                 r = amdgpu_ring_test_helper(ring);
206                 if (r)
207                         goto done;
208         }
209
210 done:
211         if (!r)
212                 DRM_DEV_INFO(adev->dev, "VCN decode initialized successfully (under %s).\n",
213                         (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");
214
215         return r;
216 }
217
218 /**
219  * vcn_v4_0_3_hw_fini - stop the hardware block
220  *
221  * @handle: amdgpu_device pointer
222  *
223  * Stop the VCN block, mark ring as not ready any more
224  */
225 static int vcn_v4_0_3_hw_fini(void *handle)
226 {
227         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
228
229         cancel_delayed_work_sync(&adev->vcn.idle_work);
230
231         if (adev->vcn.cur_state != AMD_PG_STATE_GATE)
232                 vcn_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
233
234         return 0;
235 }
236
237 /**
238  * vcn_v4_0_3_suspend - suspend VCN block
239  *
240  * @handle: amdgpu_device pointer
241  *
242  * HW fini and suspend VCN block
243  */
244 static int vcn_v4_0_3_suspend(void *handle)
245 {
246         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
247         int r;
248
249         r = vcn_v4_0_3_hw_fini(adev);
250         if (r)
251                 return r;
252
253         r = amdgpu_vcn_suspend(adev);
254
255         return r;
256 }
257
258 /**
259  * vcn_v4_0_3_resume - resume VCN block
260  *
261  * @handle: amdgpu_device pointer
262  *
263  * Resume firmware and hw init VCN block
264  */
265 static int vcn_v4_0_3_resume(void *handle)
266 {
267         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
268         int r;
269
270         r = amdgpu_vcn_resume(adev);
271         if (r)
272                 return r;
273
274         r = vcn_v4_0_3_hw_init(adev);
275
276         return r;
277 }
278
279 /**
280  * vcn_v4_0_3_mc_resume - memory controller programming
281  *
282  * @adev: amdgpu_device pointer
283  * @inst_idx: instance number
284  *
285  * Let the VCN memory controller know its offsets
286  */
287 static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
288 {
289         uint32_t offset, size, vcn_inst;
290         const struct common_firmware_header *hdr;
291
292         hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
293         size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
294
295         vcn_inst = GET_INST(VCN, inst_idx);
296         /* cache window 0: fw */
297         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
298                 WREG32_SOC15(
299                         VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
300                         (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
301                                  .tmr_mc_addr_lo));
302                 WREG32_SOC15(
303                         VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
304                         (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx]
305                                  .tmr_mc_addr_hi));
306                 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0);
307                 offset = 0;
308         } else {
309                 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
310                              lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
311                 WREG32_SOC15(VCN, vcn_inst,
312                              regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
313                              upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr));
314                 offset = size;
315                 WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0,
316                              AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
317         }
318         WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size);
319
320         /* cache window 1: stack */
321         WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
322                      lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
323         WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
324                      upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset));
325         WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0);
326         WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1,
327                      AMDGPU_VCN_STACK_SIZE);
328
329         /* cache window 2: context */
330         WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
331                      lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
332                                    AMDGPU_VCN_STACK_SIZE));
333         WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
334                      upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
335                                    AMDGPU_VCN_STACK_SIZE));
336         WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0);
337         WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2,
338                      AMDGPU_VCN_CONTEXT_SIZE);
339
340         /* non-cache window */
341         WREG32_SOC15(
342                 VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
343                 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
344         WREG32_SOC15(
345                 VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
346                 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr));
347         WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
348         WREG32_SOC15(
349                 VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0,
350                 AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
351 }
352
353 /**
354  * vcn_v4_0_3_mc_resume_dpg_mode - memory controller programming for dpg mode
355  *
356  * @adev: amdgpu_device pointer
357  * @inst_idx: instance number index
358  * @indirect: indirectly write sram
359  *
360  * Let the VCN memory controller know its offsets with dpg mode
361  */
362 static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
363 {
364         uint32_t offset, size;
365         const struct common_firmware_header *hdr;
366
367         hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
368         size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
369
370         /* cache window 0: fw */
371         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
372                 if (!indirect) {
373                         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
374                                 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
375                                 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
376                                         inst_idx].tmr_mc_addr_lo), 0, indirect);
377                         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
378                                 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
379                                 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN +
380                                         inst_idx].tmr_mc_addr_hi), 0, indirect);
381                         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
382                                 VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
383                 } else {
384                         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
385                                 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
386                         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
387                                 VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
388                         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
389                                 VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
390                 }
391                 offset = 0;
392         } else {
393                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
394                         VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
395                         lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
396                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
397                         VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
398                         upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
399                 offset = size;
400                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
401                         VCN, 0, regUVD_VCPU_CACHE_OFFSET0),
402                         AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
403         }
404
405         if (!indirect)
406                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
407                         VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
408         else
409                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
410                         VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
411
412         /* cache window 1: stack */
413         if (!indirect) {
414                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
415                         VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
416                         lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
417                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
418                         VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
419                         upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
420                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
421                         VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
422         } else {
423                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
424                         VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
425                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
426                         VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
427                 WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
428                         VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
429         }
430         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
431                         VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
432
433         /* cache window 2: context */
434         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
435                         VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
436                         lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
437                                 AMDGPU_VCN_STACK_SIZE), 0, indirect);
438         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
439                         VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
440                         upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset +
441                                 AMDGPU_VCN_STACK_SIZE), 0, indirect);
442         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
443                         VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
444         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
445                         VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
446
447         /* non-cache window */
448         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
449                         VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
450                         lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
451         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
452                         VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
453                         upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
454         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
455                         VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
456         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
457                         VCN, 0, regUVD_VCPU_NONCACHE_SIZE0),
458                         AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);
459
460         /* VCN global tiling registers */
461         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
462                 VCN, 0, regUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
463         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
464                 VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
465 }
466
467 /**
468  * vcn_v4_0_3_disable_clock_gating - disable VCN clock gating
469  *
470  * @adev: amdgpu_device pointer
471  * @inst_idx: instance number
472  *
473  * Disable clock gating for VCN block
474  */
475 static void vcn_v4_0_3_disable_clock_gating(struct amdgpu_device *adev, int inst_idx)
476 {
477         uint32_t data;
478         int vcn_inst;
479
480         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
481                 return;
482
483         vcn_inst = GET_INST(VCN, inst_idx);
484
485         /* VCN disable CGC */
486         data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
487         data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
488         data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
489         data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
490         WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
491
492         data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE);
493         data &= ~(UVD_CGC_GATE__SYS_MASK
494                 | UVD_CGC_GATE__MPEG2_MASK
495                 | UVD_CGC_GATE__REGS_MASK
496                 | UVD_CGC_GATE__RBC_MASK
497                 | UVD_CGC_GATE__LMI_MC_MASK
498                 | UVD_CGC_GATE__LMI_UMC_MASK
499                 | UVD_CGC_GATE__MPC_MASK
500                 | UVD_CGC_GATE__LBSI_MASK
501                 | UVD_CGC_GATE__LRBBM_MASK
502                 | UVD_CGC_GATE__WCB_MASK
503                 | UVD_CGC_GATE__VCPU_MASK
504                 | UVD_CGC_GATE__MMSCH_MASK);
505
506         WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_GATE, data);
507         SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);
508
509         data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
510         data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK
511                 | UVD_CGC_CTRL__MPEG2_MODE_MASK
512                 | UVD_CGC_CTRL__REGS_MODE_MASK
513                 | UVD_CGC_CTRL__RBC_MODE_MASK
514                 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
515                 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
516                 | UVD_CGC_CTRL__MPC_MODE_MASK
517                 | UVD_CGC_CTRL__LBSI_MODE_MASK
518                 | UVD_CGC_CTRL__LRBBM_MODE_MASK
519                 | UVD_CGC_CTRL__WCB_MODE_MASK
520                 | UVD_CGC_CTRL__VCPU_MODE_MASK
521                 | UVD_CGC_CTRL__MMSCH_MODE_MASK);
522         WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
523
524         data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE);
525         data |= (UVD_SUVD_CGC_GATE__SRE_MASK
526                 | UVD_SUVD_CGC_GATE__SIT_MASK
527                 | UVD_SUVD_CGC_GATE__SMP_MASK
528                 | UVD_SUVD_CGC_GATE__SCM_MASK
529                 | UVD_SUVD_CGC_GATE__SDB_MASK
530                 | UVD_SUVD_CGC_GATE__SRE_H264_MASK
531                 | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
532                 | UVD_SUVD_CGC_GATE__SIT_H264_MASK
533                 | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
534                 | UVD_SUVD_CGC_GATE__SCM_H264_MASK
535                 | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
536                 | UVD_SUVD_CGC_GATE__SDB_H264_MASK
537                 | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
538                 | UVD_SUVD_CGC_GATE__ENT_MASK
539                 | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
540                 | UVD_SUVD_CGC_GATE__SITE_MASK
541                 | UVD_SUVD_CGC_GATE__SRE_VP9_MASK
542                 | UVD_SUVD_CGC_GATE__SCM_VP9_MASK
543                 | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
544                 | UVD_SUVD_CGC_GATE__SDB_VP9_MASK
545                 | UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
546         WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_GATE, data);
547
548         data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
549         data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
550                 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
551                 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
552                 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
553                 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
554                 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
555                 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
556                 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
557         WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
558 }
559
560 /**
561  * vcn_v4_0_3_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
562  *
563  * @adev: amdgpu_device pointer
564  * @sram_sel: sram select
565  * @inst_idx: instance number index
566  * @indirect: indirectly write sram
567  *
568  * Disable clock gating for VCN block with dpg mode
569  */
570 static void vcn_v4_0_3_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
571                                 int inst_idx, uint8_t indirect)
572 {
573         uint32_t reg_data = 0;
574
575         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
576                 return;
577
578         /* enable sw clock gating control */
579         reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
580         reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
581         reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
582         reg_data &= ~(UVD_CGC_CTRL__SYS_MODE_MASK |
583                  UVD_CGC_CTRL__MPEG2_MODE_MASK |
584                  UVD_CGC_CTRL__REGS_MODE_MASK |
585                  UVD_CGC_CTRL__RBC_MODE_MASK |
586                  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
587                  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
588                  UVD_CGC_CTRL__IDCT_MODE_MASK |
589                  UVD_CGC_CTRL__MPRD_MODE_MASK |
590                  UVD_CGC_CTRL__MPC_MODE_MASK |
591                  UVD_CGC_CTRL__LBSI_MODE_MASK |
592                  UVD_CGC_CTRL__LRBBM_MODE_MASK |
593                  UVD_CGC_CTRL__WCB_MODE_MASK |
594                  UVD_CGC_CTRL__VCPU_MODE_MASK);
595         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
596                 VCN, 0, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);
597
598         /* turn off clock gating */
599         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
600                 VCN, 0, regUVD_CGC_GATE), 0, sram_sel, indirect);
601
602         /* turn on SUVD clock gating */
603         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
604                 VCN, 0, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
605
606         /* turn on sw mode in UVD_SUVD_CGC_CTRL */
607         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
608                 VCN, 0, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
609 }
610
611 /**
612  * vcn_v4_0_3_enable_clock_gating - enable VCN clock gating
613  *
614  * @adev: amdgpu_device pointer
615  * @inst_idx: instance number
616  *
617  * Enable clock gating for VCN block
618  */
619 static void vcn_v4_0_3_enable_clock_gating(struct amdgpu_device *adev, int inst_idx)
620 {
621         uint32_t data;
622         int vcn_inst;
623
624         if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
625                 return;
626
627         vcn_inst = GET_INST(VCN, inst_idx);
628
629         /* enable VCN CGC */
630         data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
631         data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
632         data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
633         data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
634         WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
635
636         data = RREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL);
637         data |= (UVD_CGC_CTRL__SYS_MODE_MASK
638                 | UVD_CGC_CTRL__MPEG2_MODE_MASK
639                 | UVD_CGC_CTRL__REGS_MODE_MASK
640                 | UVD_CGC_CTRL__RBC_MODE_MASK
641                 | UVD_CGC_CTRL__LMI_MC_MODE_MASK
642                 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK
643                 | UVD_CGC_CTRL__MPC_MODE_MASK
644                 | UVD_CGC_CTRL__LBSI_MODE_MASK
645                 | UVD_CGC_CTRL__LRBBM_MODE_MASK
646                 | UVD_CGC_CTRL__WCB_MODE_MASK
647                 | UVD_CGC_CTRL__VCPU_MODE_MASK);
648         WREG32_SOC15(VCN, vcn_inst, regUVD_CGC_CTRL, data);
649
650         data = RREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL);
651         data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
652                 | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
653                 | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
654                 | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
655                 | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
656                 | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
657                 | UVD_SUVD_CGC_CTRL__IME_MODE_MASK
658                 | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
659         WREG32_SOC15(VCN, vcn_inst, regUVD_SUVD_CGC_CTRL, data);
660 }
661
662 /**
663  * vcn_v4_0_3_start_dpg_mode - VCN start with dpg mode
664  *
665  * @adev: amdgpu_device pointer
666  * @inst_idx: instance number index
667  * @indirect: indirectly write sram
668  *
669  * Start VCN block with dpg mode
670  */
671 static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
672 {
673         volatile struct amdgpu_vcn4_fw_shared *fw_shared =
674                                                 adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
675         struct amdgpu_ring *ring;
676         int vcn_inst;
677         uint32_t tmp;
678
679         vcn_inst = GET_INST(VCN, inst_idx);
680         /* disable register anti-hang mechanism */
681         WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1,
682                  ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
683         /* enable dynamic power gating mode */
684         tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS);
685         tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
686         tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
687         WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp);
688
689         if (indirect) {
690                 DRM_DEV_DEBUG(adev->dev, "VCN %d start: on AID %d",
691                         inst_idx, adev->vcn.inst[inst_idx].aid_id);
692                 adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
693                                 (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
694                 /* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */
695                 WREG32_SOC15_DPG_MODE(inst_idx, 0xDEADBEEF,
696                         adev->vcn.inst[inst_idx].aid_id, 0, true);
697         }
698
699         /* disable clock gating */
700         vcn_v4_0_3_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
701
702         /* enable VCPU clock */
703         tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
704         tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
705         tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
706
707         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
708                 VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
709
710         /* disable master interrupt */
711         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
712                 VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect);
713
714         /* setup regUVD_LMI_CTRL */
715         tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
716                 UVD_LMI_CTRL__REQ_MODE_MASK |
717                 UVD_LMI_CTRL__CRC_RESET_MASK |
718                 UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
719                 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
720                 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
721                 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
722                 0x00100000L);
723         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
724                 VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect);
725
726         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
727                 VCN, 0, regUVD_MPC_CNTL),
728                 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
729
730         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
731                 VCN, 0, regUVD_MPC_SET_MUXA0),
732                 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
733                  (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
734                  (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
735                  (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
736
737         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
738                 VCN, 0, regUVD_MPC_SET_MUXB0),
739                  ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
740                  (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
741                  (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
742                  (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
743
744         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
745                 VCN, 0, regUVD_MPC_SET_MUX),
746                 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
747                  (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
748                  (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
749
750         vcn_v4_0_3_mc_resume_dpg_mode(adev, inst_idx, indirect);
751
752         tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
753         tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
754         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
755                 VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect);
756
757         /* enable LMI MC and UMC channels */
758         tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
759         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
760                 VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect);
761
762         /* enable master interrupt */
763         WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
764                 VCN, 0, regUVD_MASTINT_EN),
765                 UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
766
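        /*
         * In indirect mode the register writes above were only queued in the
         * DPG scratch SRAM; hand them to PSP for programming now.
         */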
767         if (indirect)
768                 psp_update_vcn_sram(adev, 0, adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
769                         (uint32_t)((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
770                                 (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr));
771
772         ring = &adev->vcn.inst[inst_idx].ring_enc[0];
773
774         /* program the RB_BASE for ring buffer */
775         WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
776                      lower_32_bits(ring->gpu_addr));
777         WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
778                      upper_32_bits(ring->gpu_addr));
779
780         WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
781                      ring->ring_size / sizeof(uint32_t));
782
783         /* resetting ring, fw should not check RB ring */
784         tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
785         tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
786         WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
787         fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
788
789         /* Initialize the ring buffer's read and write pointers */
790         WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
791         WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
792         ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
793
794         tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
795         tmp |= VCN_RB_ENABLE__RB_EN_MASK;
796         WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
797         fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
798
799         /* resetting done, fw can check RB ring */
800         fw_shared->sq.queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
801
802         return 0;
803 }
804
805 /**
806  * vcn_v4_0_3_start - VCN start
807  *
808  * @adev: amdgpu_device pointer
809  *
810  * Start VCN block
811  */
812 static int vcn_v4_0_3_start(struct amdgpu_device *adev)
813 {
814         volatile struct amdgpu_vcn4_fw_shared *fw_shared;
815         struct amdgpu_ring *ring;
816         int i, j, k, r, vcn_inst;
817         uint32_t tmp;
818
819         if (adev->pm.dpm_enabled)
820                 amdgpu_dpm_enable_uvd(adev, true);
821
822         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
823                 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
824                         r = vcn_v4_0_3_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
825                         continue;
826                 }
827
828                 vcn_inst = GET_INST(VCN, i);
829                 /* set VCN status busy */
830                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) |
831                       UVD_STATUS__UVD_BUSY;
832                 WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp);
833
834                 /* SW clock gating */
835                 vcn_v4_0_3_disable_clock_gating(adev, i);
836
837                 /* enable VCPU clock */
838                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
839                          UVD_VCPU_CNTL__CLK_EN_MASK,
840                          ~UVD_VCPU_CNTL__CLK_EN_MASK);
841
842                 /* disable master interrupt */
843                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0,
844                          ~UVD_MASTINT_EN__VCPU_EN_MASK);
845
846                 /* enable LMI MC and UMC channels */
847                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0,
848                          ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
849
850                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
851                 tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
852                 tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
853                 WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
854
855                 /* setup regUVD_LMI_CTRL */
856                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL);
857                 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL,
858                              tmp | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
859                                      UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
860                                      UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
861                                      UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
862
863                 /* setup regUVD_MPC_CNTL */
864                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL);
865                 tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
866                 tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
867                 WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_CNTL, tmp);
868
869                 /* setup UVD_MPC_SET_MUXA0 */
870                 WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXA0,
871                              ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
872                               (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
873                               (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
874                               (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
875
876                 /* setup UVD_MPC_SET_MUXB0 */
877                 WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUXB0,
878                              ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
879                               (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
880                               (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
881                               (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
882
883                 /* setup UVD_MPC_SET_MUX */
884                 WREG32_SOC15(VCN, vcn_inst, regUVD_MPC_SET_MUX,
885                              ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
886                               (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
887                               (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
888
889                 vcn_v4_0_3_mc_resume(adev, i);
890
891                 /* VCN global tiling registers */
892                 WREG32_SOC15(VCN, vcn_inst, regUVD_GFX8_ADDR_CONFIG,
893                              adev->gfx.config.gb_addr_config);
894                 WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG,
895                              adev->gfx.config.gb_addr_config);
896
897                 /* unblock VCPU register access */
898                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0,
899                          ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
900
901                 /* release VCPU reset to boot */
902                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
903                          ~UVD_VCPU_CNTL__BLK_RST_MASK);
904
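                /*
                 * Poll UVD_STATUS for the VCPU boot report; retry up to 10
                 * times, toggling VCPU block reset between attempts.
                 */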
905                 for (j = 0; j < 10; ++j) {
906                         uint32_t status;
907
908                         for (k = 0; k < 100; ++k) {
909                                 status = RREG32_SOC15(VCN, vcn_inst,
910                                                       regUVD_STATUS);
911                                 if (status & 2)
912                                         break;
913                                 mdelay(10);
914                         }
915                         r = 0;
916                         if (status & 2)
917                                 break;
918
919                         DRM_DEV_ERROR(adev->dev,
920                                 "VCN decode not responding, trying to reset the VCPU!!!\n");
921                         WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
922                                                   regUVD_VCPU_CNTL),
923                                  UVD_VCPU_CNTL__BLK_RST_MASK,
924                                  ~UVD_VCPU_CNTL__BLK_RST_MASK);
925                         mdelay(10);
926                         WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst,
927                                                   regUVD_VCPU_CNTL),
928                                  0, ~UVD_VCPU_CNTL__BLK_RST_MASK);
929
930                         mdelay(10);
931                         r = -1;
932                 }
933
934                 if (r) {
935                         DRM_DEV_ERROR(adev->dev, "VCN decode not responding, giving up!!!\n");
936                         return r;
937                 }
938
939                 /* enable master interrupt */
940                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN),
941                          UVD_MASTINT_EN__VCPU_EN_MASK,
942                          ~UVD_MASTINT_EN__VCPU_EN_MASK);
943
944                 /* clear the busy bit of VCN_STATUS */
945                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0,
946                          ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
947
948                 ring = &adev->vcn.inst[i].ring_enc[0];
949                 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
950
951                 /* program the RB_BASE for ring buffer */
952                 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO,
953                              lower_32_bits(ring->gpu_addr));
954                 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI,
955                              upper_32_bits(ring->gpu_addr));
956
957                 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE,
958                              ring->ring_size / sizeof(uint32_t));
959
960                 /* resetting ring, fw should not check RB ring */
961                 tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
962                 tmp &= ~(VCN_RB_ENABLE__RB_EN_MASK);
963                 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
964
965                 /* Initialize the ring buffer's read and write pointers */
966                 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0);
967                 WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0);
968
969                 tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE);
970                 tmp |= VCN_RB_ENABLE__RB_EN_MASK;
971                 WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp);
972
973                 ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
974                 fw_shared->sq.queue_mode &=
975                         cpu_to_le32(~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF));
976
977         }
978         return 0;
979 }
980
981 /**
982  * vcn_v4_0_3_stop_dpg_mode - VCN stop with dpg mode
983  *
984  * @adev: amdgpu_device pointer
985  * @inst_idx: instance number index
986  *
987  * Stop VCN block with dpg mode
988  */
989 static int vcn_v4_0_3_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
990 {
991         uint32_t tmp;
992         int vcn_inst;
993
994         vcn_inst = GET_INST(VCN, inst_idx);
995
996         /* Wait for power status to be 1 */
997         SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
998                            UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
999
1000         /* wait for read ptr to be equal to write ptr */
1001         tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR);
1002         SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);
1003
1004         SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1,
1005                            UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
1006
1007         /* disable dynamic power gating mode */
1008         WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0,
1009                  ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
1010         return 0;
1011 }
1012
1013 /**
1014  * vcn_v4_0_3_stop - VCN stop
1015  *
1016  * @adev: amdgpu_device pointer
1017  *
1018  * Stop VCN block
1019  */
1020 static int vcn_v4_0_3_stop(struct amdgpu_device *adev)
1021 {
1022         volatile struct amdgpu_vcn4_fw_shared *fw_shared;
1023         int i, r = 0, vcn_inst;
1024         uint32_t tmp;
1025
1026         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1027                 vcn_inst = GET_INST(VCN, i);
1028
1029                 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1030                 fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;
1031
1032                 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
1033                         vcn_v4_0_3_stop_dpg_mode(adev, i);
1034                         continue;
1035                 }
1036
1037                 /* wait for vcn idle */
1038                 r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS,
1039                                        UVD_STATUS__IDLE, 0x7);
1040                 if (r)
1041                         goto Done;
1042
1043                 tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
1044                         UVD_LMI_STATUS__READ_CLEAN_MASK |
1045                         UVD_LMI_STATUS__WRITE_CLEAN_MASK |
1046                         UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
1047                 r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
1048                                        tmp);
1049                 if (r)
1050                         goto Done;
1051
1052                 /* stall UMC channel */
1053                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2);
1054                 tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
1055                 WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp);
1056                 tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
1057                         UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
1058                 r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp,
1059                                        tmp);
1060                 if (r)
1061                         goto Done;
1062
1063                 /* block VCPU register access */
1064                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL),
1065                          UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
1066                          ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
1067
1068                 /* put VCPU into reset */
1069                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL),
1070                          UVD_VCPU_CNTL__BLK_RST_MASK,
1071                          ~UVD_VCPU_CNTL__BLK_RST_MASK);
1072
1073                 /* disable VCPU clock */
1074                 WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0,
1075                          ~(UVD_VCPU_CNTL__CLK_EN_MASK));
1076
1077                 /* reset LMI UMC/LMI/VCPU */
1078                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
1079                 tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
1080                 WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
1081
1082                 tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET);
1083                 tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
1084                 WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp);
1085
1086                 /* clear VCN status */
1087                 WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0);
1088
1089                 /* apply HW clock gating */
1090                 vcn_v4_0_3_enable_clock_gating(adev, i);
1091         }
1092 Done:
1093         if (adev->pm.dpm_enabled)
1094                 amdgpu_dpm_enable_uvd(adev, false);
1095
1096         return 0;
1097 }
1098
1099 /**
1100  * vcn_v4_0_3_pause_dpg_mode - VCN pause with dpg mode
1101  *
1102  * @adev: amdgpu_device pointer
1103  * @inst_idx: instance number index
1104  * @new_state: pause state
1105  *
1106  * Pause dpg mode for VCN block
1107  */
1108 static int vcn_v4_0_3_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
1109                                 struct dpg_pause_state *new_state)
1110 {
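        /* Pause handling is a no-op on this IP version. */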
1111
1112         return 0;
1113 }
1114
1115 /**
1116  * vcn_v4_0_3_unified_ring_get_rptr - get unified read pointer
1117  *
1118  * @ring: amdgpu_ring pointer
1119  *
1120  * Returns the current hardware unified read pointer
1121  */
1122 static uint64_t vcn_v4_0_3_unified_ring_get_rptr(struct amdgpu_ring *ring)
1123 {
1124         struct amdgpu_device *adev = ring->adev;
1125
1126         if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1127                 DRM_ERROR("wrong ring id is identified in %s", __func__);
1128
1129         return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR);
1130 }
1131
1132 /**
1133  * vcn_v4_0_3_unified_ring_get_wptr - get unified write pointer
1134  *
1135  * @ring: amdgpu_ring pointer
1136  *
1137  * Returns the current hardware unified write pointer
1138  */
1139 static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
1140 {
1141         struct amdgpu_device *adev = ring->adev;
1142
1143         if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1144                 DRM_ERROR("wrong ring id is identified in %s", __func__);
1145
1146         if (ring->use_doorbell)
1147                 return *ring->wptr_cpu_addr;
1148         else
1149                 return RREG32_SOC15(VCN, GET_INST(VCN, ring->me),
1150                                     regUVD_RB_WPTR);
1151 }
1152
1153 /**
1154  * vcn_v4_0_3_unified_ring_set_wptr - set enc write pointer
1155  *
1156  * @ring: amdgpu_ring pointer
1157  *
1158  * Commits the enc write pointer to the hardware
1159  */
1160 static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring)
1161 {
1162         struct amdgpu_device *adev = ring->adev;
1163
1164         if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
1165                 DRM_ERROR("wrong ring id is identified in %s", __func__);
1166
1167         if (ring->use_doorbell) {
1168                 *ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
1169                 WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
1170         } else {
1171                 WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR,
1172                              lower_32_bits(ring->wptr));
1173         }
1174 }
1175
1176 static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = {
1177         .type = AMDGPU_RING_TYPE_VCN_ENC,
1178         .align_mask = 0x3f,
1179         .nop = VCN_ENC_CMD_NO_OP,
1180         .get_rptr = vcn_v4_0_3_unified_ring_get_rptr,
1181         .get_wptr = vcn_v4_0_3_unified_ring_get_wptr,
1182         .set_wptr = vcn_v4_0_3_unified_ring_set_wptr,
1183         .emit_frame_size =
1184                 SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1185                 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
1186                 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
1187                 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
1188                 1, /* vcn_v2_0_enc_ring_insert_end */
1189         .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
1190         .emit_ib = vcn_v2_0_enc_ring_emit_ib,
1191         .emit_fence = vcn_v2_0_enc_ring_emit_fence,
1192         .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
1193         .test_ring = amdgpu_vcn_enc_ring_test_ring,
1194         .test_ib = amdgpu_vcn_unified_ring_test_ib,
1195         .insert_nop = amdgpu_ring_insert_nop,
1196         .insert_end = vcn_v2_0_enc_ring_insert_end,
1197         .pad_ib = amdgpu_ring_generic_pad_ib,
1198         .begin_use = amdgpu_vcn_ring_begin_use,
1199         .end_use = amdgpu_vcn_ring_end_use,
1200         .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
1201         .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
1202         .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
1203 };
1204
1205 /**
1206  * vcn_v4_0_3_set_unified_ring_funcs - set unified ring functions
1207  *
1208  * @adev: amdgpu_device pointer
1209  *
1210  * Set unified ring functions
1211  */
1212 static void vcn_v4_0_3_set_unified_ring_funcs(struct amdgpu_device *adev)
1213 {
1214         int i, vcn_inst;
1215
1216         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1217                 adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_3_unified_ring_vm_funcs;
1218                 adev->vcn.inst[i].ring_enc[0].me = i;
1219                 vcn_inst = GET_INST(VCN, i);
1220                 adev->vcn.inst[i].aid_id =
1221                         vcn_inst / adev->vcn.num_inst_per_aid;
1222         }
1223         DRM_DEV_INFO(adev->dev, "VCN decode is enabled in VM mode\n");
1224 }
1225
1226 /**
1227  * vcn_v4_0_3_is_idle - check VCN block is idle
1228  *
1229  * @handle: amdgpu_device pointer
1230  *
1231  * Check whether VCN block is idle
1232  */
1233 static bool vcn_v4_0_3_is_idle(void *handle)
1234 {
1235         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1236         int i, ret = 1;
1237
1238         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1239                 ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) ==
1240                         UVD_STATUS__IDLE);
1241         }
1242
1243         return ret;
1244 }
1245
1246 /**
1247  * vcn_v4_0_3_wait_for_idle - wait for VCN block idle
1248  *
1249  * @handle: amdgpu_device pointer
1250  *
1251  * Wait for VCN block idle
1252  */
1253 static int vcn_v4_0_3_wait_for_idle(void *handle)
1254 {
1255         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1256         int i, ret = 0;
1257
1258         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1259                 ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS,
1260                                          UVD_STATUS__IDLE, UVD_STATUS__IDLE);
1261                 if (ret)
1262                         return ret;
1263         }
1264
1265         return ret;
1266 }
1267
1268 /**
 * vcn_v4_0_3_set_clockgating_state - set VCN block clockgating state
1269  *
1270  * @handle: amdgpu_device pointer
1271  * @state: clock gating state
1272  *
1273  * Set VCN block clockgating state
1274  */
1275 static int vcn_v4_0_3_set_clockgating_state(void *handle,
1276                                           enum amd_clockgating_state state)
1277 {
1278         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1279         bool enable = (state == AMD_CG_STATE_GATE);
1280         int i;
1281
1282         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1283                 if (enable) {
1284                         if (RREG32_SOC15(VCN, GET_INST(VCN, i),
1285                                          regUVD_STATUS) != UVD_STATUS__IDLE)
1286                                 return -EBUSY;
1287                         vcn_v4_0_3_enable_clock_gating(adev, i);
1288                 } else {
1289                         vcn_v4_0_3_disable_clock_gating(adev, i);
1290                 }
1291         }
1292         return 0;
1293 }
1294
1295 /**
1296  * vcn_v4_0_3_set_powergating_state - set VCN block powergating state
1297  *
1298  * @handle: amdgpu_device pointer
1299  * @state: power gating state
1300  *
1301  * Set VCN block powergating state
1302  */
1303 static int vcn_v4_0_3_set_powergating_state(void *handle,
1304                                           enum amd_powergating_state state)
1305 {
1306         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1307         int ret;
1308
1309         if (state == adev->vcn.cur_state)
1310                 return 0;
1311
1312         if (state == AMD_PG_STATE_GATE)
1313                 ret = vcn_v4_0_3_stop(adev);
1314         else
1315                 ret = vcn_v4_0_3_start(adev);
1316
1317         if (!ret)
1318                 adev->vcn.cur_state = state;
1319
1320         return ret;
1321 }
1322
1323 /**
1324  * vcn_v4_0_3_set_interrupt_state - set VCN block interrupt state
1325  *
1326  * @adev: amdgpu_device pointer
1327  * @source: interrupt sources
1328  * @type: interrupt types
1329  * @state: interrupt states
1330  *
1331  * Set VCN block interrupt state
1332  */
1333 static int vcn_v4_0_3_set_interrupt_state(struct amdgpu_device *adev,
1334                                         struct amdgpu_irq_src *source,
1335                                         unsigned int type,
1336                                         enum amdgpu_interrupt_state state)
1337 {
1338         return 0;
1339 }
1340
1341 /**
1342  * vcn_v4_0_3_process_interrupt - process VCN block interrupt
1343  *
1344  * @adev: amdgpu_device pointer
1345  * @source: interrupt sources
1346  * @entry: interrupt entry from clients and sources
1347  *
1348  * Process VCN block interrupt
1349  */
1350 static int vcn_v4_0_3_process_interrupt(struct amdgpu_device *adev,
1351                                       struct amdgpu_irq_src *source,
1352                                       struct amdgpu_iv_entry *entry)
1353 {
1354         uint32_t i, inst;
1355
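        /*
         * Translate the IH node_id into a physical AID index so the matching
         * VCN instance can be found below.
         */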
1356         i = node_id_to_phys_map[entry->node_id];
1357
1358         DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n");
1359
1360         for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst)
1361                 if (adev->vcn.inst[inst].aid_id == i)
1362                         break;
1363
1364         if (inst >= adev->vcn.num_vcn_inst) {
1365                 dev_WARN_ONCE(adev->dev, 1,
1366                               "Interrupt received for unknown VCN instance %d",
1367                               entry->node_id);
1368                 return 0;
1369         }
1370
1371         switch (entry->src_id) {
1372         case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
1373                 amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]);
1374                 break;
1375         default:
1376                 DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
1377                           entry->src_id, entry->src_data[0]);
1378                 break;
1379         }
1380
1381         return 0;
1382 }
1383
1384 static const struct amdgpu_irq_src_funcs vcn_v4_0_3_irq_funcs = {
1385         .set = vcn_v4_0_3_set_interrupt_state,
1386         .process = vcn_v4_0_3_process_interrupt,
1387 };
1388
1389 /**
1390  * vcn_v4_0_3_set_irq_funcs - set VCN block interrupt irq functions
1391  *
1392  * @adev: amdgpu_device pointer
1393  *
1394  * Set VCN block interrupt irq functions
1395  */
1396 static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
1397 {
1398         int i;
1399
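        /*
         * All instances share the first instance's IRQ source; register one
         * interrupt type per VCN instance.
         */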
1400         for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
1401                 adev->vcn.inst->irq.num_types++;
1402         }
1403         adev->vcn.inst->irq.funcs = &vcn_v4_0_3_irq_funcs;
1404 }
1405
1406 static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
1407         .name = "vcn_v4_0_3",
1408         .early_init = vcn_v4_0_3_early_init,
1409         .late_init = NULL,
1410         .sw_init = vcn_v4_0_3_sw_init,
1411         .sw_fini = vcn_v4_0_3_sw_fini,
1412         .hw_init = vcn_v4_0_3_hw_init,
1413         .hw_fini = vcn_v4_0_3_hw_fini,
1414         .suspend = vcn_v4_0_3_suspend,
1415         .resume = vcn_v4_0_3_resume,
1416         .is_idle = vcn_v4_0_3_is_idle,
1417         .wait_for_idle = vcn_v4_0_3_wait_for_idle,
1418         .check_soft_reset = NULL,
1419         .pre_soft_reset = NULL,
1420         .soft_reset = NULL,
1421         .post_soft_reset = NULL,
1422         .set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
1423         .set_powergating_state = vcn_v4_0_3_set_powergating_state,
1424 };
1425
1426 const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
1427         .type = AMD_IP_BLOCK_TYPE_VCN,
1428         .major = 4,
1429         .minor = 0,
1430         .rev = 3,
1431         .funcs = &vcn_v4_0_3_ip_funcs,
1432 };