/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "mmsch_v1_0.h"

#include "vce/vce_4_0_offset.h"
#include "vce/vce_4_0_default.h"
#include "vce/vce_4_0_sh_mask.h"
#include "mmhub/mmhub_1_0_offset.h"
#include "mmhub/mmhub_1_0_sh_mask.h"

#include "ivsrcid/vce/irqsrcs_vce_4_0.h"
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

#define VCE_V4_0_FW_SIZE	(384 * 1024)
#define VCE_V4_0_STACK_SIZE	(64 * 1024)
#define VCE_V4_0_DATA_SIZE	((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024))
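
/* The VCPU buffer appears to be laid out as three consecutive regions that
 * are mapped into the engine's cache windows in vce_v4_0_mc_resume(): the
 * firmware image (skipped when the PSP loads the firmware from the TMR),
 * the stack and the data segment.  VCE_V4_0_DATA_SIZE reserves 16K per
 * encode session handle plus 52K of shared data.
 */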
static void vce_v4_0_mc_resume(struct amdgpu_device *adev);
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev);
/**
 * vce_v4_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR));
	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3));
}
/**
 * vce_v4_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return adev->wb.wb[ring->wptr_offs];

	if (ring->me == 0)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR));
	else if (ring->me == 1)
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2));
	else
		return RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3));
}
/**
 * vce_v4_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		return;
	}

	if (ring->me == 0)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR),
		       lower_32_bits(ring->wptr));
	else if (ring->me == 1)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2),
		       lower_32_bits(ring->wptr));
	else
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3),
		       lower_32_bits(ring->wptr));
}
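
/**
 * vce_v4_0_firmware_loaded - wait for the VCE firmware to come up
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS until the VCPU reports its firmware as loaded, kicking
 * the ECPU through a soft reset between retry rounds.  Returns 0 on
 * success, -ETIMEDOUT if the firmware never reports in.
 */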
static int vce_v4_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status =
				RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS));

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
			 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}
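
/**
 * vce_v4_0_mmsch_start - hand the init table to the MM scheduler
 *
 * @adev: amdgpu_device pointer
 * @table: memory descriptor holding the MMSCH init header and commands
 *
 * Under SR-IOV the VF cannot program VCE directly; it points the MMSCH
 * at the descriptor, kicks off initialization via the mailbox and waits
 * for the response to signal completion.
 */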
static int vce_v4_0_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
	uint32_t size;

	size = header->header_size + header->vce_table_size + header->uvd_table_size;

	/* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO), lower_32_bits(addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI), upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID));
	data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_VMID), data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE), size);

	/* 4, set resp to zero */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0);

	WDOORBELL32(adev->vce.ring[0].doorbell_index, 0);
	adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0;
	adev->vce.ring[0].wptr = 0;
	adev->vce.ring[0].wptr_old = 0;

	/* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001);

	data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
	loop = 1000;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(10);
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP));
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
		return -EBUSY;
	}

	return 0;
}
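
/**
 * vce_v4_0_sriov_start - build the MMSCH init table and start VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Instead of writing registers directly, record the whole MC_RESUME and
 * start sequence as direct-write/read-modify-write/poll commands in the
 * shared init table, then let the MMSCH replay it on the VF's behalf.
 */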
static int vce_v4_0_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	if (header->vce_table_offset == 0 && header->vce_table_size == 0) {
		header->version = MMSCH_VERSION;
		header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2;

		if (header->uvd_table_offset == 0 && header->uvd_table_size == 0)
			header->vce_table_offset = header->header_size;
		else
			header->vce_table_offset = header->uvd_table_size + header->uvd_table_offset;

		init_table += header->vce_table_offset;

		ring = &adev->vce.ring[0];
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO),
					    lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI),
					    upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE),
					    ring->ring_size / 4);

		/* BEGIN OF MC_RESUME */
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x398000);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), ~0x1, 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

		offset = AMDGPU_VCE_FIRMWARE_OFFSET;
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			uint32_t low = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
			uint32_t hi = adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi;
			uint64_t tmr_mc_addr = (uint64_t)(hi) << 32 | low;

			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0), tmr_mc_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
						(tmr_mc_addr >> 40) & 0xff);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
						adev->vce.gpu_addr >> 8);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
						(adev->vce.gpu_addr >> 40) & 0xff);
			MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0),
						offset & ~0x0f000000);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_40BIT_BAR1),
						adev->vce.gpu_addr >> 8);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_64BIT_BAR1),
						(adev->vce.gpu_addr >> 40) & 0xff);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_40BIT_BAR2),
						adev->vce.gpu_addr >> 8);
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0,
						mmVCE_LMI_VCPU_CACHE_64BIT_BAR2),
						(adev->vce.gpu_addr >> 40) & 0xff);

		size = VCE_V4_0_FW_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

		offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
		size = VCE_V4_0_STACK_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1),
					    (offset & ~0x0f000000) | (1 << 24));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

		offset += size;
		size = VCE_V4_0_DATA_SIZE;
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2),
					    (offset & ~0x0f000000) | (2 << 24));
		MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
						   VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
						   VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

		/* end of MC_RESUME */
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
						   VCE_STATUS__JOB_BUSY_MASK, ~VCE_STATUS__JOB_BUSY_MASK);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL),
						   ~0x200001, VCE_VCPU_CNTL__CLK_EN_MASK);
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
						   ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, 0);

		MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
					      VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK,
					      VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK);

		/* clear BUSY flag */
		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS),
						   ~VCE_STATUS__JOB_BUSY_MASK, 0);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		header->vce_table_size = table_size;
	}

	return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table);
}
/**
 * vce_v4_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v4_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	ring = &adev->vce.ring[0];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE), ring->ring_size / 4);

	ring = &adev->vce.ring[1];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR2), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO2), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI2), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE2), ring->ring_size / 4);

	ring = &adev->vce.ring[2];

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_RPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_WPTR3), lower_32_bits(ring->wptr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_LO3), ring->gpu_addr);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_BASE_HI3), upper_32_bits(ring->gpu_addr));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_SIZE3), ring->ring_size / 4);

	vce_v4_0_mc_resume(adev);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), VCE_STATUS__JOB_BUSY_MASK,
		 ~VCE_STATUS__JOB_BUSY_MASK);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 1, ~0x200001);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET), 0,
		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	mdelay(100);

	r = vce_v4_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}
static int vce_v4_0_stop(struct amdgpu_device *adev)
{
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);

	/* hold on ECPU */
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SOFT_RESET),
		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	/* clear VCE_STATUS */
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);

	/* Set Clock-Gating off */
	/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
		vce_v4_0_set_vce_sw_clock_gating(adev, false);
	*/

	return 0;
}
static int vce_v4_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) /* currently only VCE0 supports SR-IOV */
		adev->vce.num_rings = 1;
	else
		adev->vce.num_rings = 3;

	vce_v4_0_set_ring_funcs(adev);
	vce_v4_0_set_irq_funcs(adev);

	return 0;
}
static int vce_v4_0_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;

	unsigned size;
	int r, i;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCE0, 167, &adev->vce.irq);
	if (r)
		return r;

	size = VCE_V4_0_STACK_SIZE + VCE_V4_0_DATA_SIZE;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		size += VCE_V4_0_FW_SIZE;

	r = amdgpu_vce_sw_init(adev, size);
	if (r)
		return r;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		const struct common_firmware_header *hdr;
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);

		adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
		if (!adev->vce.saved_bo)
			return -ENOMEM;

		hdr = (const struct common_firmware_header *)adev->vce.fw->data;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].ucode_id = AMDGPU_UCODE_ID_VCE;
		adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].fw = adev->vce.fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
		DRM_INFO("PSP loading VCE firmware\n");
	} else {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->vce.num_rings; i++) {
		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		if (amdgpu_sriov_vf(adev)) {
			/* DOORBELL only works under SRIOV */
			ring->use_doorbell = true;

			/* currently only use the first encoding ring for sriov,
			 * so set unused location for other unused rings.
			 */
			if (i == 0)
				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
			else
				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
		}
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     AMDGPU_RING_PRIO_DEFAULT);
		if (r)
			return r;
	}

	r = amdgpu_vce_entity_init(adev);
	if (r)
		return r;

	r = amdgpu_virt_alloc_mm_table(adev);
	if (r)
		return r;

	return r;
}
static int vce_v4_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* free MM table */
	amdgpu_virt_free_mm_table(adev);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		kvfree(adev->vce.saved_bo);
		adev->vce.saved_bo = NULL;
	}

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}
static int vce_v4_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		r = vce_v4_0_sriov_start(adev);
	else
		r = vce_v4_0_start(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}
static int vce_v4_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev)) {
		/* vce_v4_0_wait_for_idle(handle); */
		vce_v4_0_stop(adev);
	} else {
		/* full access mode, so don't touch any VCE register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
	}

	return 0;
}
static int vce_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
		void *ptr = adev->vce.cpu_addr;

		memcpy_fromio(adev->vce.saved_bo, ptr, size);
	}

	r = vce_v4_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}
static int vce_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
		void *ptr = adev->vce.cpu_addr;

		memcpy_toio(ptr, adev->vce.saved_bo, size);
	} else {
		r = amdgpu_vce_resume(adev);
		if (r)
			return r;
	}

	return vce_v4_0_hw_init(adev);
}
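
/**
 * vce_v4_0_mc_resume - program the memory controller interface
 *
 * @adev: amdgpu_device pointer
 *
 * Set up clock gating overrides and the LMI, then point the three VCPU
 * cache windows at the firmware image (TMR based when the PSP loaded
 * it), the stack and the data region.
 */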
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t offset, size;
	uint64_t tmr_mc_addr;

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), 0, ~(1 << 16));
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), 0x1FF000, ~0xFF9FF000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), 0x3F, ~0x3F);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), 0x1FF);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL), 0x00398000);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CACHE_CTRL), 0x0, ~0x1);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_SWAP_CNTL1), 0);
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VM_CTRL), 0);

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		tmr_mc_addr = (uint64_t)(adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_hi) << 32 |
			      adev->firmware.ucode[AMDGPU_UCODE_ID_VCE].tmr_mc_addr_lo;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
		       (tmr_mc_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
		       (tmr_mc_addr >> 40) & 0xff);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), 0);
	} else {
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR0),
		       (adev->vce.gpu_addr >> 8));
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR0),
		       (adev->vce.gpu_addr >> 40) & 0xff);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET0), offset & ~0x0f000000);
	}

	size = VCE_V4_0_FW_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE0), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR1), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR1), (adev->vce.gpu_addr >> 40) & 0xff);
	offset = (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) ? offset + size : 0;
	size = VCE_V4_0_STACK_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET1), (offset & ~0x0f000000) | (1 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE1), size);

	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_40BIT_BAR2), (adev->vce.gpu_addr >> 8));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_VCPU_CACHE_64BIT_BAR2), (adev->vce.gpu_addr >> 40) & 0xff);
	offset += size;
	size = VCE_V4_0_DATA_SIZE;
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_OFFSET2), (offset & ~0x0f000000) | (2 << 24));
	WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CACHE_SIZE2), size);

	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), 0x0, ~0x100);
	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN),
		 VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
}
static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

#if 0
/* The hooks below are currently compiled out; the amd_ip_funcs table at
 * the bottom of the file installs them as NULL.
 */
static bool vce_v4_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 mask = 0;

	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE0) ? 0 : SRBM_STATUS2__VCE0_BUSY_MASK;
	mask |= (adev->vce.harvest_config & AMDGPU_VCE_HARVEST_VCE1) ? 0 : SRBM_STATUS2__VCE1_BUSY_MASK;

	return !(RREG32(mmSRBM_STATUS2) & mask);
}
static int vce_v4_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++)
		if (vce_v4_0_is_idle(handle))
			return 0;

	return -ETIMEDOUT;
}
#define VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK	0x00000008L	/* AUTO_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK	0x00000010L	/* RB0_BUSY */
#define VCE_STATUS_VCPU_REPORT_RB1_BUSY_MASK	0x00000020L	/* RB1_BUSY */
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
				     VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
static bool vce_v4_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	/* According to the VCE team, we should use VCE_STATUS instead of
	 * the SRBM_STATUS.VCE_BUSY bit for busy status checking.
	 * GRBM_GFX_INDEX.INSTANCE_INDEX is used to specify which VCE
	 * instance's registers are accessed
	 * (0 for 1st instance, 0x10 for 2nd instance).
	 *
	 *VCE_STATUS
	 *|UENC|ACPI|AUTO ACTIVE|RB1 |RB0 |RB2 |          |FW_LOADED|JOB |
	 *|----+----+-----------+----+----+----+----------+---------+----|
	 *|bit8|bit7|   bit6    |bit5|bit4|bit3|   bit2   |  bit1   |bit0|
	 *
	 * The VCE team suggests using bits 3--6 for the busy status check.
	 */
	mutex_lock(&adev->grbm_idx_mutex);
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
	if (RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS)) & AMDGPU_VCE_STATUS_BUSY_MASK) {
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
	}
	WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	if (srbm_soft_reset) {
		adev->vce.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->vce.srbm_soft_reset = 0;
		return false;
	}
}
static int vce_v4_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->vce.srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->vce.srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}
static int vce_v4_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_suspend(adev);
}
static int vce_v4_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->vce.srbm_soft_reset)
		return 0;

	mdelay(5);

	return vce_v4_0_resume(adev);
}
static void vce_v4_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
	u32 tmp, data;

	tmp = data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL));
	if (override)
		data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
	else
		data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;

	if (tmp != data)
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_RB_ARB_CTRL), data);
}
static void vce_v4_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
					     bool gated)
{
	u32 data;

	/* Set Override to disable Clock Gating */
	vce_v4_0_override_vce_clock_gating(adev, true);

	/* This function enables MGCG which is controlled by firmware.
	   With the clocks in the gated state the core is still
	   accessible but the firmware will throttle the clocks on the
	   fly as necessary.
	*/
	if (gated) {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		data |= 0x1ff;
		data &= ~0xef0000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		data |= 0x3ff000;
		data &= ~0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		data |= 0x2;
		data &= ~0x00010000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		data |= 0x37f;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			0x8;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	} else {
		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B));
		data &= ~0x80010;
		data |= 0xe70008;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_B), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
		data |= 0xffc00000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2));
		data |= 0x10000;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING_2), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING));
		data &= ~0x3ff;
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_REG_CLOCK_GATING), data);

		data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL));
		data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
			  VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
			  0x8);
		WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_DMA_DCLK_CTRL), data);
	}

	vce_v4_0_override_vce_clock_gating(adev, false);
}
static void vce_v4_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
	else
		tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}
static int vce_v4_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);
	int i;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_TONGA) ||
	    (adev->asic_type == CHIP_FIJI))
		vce_v4_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
		return 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < 2; i++) {
		/* Program VCE Instance 0 or 1 if not harvested */
		if (adev->vce.harvest_config & (1 << i))
			continue;

		WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);

		if (enable) {
			/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
			uint32_t data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_CLOCK_GATING_A), data);

			/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
			data = RREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING));
			data &= ~(0xf | 0xff0);
			data |= ((0x0 << 0) | (0x04 << 4));
			WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_UENC_CLOCK_GATING), data);
		}

		vce_v4_0_set_vce_sw_clock_gating(adev, enable);
	}

	WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
#endif
static int vce_v4_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vce_v4_0_stop(adev);
	else
		return vce_v4_0_start(adev);
}
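
/* A VCE_CMD_IB_VM packet consists of the VMID followed by the 64-bit IB
 * address and the IB length in dwords.
 */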
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
				  struct amdgpu_ib *ib, uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, VCE_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
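
/* VCE_CMD_FENCE has the firmware write @seq to @addr; the following
 * VCE_CMD_TRAP then raises the trap interrupt that is handled in
 * vce_v4_0_process_interrupt().  64-bit sequence numbers are not
 * supported on this ring.
 */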
static void vce_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
}
static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, VCE_CMD_END);
}
static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
				   uint32_t val, uint32_t mask)
{
	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, val);
}
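
/* Flush the VM through the common GMC helper, then poll the hub's
 * page-table base register until the new value sticks, so the engine
 * never fetches through a stale translation.
 */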
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
				   unsigned int vmid, uint64_t pd_addr)
{
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];

	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for reg writes */
	vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
			       lower_32_bits(pd_addr), 0xffffffff);
}
static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
			       uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, val);
}
static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (!amdgpu_sriov_vf(adev)) {
		if (state == AMDGPU_IRQ_STATE_ENABLE)
			val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

		WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val,
			 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	}

	return 0;
}
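
/* The trap interrupt carries the originating ring index in src_data[0],
 * which maps 1:1 onto adev->vce.ring[].
 */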
static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");

	switch (entry->src_data[0]) {
	case 0:
	case 1:
	case 2:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
const struct amd_ip_funcs vce_v4_0_ip_funcs = {
	.name = "vce_v4_0",
	.early_init = vce_v4_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v4_0_sw_init,
	.sw_fini = vce_v4_0_sw_fini,
	.hw_init = vce_v4_0_hw_init,
	.hw_fini = vce_v4_0_hw_fini,
	.suspend = vce_v4_0_suspend,
	.resume = vce_v4_0_resume,
	.is_idle = NULL /* vce_v4_0_is_idle */,
	.wait_for_idle = NULL /* vce_v4_0_wait_for_idle */,
	.check_soft_reset = NULL /* vce_v4_0_check_soft_reset */,
	.pre_soft_reset = NULL /* vce_v4_0_pre_soft_reset */,
	.soft_reset = NULL /* vce_v4_0_soft_reset */,
	.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
	.set_clockgating_state = vce_v4_0_set_clockgating_state,
	.set_powergating_state = vce_v4_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0x3f,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.vmhub = AMDGPU_MMHUB_0,
	.get_rptr = vce_v4_0_ring_get_rptr,
	.get_wptr = vce_v4_0_ring_get_wptr,
	.set_wptr = vce_v4_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vce_v4_0_emit_vm_flush */
		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
		1, /* vce_v4_0_ring_insert_end */
	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
	.emit_ib = vce_v4_0_ring_emit_ib,
	.emit_vm_flush = vce_v4_0_emit_vm_flush,
	.emit_fence = vce_v4_0_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vce_v4_0_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
	.emit_wreg = vce_v4_0_emit_wreg,
	.emit_reg_wait = vce_v4_0_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v4_0_ring_vm_funcs;
		adev->vce.ring[i].me = i;
	}
	DRM_INFO("VCE enabled in VM mode\n");
}
static const struct amdgpu_irq_src_funcs vce_v4_0_irq_funcs = {
	.set = vce_v4_0_set_interrupt_state,
	.process = vce_v4_0_process_interrupt,
};
static void vce_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v4_0_irq_funcs;
}
const struct amdgpu_ip_block_version vce_v4_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_VCE,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vce_v4_0_ip_funcs,
};