2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/firmware.h>
27 #include "amdgpu_vcn.h"
29 #include "soc15_common.h"
31 #include "vega10/soc15ip.h"
32 #include "raven1/VCN/vcn_1_0_offset.h"
33 #include "raven1/VCN/vcn_1_0_sh_mask.h"
34 #include "vega10/HDP/hdp_4_0_offset.h"
35 #include "raven1/MMHUB/mmhub_9_1_offset.h"
36 #include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
/* Forward declarations for functions referenced before their definitions. */
static int vcn_v1_0_start(struct amdgpu_device *adev);
static int vcn_v1_0_stop(struct amdgpu_device *adev);
static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
/**
 * vcn_v1_0_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int vcn_v1_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vcn_v1_0_set_dec_ring_funcs(adev);
	vcn_v1_0_set_irq_funcs(adev);

	return 0;
}
61 * vcn_v1_0_sw_init - sw init for VCN block
63 * @handle: amdgpu_device pointer
65 * Load firmware and sw initialization
67 static int vcn_v1_0_sw_init(void *handle)
69 struct amdgpu_ring *ring;
71 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
74 r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VCN, 124, &adev->vcn.irq);
78 r = amdgpu_vcn_sw_init(adev);
82 r = amdgpu_vcn_resume(adev);
86 ring = &adev->vcn.ring_dec;
87 sprintf(ring->name, "vcn_dec");
88 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.irq, 0);
/**
 * vcn_v1_0_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v1_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}
115 * vcn_v1_0_hw_init - start and test VCN block
117 * @handle: amdgpu_device pointer
119 * Initialize the hardware, boot up the VCPU and do some testing
121 static int vcn_v1_0_hw_init(void *handle)
123 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
124 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
127 r = vcn_v1_0_start(adev);
132 r = amdgpu_ring_test_ring(ring);
140 DRM_INFO("VCN decode initialized successfully.\n");
146 * vcn_v1_0_hw_fini - stop the hardware block
148 * @handle: amdgpu_device pointer
150 * Stop the VCN block, mark ring as not ready any more
152 static int vcn_v1_0_hw_fini(void *handle)
154 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
155 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
158 r = vcn_v1_0_stop(adev);
/**
 * vcn_v1_0_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v1_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v1_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}
/**
 * vcn_v1_0_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v1_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v1_0_hw_init(adev);

	return r;
}
210 * vcn_v1_0_mc_resume - memory controller programming
212 * @adev: amdgpu_device pointer
214 * Let the VCN memory controller know it's offsets
216 static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
221 /* programm memory controller bits 0-27 */
222 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
223 lower_32_bits(adev->vcn.gpu_addr));
224 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
225 upper_32_bits(adev->vcn.gpu_addr));
227 /* Current FW has no signed header, but will be added later on */
228 /* offset = AMDGPU_VCN_FIRMWARE_OFFSET; */
230 size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
231 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), offset >> 3);
232 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size);
235 size = AMDGPU_VCN_HEAP_SIZE;
236 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), offset >> 3);
237 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), size);
240 size = AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40);
241 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), offset >> 3);
242 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), size);
244 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG),
245 adev->gfx.config.gb_addr_config);
246 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG),
247 adev->gfx.config.gb_addr_config);
248 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG),
249 adev->gfx.config.gb_addr_config);
253 * vcn_v1_0_start - start VCN block
255 * @adev: amdgpu_device pointer
257 * Setup and start the VCN block
259 static int vcn_v1_0_start(struct amdgpu_device *adev)
261 struct amdgpu_ring *ring = &adev->vcn.ring_dec;
262 uint32_t rb_bufsz, tmp;
263 uint32_t lmi_swap_cntl;
266 /* disable byte swapping */
269 vcn_v1_0_mc_resume(adev);
271 /* disable clock gating */
272 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0,
273 ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK);
275 /* disable interupt */
276 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0,
277 ~UVD_MASTINT_EN__VCPU_EN_MASK);
279 /* stall UMC and register bus before resetting VCPU */
280 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
281 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
282 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
285 /* put LMI, VCPU, RBC etc... into reset */
286 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
287 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
288 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
289 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
290 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
291 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
292 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
293 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
294 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
297 /* initialize VCN memory controller */
298 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL),
299 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
300 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
301 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
302 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
303 UVD_LMI_CTRL__REQ_MODE_MASK |
307 /* swap (8 in 32) RB and IB */
310 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), lmi_swap_cntl);
312 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040);
313 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0);
314 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040);
315 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0);
316 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0);
317 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88);
319 /* take all subblocks out of reset, except VCPU */
320 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
321 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
324 /* enable VCPU clock */
325 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL),
326 UVD_VCPU_CNTL__CLK_EN_MASK);
329 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
330 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
332 /* boot up the VCPU */
333 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0);
336 for (i = 0; i < 10; ++i) {
339 for (j = 0; j < 100; ++j) {
340 status = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS));
349 DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
350 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
351 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
352 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
354 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0,
355 ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
361 DRM_ERROR("VCN decode not responding, giving up!!!\n");
364 /* enable master interrupt */
365 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN),
366 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
367 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
369 /* clear the bit 4 of VCN_STATUS */
370 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0,
371 ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
373 /* force RBC into idle state */
374 rb_bufsz = order_base_2(ring->ring_size);
375 tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
376 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
377 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
378 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
379 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
380 tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
381 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp);
383 /* set the write pointer delay */
384 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0);
386 /* set the wb address */
387 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR),
388 (upper_32_bits(ring->gpu_addr) >> 2));
390 /* programm the RB_BASE for ring buffer */
391 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
392 lower_32_bits(ring->gpu_addr));
393 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
394 upper_32_bits(ring->gpu_addr));
396 /* Initialize the ring buffer's read and write pointers */
397 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR), 0);
399 ring->wptr = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
400 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR),
401 lower_32_bits(ring->wptr));
403 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
404 ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
410 * vcn_v1_0_stop - stop VCN block
412 * @adev: amdgpu_device pointer
416 static int vcn_v1_0_stop(struct amdgpu_device *adev)
418 /* force RBC into idle state */
419 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0x11010101);
421 /* Stall UMC and register bus before resetting VCPU */
422 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
423 UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
424 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
427 /* put VCPU into reset */
428 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
429 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
432 /* disable VCPU clock */
433 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0x0);
435 /* Unstall UMC and register bus */
436 WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0,
437 ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
442 static int vcn_v1_0_set_clockgating_state(void *handle,
443 enum amd_clockgating_state state)
445 /* needed for driver unload*/
450 * vcn_v1_0_dec_ring_get_rptr - get read pointer
452 * @ring: amdgpu_ring pointer
454 * Returns the current hardware read pointer
456 static uint64_t vcn_v1_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
458 struct amdgpu_device *adev = ring->adev;
460 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR));
464 * vcn_v1_0_dec_ring_get_wptr - get write pointer
466 * @ring: amdgpu_ring pointer
468 * Returns the current hardware write pointer
470 static uint64_t vcn_v1_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
472 struct amdgpu_device *adev = ring->adev;
474 return RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR));
478 * vcn_v1_0_dec_ring_set_wptr - set write pointer
480 * @ring: amdgpu_ring pointer
482 * Commits the write pointer to the hardware
484 static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
486 struct amdgpu_device *adev = ring->adev;
488 WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR), lower_32_bits(ring->wptr));
492 * vcn_v1_0_dec_ring_emit_fence - emit an fence & trap command
494 * @ring: amdgpu_ring pointer
495 * @fence: fence to emit
497 * Write a fence and a trap command to the ring.
499 static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
502 WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
504 amdgpu_ring_write(ring,
505 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0));
506 amdgpu_ring_write(ring, seq);
507 amdgpu_ring_write(ring,
508 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
509 amdgpu_ring_write(ring, addr & 0xffffffff);
510 amdgpu_ring_write(ring,
511 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
512 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
513 amdgpu_ring_write(ring,
514 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
515 amdgpu_ring_write(ring, 0);
517 amdgpu_ring_write(ring,
518 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
519 amdgpu_ring_write(ring, 0);
520 amdgpu_ring_write(ring,
521 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
522 amdgpu_ring_write(ring, 0);
523 amdgpu_ring_write(ring,
524 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
525 amdgpu_ring_write(ring, 2);
529 * vcn_v1_0_dec_ring_hdp_invalidate - emit an hdp invalidate
531 * @ring: amdgpu_ring pointer
533 * Emits an hdp invalidate.
535 static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
537 amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
538 amdgpu_ring_write(ring, 1);
542 * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
544 * @ring: amdgpu_ring pointer
545 * @ib: indirect buffer to execute
547 * Write ring commands to execute the indirect buffer
549 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
550 struct amdgpu_ib *ib,
551 unsigned vm_id, bool ctx_switch)
553 amdgpu_ring_write(ring,
554 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
555 amdgpu_ring_write(ring, vm_id);
557 amdgpu_ring_write(ring,
558 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0));
559 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
560 amdgpu_ring_write(ring,
561 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0));
562 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
563 amdgpu_ring_write(ring,
564 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0));
565 amdgpu_ring_write(ring, ib->length_dw);
568 static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
569 uint32_t data0, uint32_t data1)
571 amdgpu_ring_write(ring,
572 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
573 amdgpu_ring_write(ring, data0);
574 amdgpu_ring_write(ring,
575 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
576 amdgpu_ring_write(ring, data1);
577 amdgpu_ring_write(ring,
578 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
579 amdgpu_ring_write(ring, 8);
582 static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
583 uint32_t data0, uint32_t data1, uint32_t mask)
585 amdgpu_ring_write(ring,
586 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
587 amdgpu_ring_write(ring, data0);
588 amdgpu_ring_write(ring,
589 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
590 amdgpu_ring_write(ring, data1);
591 amdgpu_ring_write(ring,
592 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
593 amdgpu_ring_write(ring, mask);
594 amdgpu_ring_write(ring,
595 PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
596 amdgpu_ring_write(ring, 12);
599 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
600 unsigned vm_id, uint64_t pd_addr)
602 struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
603 uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
604 uint32_t data0, data1, mask;
605 unsigned eng = ring->vm_inv_eng;
607 pd_addr = pd_addr | 0x1; /* valid bit */
608 /* now only use physical base address of PDE and valid */
609 BUG_ON(pd_addr & 0xFFFF00000000003EULL);
611 data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
612 data1 = upper_32_bits(pd_addr);
613 vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
615 data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
616 data1 = lower_32_bits(pd_addr);
617 vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
619 data0 = (hub->ctx0_ptb_addr_lo32 + vm_id * 2) << 2;
620 data1 = lower_32_bits(pd_addr);
622 vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
625 data0 = (hub->vm_inv_eng0_req + eng) << 2;
627 vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
630 data0 = (hub->vm_inv_eng0_ack + eng) << 2;
633 vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
636 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
637 struct amdgpu_irq_src *source,
639 enum amdgpu_interrupt_state state)
644 static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev,
645 struct amdgpu_irq_src *source,
646 struct amdgpu_iv_entry *entry)
648 DRM_DEBUG("IH: VCN TRAP\n");
650 amdgpu_fence_process(&adev->vcn.ring_dec);
655 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
657 .early_init = vcn_v1_0_early_init,
659 .sw_init = vcn_v1_0_sw_init,
660 .sw_fini = vcn_v1_0_sw_fini,
661 .hw_init = vcn_v1_0_hw_init,
662 .hw_fini = vcn_v1_0_hw_fini,
663 .suspend = vcn_v1_0_suspend,
664 .resume = vcn_v1_0_resume,
665 .is_idle = NULL /* vcn_v1_0_is_idle */,
666 .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */,
667 .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */,
668 .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */,
669 .soft_reset = NULL /* vcn_v1_0_soft_reset */,
670 .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */,
671 .set_clockgating_state = vcn_v1_0_set_clockgating_state,
672 .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */,
675 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
676 .type = AMDGPU_RING_TYPE_VCN_DEC,
678 .nop = PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0),
679 .support_64bit_ptrs = false,
680 .get_rptr = vcn_v1_0_dec_ring_get_rptr,
681 .get_wptr = vcn_v1_0_dec_ring_get_wptr,
682 .set_wptr = vcn_v1_0_dec_ring_set_wptr,
684 2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
685 34 * AMDGPU_MAX_VMHUBS + /* vcn_v1_0_dec_ring_emit_vm_flush */
686 14 + 14, /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
687 .emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
688 .emit_ib = vcn_v1_0_dec_ring_emit_ib,
689 .emit_fence = vcn_v1_0_dec_ring_emit_fence,
690 .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
691 .emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
692 .test_ring = amdgpu_vcn_dec_ring_test_ring,
693 .test_ib = amdgpu_vcn_dec_ring_test_ib,
694 .insert_nop = amdgpu_ring_insert_nop,
695 .pad_ib = amdgpu_ring_generic_pad_ib,
696 .begin_use = amdgpu_vcn_ring_begin_use,
697 .end_use = amdgpu_vcn_ring_end_use,
700 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
702 adev->vcn.ring_dec.funcs = &vcn_v1_0_dec_ring_vm_funcs;
703 DRM_INFO("VCN decode is enabled in VM mode\n");
706 static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
707 .set = vcn_v1_0_set_interrupt_state,
708 .process = vcn_v1_0_process_interrupt,
711 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
713 adev->vcn.irq.num_types = 1;
714 adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
717 const struct amdgpu_ip_block_version vcn_v1_0_ip_block =
719 .type = AMD_IP_BLOCK_TYPE_VCN,
723 .funcs = &vcn_v1_0_ip_funcs,