/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "si.h"
#include "sid.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);

static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

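	/* DMA_RB_WPTR holds a byte offset; bits [17:2] are valid, so mask
	 * and shift down to get the write pointer in dwords.
	 */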
	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
}

static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
}

static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring. Pad as necessary with NOPs.
	 */
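	/* The IB packet emitted below is 3 dwords long, so padding to dword 5
	 * of each 8-dword window makes the packet end exactly on the boundary.
	 */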
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence sequence number is written
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

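	/* write the fence */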
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}
	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
}

static void si_dma_stop(struct amdgpu_device *adev)
{
	u32 rb_cntl;
	unsigned i;

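	/* stop routing TTM buffer moves through the DMA engine before
	 * the rings are disabled */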
	amdgpu_sdma_unset_buffer_funcs_helper(adev);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
	}
}

static int si_dma_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;
	int i, r;
	uint64_t rptr_addr;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
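		/* the log2 ring size is shifted into DMA_RB_CNTL starting at
		 * bit 1; bit 0 is DMA_RB_ENABLE, set when the ring is started
		 * below.
		 */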
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);

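		/* program read-pointer write-back so rptr can be read from
		 * memory instead of MMIO */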
		rptr_addr = ring->rptr_gpu_addr;

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

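		/* enable DMA IBs */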
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);

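		/* the context-empty interrupt is unused; keep it masked */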
		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i, index;
	u32 tmp;
	u64 gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);
	if (r) {
		amdgpu_device_wb_free(adev, index);
		return r;
	}

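	/* DMA write packet: header with a one-dword payload count, dst addr
	 * lo, dst addr hi, then the test value itself
	 */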
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	amdgpu_device_wb_free(adev, index);

	return r;
}

/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

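	/* build the same one-dword DMA write as the ring test, this time
	 * packed into an indirect buffer */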
	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
{
	unsigned bytes = count * 8;

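	/* each PTE is 8 bytes, so the copy moves count * 8 bytes */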
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
}

/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
{
	unsigned ndw = count * 2;

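	/* each 64-bit PTE value takes two dwords in the write packet */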
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
				  uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
		ib->ptr[ib->length_dw++] = upper_32_bits(flags);
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

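	/* poll the fence location in memory until the sequence number
	 * catches up to seq */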
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
}

/**
 * si_dma_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page directory base address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
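	/* 0xf << 16: byte enables, so all four bytes of the register
	 * are written */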
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}

static int si_dma_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);

	return 0;
}

static int si_dma_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;
	}

	return 0;
}

static int si_dma_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}

static int si_dma_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);
}

static int si_dma_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dma_stop(adev);

	return 0;
}

static int si_dma_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);
}

static int si_dma_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
}

static bool si_dma_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))
		return false;

	return true;
}

static int si_dma_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int si_dma_soft_reset(void *handle)
{
	DRM_INFO("si_dma_soft_reset: not implemented\n");
	return 0;
}

static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     unsigned type,
				     enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
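	/* src_id 224 is the DMA0 trap event and 244 the DMA1 trap,
	 * matching the ids registered in si_dma_sw_init */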
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);

	return 0;
}

static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	u32 orig, data, offset;
	int i;
	bool enable;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			if (data != orig)
				WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			data = 0xff000000;
			if (data != orig)
				WREG32(DMA_CLK_CTRL + offset, data);
		}
	}

	return 0;
}

static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	u32 tmp;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32(DMA_PGFSM_WRITE,  0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);

	return 0;
}

static const struct amd_ip_funcs si_dma_ip_funcs = {
	.name = "si_dma",
	.early_init = si_dma_early_init,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
};

static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;
}

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,
};

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
}

/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure operation
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count,
				    bool tmz)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
}

/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
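	/* the fill packet carries dst addr bits [39:32] in the upper half
	 * of its final dword */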
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
}
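
/* 0xffff8 is the largest 8-byte-aligned value that fits in the DMA
 * packet byte-count field, hence the copy/fill limits below.
 */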
static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.copy_num_dw = 5,
	.emit_copy_buffer = si_dma_emit_copy_buffer,

	.fill_max_bytes = 0xffff8,
	.fill_num_dw = 4,
	.emit_fill_buffer = si_dma_emit_fill_buffer,
};

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,

	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,
};

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version si_dma_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dma_ip_funcs,
};