/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"

#include "sdma/sdma_4_4_2_offset.h"
#include "sdma/sdma_4_4_2_sh_mask.h"

#include "soc15_common.h"
#include "soc15.h"
#include "vega10_sdma_pkt_open.h"

#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"

#include "amdgpu_ras.h"

MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");

#define WREG32_SDMA(instance, offset, value) \
	WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
#define RREG32_SDMA(instance, offset) \
	RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
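/*
 * For reference, these helpers expand to plain register accesses at the
 * per-instance offset, e.g. (illustrative only):
 *
 *   WREG32_SDMA(1, regSDMA_GFX_RB_CNTL, v)
 * expands to
 *   WREG32(adev->reg_offset[SDMA0_HWIP][1][0] + regSDMA_GFX_RB_CNTL, v)
 *
 * Both macros assume a variable named "adev" is in scope at the call site.
 */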
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);

static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
		u32 instance, u32 offset)
{
	return (adev->reg_offset[SDMA0_HWIP][instance][0] + offset);
}

static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
{
	switch (seq_num) {
	case 0:
		return SOC15_IH_CLIENTID_SDMA0;
	case 1:
		return SOC15_IH_CLIENTID_SDMA1;
	case 2:
		return SOC15_IH_CLIENTID_SDMA2;
	case 3:
		return SOC15_IH_CLIENTID_SDMA3;
	default:
		return -EINVAL;
	}
}

static int sdma_v4_4_2_irq_id_to_seq(unsigned client_id)
{
	switch (client_id) {
	case SOC15_IH_CLIENTID_SDMA0:
		return 0;
	case SOC15_IH_CLIENTID_SDMA1:
		return 1;
	case SOC15_IH_CLIENTID_SDMA2:
		return 2;
	case SOC15_IH_CLIENTID_SDMA3:
		return 3;
	default:
		return -EINVAL;
	}
}

static void sdma_v4_4_2_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 4, 2):
		break;
	default:
		break;
	}
}
/**
 * sdma_v4_4_2_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
{
	int ret, i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2)) {
			ret = amdgpu_sdma_init_microcode(adev, 0, true);
			break;
		} else {
			ret = amdgpu_sdma_init_microcode(adev, i, false);
			if (ret)
				return ret;
		}
	}

	return ret;
}

/**
 * sdma_v4_4_2_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware.
 */
static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
{
	u64 *rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ((u64 *)&ring->adev->wb.wb[ring->rptr_offs]);

	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
	return ((*rptr) >> 2);
}
/**
 * sdma_v4_4_2_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */
static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
	} else {
		wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
		wptr = wptr << 32;
		wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
				ring->me, wptr);
	}

	return wptr >> 2;
}

/**
 * sdma_v4_4_2_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */
static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	DRM_DEBUG("Setting write pointer\n");
	if (ring->use_doorbell) {
		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

		DRM_DEBUG("Using doorbell -- "
				"wptr_offs == 0x%08x "
				"lower_32_bits(ring->wptr) << 2 == 0x%08x "
				"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
				ring->wptr_offs,
				lower_32_bits(ring->wptr << 2),
				upper_32_bits(ring->wptr << 2));
		/* XXX check if swapping is necessary on BE */
		WRITE_ONCE(*wb, (ring->wptr << 2));
		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
				ring->doorbell_index, ring->wptr << 2);
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		DRM_DEBUG("Not using doorbell -- "
				"regSDMA%i_GFX_RB_WPTR == 0x%08x "
				"regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
				ring->me,
				lower_32_bits(ring->wptr << 2),
				ring->me,
				upper_32_bits(ring->wptr << 2));
		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
			    lower_32_bits(ring->wptr << 2));
		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
			    upper_32_bits(ring->wptr << 2));
	}
}
/**
 * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware.
 */
static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u64 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
	} else {
		wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
		wptr = wptr << 32;
		wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
	}

	return wptr >> 2;
}

/**
 * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware.
 */
static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];

		/* XXX check if swapping is necessary on BE */
		WRITE_ONCE(*wb, (ring->wptr << 2));
		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
	} else {
		uint64_t wptr = ring->wptr << 2;

		WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
			    lower_32_bits(wptr));
		WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
			    upper_32_bits(wptr));
	}
}
static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}
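/*
 * Worked example for the burst NOP above: insert_nop(ring, 4) on firmware
 * with burst_nop support emits one NOP header with COUNT = 3 followed by
 * three plain NOPs, which the engine can consume as a single 4-dword burst.
 */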
/**
 * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring.
 */
static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* IB packet must end on a 8 DW boundary */
	sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}

static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
				   int mem_space, int hdp,
				   uint32_t addr0, uint32_t addr1,
				   uint32_t ref, uint32_t mask,
				   uint32_t inv)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	if (mem_space) {
		/* memory */
		amdgpu_ring_write(ring, addr0);
		amdgpu_ring_write(ring, addr1);
	} else {
		/* registers */
		amdgpu_ring_write(ring, addr0 << 2);
		amdgpu_ring_write(ring, addr1 << 2);
	}
	amdgpu_ring_write(ring, ref); /* reference */
	amdgpu_ring_write(ring, mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
}
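/*
 * Note: the POLL_REGMEM packet built above is always 6 dwords (header, two
 * address dwords, reference, mask, retry/interval), which is where the
 * "6 +" terms in .emit_frame_size below come from.  In register mode the
 * dword offsets are shifted left by 2 to form byte addresses.
 */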
/**
 * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 ref_and_mask = 0;
	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;

	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;

	sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
			       ref_and_mask, ref_and_mask, 10);
}

/**
 * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed.
 */
static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	/* zero in first two bits */
	BUG_ON(addr & 0x3);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		/* zero in first two bits */
		BUG_ON(addr & 0x3);
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
 * sdma_v4_4_2_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers.
 */
static void sdma_v4_4_2_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
	u32 rb_cntl, ib_cntl;
	int i, unset = 0;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma[i] = &adev->sdma.instance[i].ring;

		if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
			unset = 1;
		}

		rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
		ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
	}
}

/**
 * sdma_v4_4_2_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues.
 */
static void sdma_v4_4_2_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v4_4_2_page_stop - stop the page async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the page async dma ring buffers.
 */
static void sdma_v4_4_2_page_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
	u32 rb_cntl, ib_cntl;
	int i;
	bool unset = false;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		sdma[i] = &adev->sdma.instance[i].page;

		if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
			(!unset)) {
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
			unset = true;
		}

		rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
					RB_ENABLE, 0);
		WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
		ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
					IB_ENABLE, 0);
		WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
	}
}
/**
 * sdma_v4_4_2_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch.
 */
static void sdma_v4_4_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
			"clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
			unit  << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
		if (enable && amdgpu_sdma_phase_quantum) {
			WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
			WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
			WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
		}
		WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);

		/* Extend page fault timeout to avoid interrupt storm */
		WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
	}
}
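/*
 * Worked example for the clamping above, assuming an 8-bit VALUE and 4-bit
 * UNIT field in SDMA_PHASE0_QUANTUM: amdgpu_sdma_phase_quantum=1000 is
 * halved twice (1000 -> 500 -> 250) until it fits in VALUE, ending up as
 * VALUE=250, UNIT=2, i.e. roughly 250 << 2 = 1000 clock cycles.
 */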
/**
 * sdma_v4_4_2_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines.
 */
static void sdma_v4_4_2_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v4_4_2_gfx_stop(adev);
		sdma_v4_4_2_rlc_stop(adev);
		if (adev->sdma.has_page_queue)
			sdma_v4_4_2_page_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
		WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
	}
}

/**
 * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
 */
static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
{
	/* Set ring buffer size in dwords */
	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);

	barrier(); /* work around https://bugs.llvm.org/show_bug.cgi?id=42576 */
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
				RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
	return rb_cntl;
}
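/*
 * Example: a 256KB ring buffer holds 65536 dwords, so order_base_2() above
 * yields RB_SIZE = 16 and the engine treats the buffer as 2^16 dwords.
 */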
/**
 * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the gfx DMA ring buffers and enable them.
 */
static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u64 wptr_gpu_addr;

	wb_offset = (ring->rptr_offs * 4);

	rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);

	/* set the wb address whether it's enabled or not */
	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
				RPTR_WRITEBACK_ENABLE, 1);

	WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
	WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);

	ring->wptr = 0;

	/* before programing wptr to a less value, need set minor_ptr_update first */
	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);

	/* doorbell */
	doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
	doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);

	doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
				 ring->use_doorbell);
	doorbell_offset = REG_SET_FIELD(doorbell_offset,
					SDMA_GFX_DOORBELL_OFFSET,
					OFFSET, ring->doorbell_index);
	WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
	WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);

	sdma_v4_4_2_ring_set_wptr(ring);

	/* set minor_ptr_update to 0 after wptr programed */
	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);

	/* setup the wptr shadow polling */
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
		    lower_32_bits(wptr_gpu_addr));
	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
		    upper_32_bits(wptr_gpu_addr));
	wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
				       SDMA_GFX_RB_WPTR_POLL_CNTL,
				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);

	/* enable DMA RB */
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);

	ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
	/* enable DMA IBs */
	WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);

	ring->sched.ready = true;
}
/**
 * sdma_v4_4_2_page_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @i: instance to resume
 *
 * Set up the page DMA ring buffers and enable them.
 */
static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i)
{
	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 wb_offset;
	u32 doorbell;
	u32 doorbell_offset;
	u64 wptr_gpu_addr;

	wb_offset = (ring->rptr_offs * 4);

	rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);

	/* set the wb address whether it's enabled or not */
	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
				RPTR_WRITEBACK_ENABLE, 1);

	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);

	ring->wptr = 0;

	/* before programing wptr to a less value, need set minor_ptr_update first */
	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);

	/* doorbell */
	doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
	doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);

	doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
				 ring->use_doorbell);
	doorbell_offset = REG_SET_FIELD(doorbell_offset,
					SDMA_PAGE_DOORBELL_OFFSET,
					OFFSET, ring->doorbell_index);
	WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
	WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);

	/* paging queue doorbell range is setup at sdma_v4_4_2_gfx_resume */
	sdma_v4_4_2_page_ring_set_wptr(ring);

	/* set minor_ptr_update to 0 after wptr programed */
	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);

	/* setup the wptr shadow polling */
	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
		    lower_32_bits(wptr_gpu_addr));
	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
		    upper_32_bits(wptr_gpu_addr));
	wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
				       SDMA_PAGE_RB_WPTR_POLL_CNTL,
				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);

	/* enable DMA RB */
	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);

	ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
	/* enable DMA IBs */
	WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);

	ring->sched.ready = true;
}
static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
{
}

/**
 * sdma_v4_4_2_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_4_2_rlc_resume(struct amdgpu_device *adev)
{
	sdma_v4_4_2_init_pg(adev);

	return 0;
}
/**
 * sdma_v4_4_2_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v4_4_2_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v4_4_2_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;

		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);

		for (j = 0; j < fw_size; j++)
			WREG32_SDMA(i, regSDMA_UCODE_DATA,
				    le32_to_cpup(fw_data++));

		WREG32_SDMA(i, regSDMA_UCODE_ADDR,
			    adev->sdma.instance[i].fw_version);
	}

	return 0;
}
/**
 * sdma_v4_4_2_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them.
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_4_2_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, r = 0;

	if (amdgpu_sriov_vf(adev)) {
		sdma_v4_4_2_ctx_switch_enable(adev, false);
		sdma_v4_4_2_enable(adev, false);
	} else {
		/* bypass sdma microcode loading on Gopher */
		if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
		    !(adev->pdev->device == 0x49) && !(adev->pdev->device == 0x50)) {
			r = sdma_v4_4_2_load_microcode(adev);
			if (r)
				return r;
		}

		/* unhalt the MEs */
		sdma_v4_4_2_enable(adev, true);
		/* enable sdma ring preemption */
		sdma_v4_4_2_ctx_switch_enable(adev, true);
	}

	/* start the gfx rings and rlc compute queues */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		uint32_t temp;

		WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
		sdma_v4_4_2_gfx_resume(adev, i);
		if (adev->sdma.has_page_queue)
			sdma_v4_4_2_page_resume(adev, i);

		/* set utc l1 enable flag always to 1 */
		temp = RREG32_SDMA(i, regSDMA_CNTL);
		temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
		WREG32_SDMA(i, regSDMA_CNTL, temp);

		if (!amdgpu_sriov_vf(adev)) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);

			/* unhalt engine */
			temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
			temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
			WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
		}
	}

	if (amdgpu_sriov_vf(adev)) {
		sdma_v4_4_2_ctx_switch_enable(adev, true);
		sdma_v4_4_2_enable(adev, true);
	} else {
		r = sdma_v4_4_2_rlc_resume(adev);
		if (r)
			return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			struct amdgpu_ring *page = &adev->sdma.instance[i].page;

			r = amdgpu_ring_test_helper(page);
			if (r)
				return r;

			if (adev->mman.buffer_funcs_ring == page)
				amdgpu_ttm_set_buffer_funcs_status(adev, true);
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return r;
}
/**
 * sdma_v4_4_2_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory.
 * Returns 0 for success, error for failure.
 */
static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}
/**
 * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring.
 * Returns 0 on success, error on failure.
 */
static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	long r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
					AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}
/**
 * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA.
 */
static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}
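/*
 * Each GPU page table entry is 8 bytes, hence count * 8 above; the
 * COPY_LINEAR packet built here is 7 dwords, matching .copy_pte_num_dw
 * in sdma_v4_4_2_vm_pte_funcs below.
 */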
/**
 * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA.
 */
static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw - 1;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA.
 */
static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
}
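/*
 * The PTE_PDE packet above lets the engine generate the entries itself:
 * starting at addr, stepping by incr bytes per entry and applying flags
 * through the mask dwords, so no CPU-side loop over count is needed.
 */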
/**
 * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to fill with padding
 */
static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
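/*
 * Worked example for the padding above: an IB with length_dw == 13 gets
 * pad_count = (-13) & 7 = 3, rounding the IB up to 16 dwords so that it
 * ends on the 8-dword boundary the engine requires.
 */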
/**
 * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
			       addr & 0xfffffffc,
			       upper_32_bits(addr) & 0xffffffff,
			       seq, 0xffffffff, 4);
}

/**
 * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address
 *
 * Update the page table base and flush the VM TLB
 * using sDMA.
 */
static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
}

static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
					 uint32_t val, uint32_t mask)
{
	sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 4, 2):
		return false;
	default:
		return false;
	}
}

static int sdma_v4_4_2_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = sdma_v4_4_2_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	/* TODO: Page queue breaks driver reload under SRIOV */
	if (sdma_v4_4_2_fw_support_paging_queue(adev))
		adev->sdma.has_page_queue = true;

	sdma_v4_4_2_set_ring_funcs(adev);
	sdma_v4_4_2_set_buffer_funcs(adev);
	sdma_v4_4_2_set_vm_pte_funcs(adev);
	sdma_v4_4_2_set_irq_funcs(adev);

	return 0;
}

#if 0
static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry);
#endif

static int sdma_v4_4_2_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
#if 0
	struct ras_ih_if ih_info = {
		.cb = sdma_v4_4_2_process_ras_data_cb,
	};
#endif
	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops &&
		    adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev);
	}

	return 0;
}
static int sdma_v4_4_2_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_TRAP,
				      &adev->sdma.trap_irq);
		if (r)
			return r;
	}

	/* SDMA SRAM ECC event */
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
				      &adev->sdma.ecc_irq);
		if (r)
			return r;
	}

	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/
	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_VM_HOLE,
				      &adev->sdma.vm_hole_irq);
		if (r)
			return r;

		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
				      &adev->sdma.doorbell_invalid_irq);
		if (r)
			return r;

		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
				      &adev->sdma.pool_timeout_irq);
		if (r)
			return r;

		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
				      SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
				      &adev->sdma.srbm_write_irq);
		if (r)
			return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;

		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
				ring->use_doorbell?"true":"false");

		/* doorbell size is 2 dwords, get DWORD offset */
		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
		ring->vm_hub = AMDGPU_MMHUB_0;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		if (adev->sdma.has_page_queue) {
			ring = &adev->sdma.instance[i].page;
			ring->ring_obj = NULL;
			ring->use_doorbell = true;

			/* paging queue use same doorbell index/routing as gfx queue
			 * with 0x400 (1024 in decimal) offset on second doorbell page
			 */
			ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
			ring->doorbell_index += 0x400;
			ring->vm_hub = AMDGPU_MMHUB_0;

			sprintf(ring->name, "page%d", i);
			r = amdgpu_ring_init(adev, ring, 1024,
					     &adev->sdma.trap_irq,
					     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	return r;
}
static int sdma_v4_4_2_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
		if (adev->sdma.has_page_queue)
			amdgpu_ring_fini(&adev->sdma.instance[i].page);
	}

	if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 2))
		amdgpu_sdma_destroy_inst_ctx(adev, true);
	else
		amdgpu_sdma_destroy_inst_ctx(adev, false);

	return 0;
}

static int sdma_v4_4_2_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU)
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);

	if (!amdgpu_sriov_vf(adev))
		sdma_v4_4_2_init_golden_registers(adev);

	r = sdma_v4_4_2_start(adev);

	return r;
}

static int sdma_v4_4_2_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	if (amdgpu_sriov_vf(adev))
		return 0;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
			       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
	}

	sdma_v4_4_2_ctx_switch_enable(adev, false);
	sdma_v4_4_2_enable(adev, false);

	return 0;
}

static int sdma_v4_4_2_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_4_2_hw_fini(adev);
}

static int sdma_v4_4_2_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v4_4_2_hw_init(adev);
}
static bool sdma_v4_4_2_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);

		if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
			return false;
	}

	return true;
}

static int sdma_v4_4_2_wait_for_idle(void *handle)
{
	unsigned i, j;
	u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		for (j = 0; j < adev->sdma.num_instances; j++) {
			sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
			if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
				break;
		}
		if (j == adev->sdma.num_instances)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int sdma_v4_4_2_soft_reset(void *handle)
{
	/* todo */

	return 0;
}
static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
	WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);

	return 0;
}

static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t instance;

	DRM_DEBUG("IH: SDMA trap\n");
	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
	switch (entry->ring_id) {
	case 0:
		amdgpu_fence_process(&adev->sdma.instance[instance].ring);
		break;
	default:
		break;
	}
	return 0;
}
#if 0
static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
					      void *err_data,
					      struct amdgpu_iv_entry *entry)
{
	int instance;

	/* When “Full RAS” is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood
	 */
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
		goto out;

	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
	if (instance < 0)
		goto out;

	amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);

out:
	return AMDGPU_RAS_SUCCESS;
}
#endif
static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	int instance;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");

	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
	if (instance < 0)
		return 0;

	switch (entry->ring_id) {
	case 0:
		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
		break;
	}
	return 0;
}

static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_edc_config;

	sdma_edc_config = RREG32_SDMA(type, regCC_SDMA_EDC_CONFIG);
	/*
	 * FIXME: This was inherited from Aldebaran, but this field is not
	 * defined in the regspec of either Aldebaran or SDMA 4.4.2
	 */
	sdma_edc_config |= (state == AMDGPU_IRQ_STATE_ENABLE) ? (1 << 2) : 0;
	WREG32_SDMA(type, regCC_SDMA_EDC_CONFIG, sdma_edc_config);

	return 0;
}
static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
					      struct amdgpu_iv_entry *entry)
{
	int instance;
	struct amdgpu_task_info task_info;
	u64 addr;

	instance = sdma_v4_4_2_irq_id_to_seq(entry->client_id);
	if (instance < 0 || instance >= adev->sdma.num_instances) {
		dev_err(adev->dev, "sdma instance invalid %d\n", instance);
		return -EINVAL;
	}

	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_dbg_ratelimited(adev->dev,
		   "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
		   "pasid:%u, for process %s pid %d thread %s pid %d\n",
		   instance, addr, entry->src_id, entry->ring_id, entry->vmid,
		   entry->pasid, task_info.process_name, task_info.tgid,
		   task_info.task_name, task_info.pid);
	return 0;
}
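/*
 * Note on the address assembly above: the IV payload carries a page-aligned
 * 48-bit virtual address, bits 43:12 in src_data[0] and bits 47:44 in the
 * low nibble of src_data[1], hence the shifts by 12 and 44 before OR'ing.
 */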
static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
	sdma_v4_4_2_print_iv_entry(adev, entry);
	return 0;
}

static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n");
	sdma_v4_4_2_print_iv_entry(adev, entry);
	return 0;
}

static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	dev_dbg_ratelimited(adev->dev,
		"Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
	sdma_v4_4_2_print_iv_entry(adev, entry);
	return 0;
}

static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	dev_dbg_ratelimited(adev->dev,
		"SDMA received an SRBM_WRITE register write command in a non-privileged command buffer\n");
	sdma_v4_4_2_print_iv_entry(adev, entry);
	return 0;
}
static void sdma_v4_4_2_update_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
			data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
			data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				 SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (def != data)
				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
		}
	}
}

static void sdma_v4_4_2_update_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t data, def;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* 1-not override: enable sdma mem light sleep */
			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
			data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			/* 0-override: disable sdma mem light sleep */
			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
			data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
			if (def != data)
				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
		}
	}
}
static int sdma_v4_4_2_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	sdma_v4_4_2_update_medium_grain_clock_gating(adev,
			state == AMD_CG_STATE_GATE);
	sdma_v4_4_2_update_medium_grain_light_sleep(adev,
			state == AMD_CG_STATE_GATE);
	return 0;
}

static int sdma_v4_4_2_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v4_4_2_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_CLK_CTRL));
	if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE7_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, regSDMA_POWER_CNTL));
	if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}
const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
	.name = "sdma_v4_4_2",
	.early_init = sdma_v4_4_2_early_init,
	.late_init = sdma_v4_4_2_late_init,
	.sw_init = sdma_v4_4_2_sw_init,
	.sw_fini = sdma_v4_4_2_sw_fini,
	.hw_init = sdma_v4_4_2_hw_init,
	.hw_fini = sdma_v4_4_2_hw_fini,
	.suspend = sdma_v4_4_2_suspend,
	.resume = sdma_v4_4_2_resume,
	.is_idle = sdma_v4_4_2_is_idle,
	.wait_for_idle = sdma_v4_4_2_wait_for_idle,
	.soft_reset = sdma_v4_4_2_soft_reset,
	.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
	.set_powergating_state = sdma_v4_4_2_set_powergating_state,
	.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.get_rptr = sdma_v4_4_2_ring_get_rptr,
	.get_wptr = sdma_v4_4_2_ring_get_wptr,
	.set_wptr = sdma_v4_4_2_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
		/* sdma_v4_4_2_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
	.emit_ib = sdma_v4_4_2_ring_emit_ib,
	.emit_fence = sdma_v4_4_2_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
	.test_ring = sdma_v4_4_2_ring_test_ring,
	.test_ib = sdma_v4_4_2_ring_test_ib,
	.insert_nop = sdma_v4_4_2_ring_insert_nop,
	.pad_ib = sdma_v4_4_2_ring_pad_ib,
	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = true,
	.get_rptr = sdma_v4_4_2_ring_get_rptr,
	.get_wptr = sdma_v4_4_2_page_ring_get_wptr,
	.set_wptr = sdma_v4_4_2_page_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
		/* sdma_v4_4_2_ring_emit_vm_flush */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
	.emit_ib = sdma_v4_4_2_ring_emit_ib,
	.emit_fence = sdma_v4_4_2_ring_emit_fence,
	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
	.test_ring = sdma_v4_4_2_ring_test_ring,
	.test_ib = sdma_v4_4_2_ring_test_ib,
	.insert_nop = sdma_v4_4_2_ring_insert_nop,
	.pad_ib = sdma_v4_4_2_ring_pad_ib,
	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
		if (adev->sdma.has_page_queue) {
			adev->sdma.instance[i].page.funcs =
				&sdma_v4_4_2_page_ring_funcs;
			adev->sdma.instance[i].page.me = i;
		}
	}
}

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
	.set = sdma_v4_4_2_set_trap_irq_state,
	.process = sdma_v4_4_2_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
	.process = sdma_v4_4_2_process_illegal_inst_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
	.set = sdma_v4_4_2_set_ecc_irq_state,
	.process = amdgpu_sdma_process_ecc_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
	.process = sdma_v4_4_2_process_vm_hole_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
	.process = sdma_v4_4_2_process_doorbell_invalid_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
	.process = sdma_v4_4_2_process_pool_timeout_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
	.process = sdma_v4_4_2_process_srbm_write_irq,
};

static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
	adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
	adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
	adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
	adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
	adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;

	adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
	adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
	adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
	adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
	adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
	adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
}
/**
 * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: if a secure copy should be used
 *
 * Copy GPU buffers using the DMA engine.
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count,
				       bool tmz)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
	ib->ptr[ib->length_dw++] = byte_count - 1;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine.
 */
static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count - 1;
}
static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
	.copy_max_bytes = 0x400000,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,

	.fill_max_bytes = 0x400000,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
};
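/*
 * The dword counts above mirror what the emit helpers actually write (7 for
 * copy, 5 for fill), and the 0x400000 limits follow from the 22-bit COUNT
 * field of the linear copy/fill packets, which holds byte_count - 1.
 */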
static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
	if (adev->sdma.has_page_queue)
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
	else
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v4_4_2_vm_copy_pte,

	.write_pte = sdma_v4_4_2_vm_write_pte,
	.set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
};

static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	struct drm_gpu_scheduler *sched;
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (adev->sdma.has_page_queue)
			sched = &adev->sdma.instance[i].page.sched;
		else
			sched = &adev->sdma.instance[i].ring.sched;
		adev->vm_manager.vm_pte_scheds[i] = sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 4,
	.minor = 4,
	.rev = 2,
	.funcs = &sdma_v4_4_2_ip_funcs,
};