drm/radeon: fix VM flush on SI (v3)
author		Alex Deucher <alexander.deucher@amd.com>
		Tue, 6 Jan 2015 00:54:50 +0000 (19:54 -0500)
committer	Alex Deucher <alexander.deucher@amd.com>
		Thu, 8 Jan 2015 14:36:50 +0000 (09:36 -0500)
We need to wait for the GPUVM flush to complete.  There was some
confusion as to how this mechanism was supposed to work: the operation
is not atomic, so for GPU-initiated invalidations you need to read back
a VM register to introduce enough latency for the update to complete
(a condensed sketch of the added sequence follows the tag block below).

v2: drop gart changes
v3: just read back rather than polling

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
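
For orientation, here is a condensed, annotated sketch of the read-back this
patch adds on the CP ring.  The helper name si_cp_wait_vm_invalidate() is
invented for illustration; the real change is inlined into si_vm_flush() in
the first hunk below:

/* Illustrative sketch only -- mirrors the si_vm_flush() hunk below.
 * A WAIT_REG_MEM with an "always" compare function makes the CP read
 * VM_INVALIDATE_REQUEST back, which introduces enough latency for the
 * invalidate to complete before the PFP/ME sync that follows it. */
static void si_cp_wait_vm_invalidate(struct radeon_ring *ring)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |   /* always */
				 WAIT_REG_MEM_MEM_SPACE(0) |  /* register, not memory */
				 WAIT_REG_MEM_ENGINE(0)));    /* me */
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);  /* register dword offset */
	radeon_ring_write(ring, 0);                           /* high address (unused for a reg) */
	radeon_ring_write(ring, 0);                           /* reference value */
	radeon_ring_write(ring, 0);                           /* mask */
	radeon_ring_write(ring, 0x20);                        /* poll interval */
}
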
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/radeon/sid.h

index 60df444..5d89b87 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 1 << vm_id);
 
+       /* wait for the invalidate to complete */
+       radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+       radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
+                                WAIT_REG_MEM_ENGINE(0))); /* me */
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+       radeon_ring_write(ring, 0);
+       radeon_ring_write(ring, 0); /* ref */
+       radeon_ring_write(ring, 0); /* mask */
+       radeon_ring_write(ring, 0x20); /* poll interval */
+
        /* sync PFP to ME, otherwise we might get invalid PFP reads */
        radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
        radeon_ring_write(ring, 0x0);
index f5cc777..aa7b872 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -206,6 +206,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm_id);
+
+       /* wait for invalidate to complete */
+       radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+       radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+       radeon_ring_write(ring, 0xff << 16); /* retry */
+       radeon_ring_write(ring, 1 << vm_id); /* mask */
+       radeon_ring_write(ring, 0); /* value */
+       radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
 
 /**
index 4069be8..8499924 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_COPY_DW                                 0x3B
 #define        PACKET3_WAIT_REG_MEM                            0x3C
+#define                WAIT_REG_MEM_FUNCTION(x)                ((x) << 0)
+                /* 0 - always
+                * 1 - <
+                * 2 - <=
+                * 3 - ==
+                * 4 - !=
+                * 5 - >=
+                * 6 - >
+                */
+#define                WAIT_REG_MEM_MEM_SPACE(x)               ((x) << 4)
+                /* 0 - reg
+                * 1 - mem
+                */
+#define                WAIT_REG_MEM_ENGINE(x)                  ((x) << 8)
+                /* 0 - me
+                * 1 - pfp
+                */
 #define        PACKET3_MEM_WRITE                               0x3D
 #define        PACKET3_COPY_DATA                               0x40
 #define        PACKET3_CP_DMA                                  0x41
 #define        DMA_PACKET_TRAP                                   0x7
 #define        DMA_PACKET_SRBM_WRITE                             0x9
 #define        DMA_PACKET_CONSTANT_FILL                          0xd
+#define        DMA_PACKET_POLL_REG_MEM                           0xe
 #define        DMA_PACKET_NOP                                    0xf
 
 #define VCE_STATUS                                     0x20004
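
As a usage note on the new WAIT_REG_MEM field macros: the function,
memory-space and engine selectors simply OR together into the packet's
flags dword.  The values below are examples only (the patch itself uses
function 0 / engine 0), shown to make the encoding concrete:

	/* Example only: have the PFP wait until a memory dword equals
	 * the reference value. */
	u32 flags = WAIT_REG_MEM_FUNCTION(3) |  /* 3 = equal */
		    WAIT_REG_MEM_MEM_SPACE(1) | /* 1 = memory */
		    WAIT_REG_MEM_ENGINE(1);     /* 1 = pfp */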