drm/radeon: fix write back suspend regression with uvd v2
author Jerome Glisse <jglisse@redhat.com>
Thu, 6 Jun 2013 21:51:21 +0000 (17:51 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 12 Jun 2013 12:16:29 +0000 (08:16 -0400)
The UVD ring can't use a scratch register, so the writeback buffer must keep
a valid address or radeon_ring_backup will trigger a kernel fault.
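
As a rough sketch of the guard this change adds (simplified from the
radeon_fence.c hunk below, not the exact driver code), the fence read path
now tolerates a missing writeback CPU address instead of dereferencing it:

    static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
    {
            struct radeon_fence_driver *drv = &rdev->fence_drv[ring];

            if (rdev->wb.enabled || !drv->scratch_reg) {
                    if (drv->cpu_addr)
                            return le32_to_cpu(*drv->cpu_addr);
                    /* no CPU address: fall back to the last seen sequence */
                    return lower_32_bits(atomic64_read(&drv->last_seq));
            }
            return RREG32(drv->scratch_reg);
    }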

It's OK not to unpin the writeback buffer on suspend, as it stays in GTT
and thus does not need eviction.
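
Put differently (a minimal sketch of the resulting radeon_device.c code from
the hunks below), disabling writeback on suspend now only clears the enabled
flag; the unmap/unpin/unref sequence happens once, in radeon_wb_fini(), under
a successful radeon_bo_reserve():

    void radeon_wb_disable(struct radeon_device *rdev)
    {
            /* the BO stays pinned in GTT across suspend/resume */
            rdev->wb.enabled = false;
    }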

v2: Fix the uvd case.
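
Roughly, the v2 handling inside radeon_uvd_suspend() (taken from the
radeon_uvd.c hunk below) is: the vcpu BO is unpinned from VRAM and, when
possible, re-pinned in the CPU domain and remapped so the UVD fence keeps a
valid CPU address; otherwise the fence driver's cpu_addr is cleared so the
guarded read path above falls back to last_seq:

    radeon_bo_kunmap(rdev->uvd.vcpu_bo);
    radeon_bo_unpin(rdev->uvd.vcpu_bo);
    rdev->uvd.cpu_addr = NULL;
    /* try to keep a valid CPU mapping across suspend */
    if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL))
            radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
    radeon_bo_unreserve(rdev->uvd.vcpu_bo);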

Reported and tracked by Wojtek <wojtask9@wp.pl>

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_uvd.c

diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 1899738..b0dc0b6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-       int r;
-
-       if (rdev->wb.wb_obj) {
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0))
-                       return;
-               radeon_bo_kunmap(rdev->wb.wb_obj);
-               radeon_bo_unpin(rdev->wb.wb_obj);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-       }
        rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
        radeon_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
+               if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+                       radeon_bo_kunmap(rdev->wb.wb_obj);
+                       radeon_bo_unpin(rdev->wb.wb_obj);
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+               }
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
-       }
-       r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-       if (unlikely(r != 0)) {
-               radeon_wb_fini(rdev);
-               return r;
-       }
-       r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                         &rdev->wb.gpu_addr);
-       if (r) {
+               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+               if (unlikely(r != 0)) {
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                               &rdev->wb.gpu_addr);
+               if (r) {
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+                       dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                radeon_bo_unreserve(rdev->wb.wb_obj);
-               dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-               radeon_wb_fini(rdev);
-               return r;
-       }
-       r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-       radeon_bo_unreserve(rdev->wb.wb_obj);
-       if (r) {
-               dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-               radeon_wb_fini(rdev);
-               return r;
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
        }
 
        /* clear wb memory */
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b937df..ddb8f8e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               *drv->cpu_addr = cpu_to_le32(seq);
+               if (drv->cpu_addr) {
+                       *drv->cpu_addr = cpu_to_le32(seq);
+               }
        } else {
                WREG32(drv->scratch_reg, seq);
        }
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
        u32 seq = 0;
 
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               seq = le32_to_cpu(*drv->cpu_addr);
+               if (drv->cpu_addr) {
+                       seq = le32_to_cpu(*drv->cpu_addr);
+               } else {
+                       seq = lower_32_bits(atomic64_read(&drv->last_seq));
+               }
        } else {
                seq = RREG32(drv->scratch_reg);
        }
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 906e5c0..9f55ade 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
        if (!r) {
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
+               rdev->uvd.cpu_addr = NULL;
+               if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+                       radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+               }
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+               if (rdev->uvd.cpu_addr) {
+                       radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+               } else {
+                       rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+               }
        }
        return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
                return r;
        }
 
+       /* the BO was re-pinned in the CPU domain on suspend: unmap and unpin it first */
+       radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+       radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {