bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
	return 0;
}
+/* caller must hold ring lock */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

-	/* We are not protected by ring lock when reading current seq but
-	 * it's ok as worst case is we return to early while we could have
-	 * wait.
-	 */
	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
-int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
+/* caller must hold ring lock */
+void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
-	/* We are not protected by ring lock when reading current seq
-	 * but it's ok as wait empty is call from place where no more
-	 * activity can be scheduled so there won't be concurrent access
-	 * to seq value.
-	 */
-	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].sync_seq[ring],
-				     ring, false, false);
+	uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
+
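+	/* wait for the last emitted fence; if the wait reports a GPU
+	 * lockup (-EDEADLK), reset the GPU and wait again
+	 */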
+	while(1) {
+		int r;
+		r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
+		if (r == -EDEADLK) {
+			mutex_unlock(&rdev->ring_lock);
+			r = radeon_gpu_reset(rdev);
+			mutex_lock(&rdev->ring_lock);
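+			/* reset succeeded, wait for the fence again */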
+			if (!r)
+				continue;
+		}
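+		/* the ring could not be made idle, all we can do is complain */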
+		if (r) {
+			dev_err(rdev->dev, "error waiting for ring to become"
+				" idle (%d)\n", r);
+		}
+		return;
+	}
}
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)