int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
- struct fence **array,
- uint32_t count,
- bool wait_all,
- bool intr,
- signed long t);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+ struct fence **array,
+ uint32_t count,
+ bool intr,
+ signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);
return false;
}
-static bool amdgpu_test_signaled_all(struct fence **fences, uint32_t count)
-{
- int idx;
- struct fence *fence;
-
- for (idx = 0; idx < count; ++idx) {
- fence = fences[idx];
- if (fence) {
- if (!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return false;
- }
- }
-
- return true;
-}
-
struct amdgpu_wait_cb {
struct fence_cb base;
struct task_struct *task;
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_device *adev = fence->ring->adev;
- return amdgpu_fence_wait_multiple(adev, &f, 1, false, intr, t);
+ return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
}
/**
* @adev: amdgpu device
 * @array: the fence array with amdgpu fence pointers
 * @count: the number of fences in the array
- * @wait_all: the flag of wait all(true) or wait any(false)
 * @intr: when sleeping, set the current task interruptible or not
* @t: timeout to wait
*
- * If wait_all is true, it will return when all fences are signaled or timeout.
- * If wait_all is false, it will return when any fence is signaled or timeout.
+ * It will return when any fence is signaled or the timeout expires.
*/
-signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
- struct fence **array,
- uint32_t count,
- bool wait_all,
- bool intr,
- signed long t)
-{
- long idx = 0;
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+ struct fence **array, uint32_t count,
+ bool intr, signed long t)
+{
struct amdgpu_wait_cb *cb;
struct fence *fence;
+ unsigned idx;
BUG_ON(!array);
if (fence_add_callback(fence,
&cb[idx].base, amdgpu_fence_wait_cb)) {
/* The fence is already signaled */
- if (wait_all)
- continue;
- else
- goto fence_rm_cb;
+ goto fence_rm_cb;
}
}
}
* amdgpu_test_signaled_any must be called after
* set_current_state to prevent a race with wake_up_process
*/
- if (!wait_all && amdgpu_test_signaled_any(array, count))
- break;
- if (wait_all && amdgpu_test_signaled_all(array, count))
+ if (amdgpu_test_signaled_any(array, count))
break;
if (adev->needs_reset) {
} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
spin_unlock(&sa_manager->wq.lock);
- t = amdgpu_fence_wait_multiple(adev, fences, AMDGPU_MAX_RINGS, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+ false, MAX_SCHEDULE_TIMEOUT);
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
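
For reference, a minimal usage sketch of the renamed helper, modeled on the amdgpu_sa.c call site above. This is not part of the patch: adev, fence_a and fence_b are assumed to already be in scope, and the finite timeout is illustrative.

	struct fence *fences[2] = { fence_a, fence_b };	/* hypothetical fence pointers */
	signed long t;

	/* Sleep non-interruptibly until any fence signals or ~100 ms elapse. */
	t = amdgpu_fence_wait_any(adev, fences, 2, false, msecs_to_jiffies(100));
	if (t > 0) {
		/* at least one fence signaled before the timeout */
	} else if (t == 0) {
		/* timed out without any fence signaling */
	} else {
		/* negative error code, e.g. when a GPU reset is pending */
	}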