	return (const char *)fence->ring->name;
}
-static bool amdgpu_test_signaled_any(struct fence **fences, uint32_t count)
-{
-	int idx;
-	struct fence *fence;
-
-	for (idx = 0; idx < count; ++idx) {
-		fence = fences[idx];
-		if (fence) {
-			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-				return true;
-		}
-	}
-	return false;
-}
-
-struct amdgpu_wait_cb {
-	struct fence_cb base;
-	struct task_struct *task;
-};
-
-static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
-{
-	struct amdgpu_wait_cb *wait =
-		container_of(cb, struct amdgpu_wait_cb, base);
-	wake_up_process(wait->task);
-}
-
-/**
- * Wait the fence array with timeout
- *
- * @array: the fence array with amdgpu fence pointer
- * @count: the number of the fence array
- * @intr: when sleep, set the current task interruptable or not
- * @t: timeout to wait
- *
- * It will return when any fence is signaled or timeout.
- */
-signed long amdgpu_fence_wait_any(struct fence **array, uint32_t count,
-				  bool intr, signed long t)
-{
-	struct amdgpu_wait_cb *cb;
-	struct fence *fence;
-	unsigned idx;
-
-	BUG_ON(!array);
-
-	cb = kcalloc(count, sizeof(struct amdgpu_wait_cb), GFP_KERNEL);
-	if (cb == NULL) {
-		t = -ENOMEM;
-		goto err_free_cb;
-	}
-
-	for (idx = 0; idx < count; ++idx) {
-		fence = array[idx];
-		if (fence) {
-			cb[idx].task = current;
-			if (fence_add_callback(fence,
-					&cb[idx].base, amdgpu_fence_wait_cb)) {
-				/* The fence is already signaled */
-				goto fence_rm_cb;
-			}
-		}
-	}
-
-	while (t > 0) {
-		if (intr)
-			set_current_state(TASK_INTERRUPTIBLE);
-		else
-			set_current_state(TASK_UNINTERRUPTIBLE);
-
-		/*
-		 * amdgpu_test_signaled_any must be called after
-		 * set_current_state to prevent a race with wake_up_process
-		 */
-		if (amdgpu_test_signaled_any(array, count))
-			break;
-
-		t = schedule_timeout(t);
-
-		if (t > 0 && intr && signal_pending(current))
-			t = -ERESTARTSYS;
-	}
-
-	__set_current_state(TASK_RUNNING);
-
-fence_rm_cb:
-	for (idx = 0; idx < count; ++idx) {
-		fence = array[idx];
-		if (fence && cb[idx].base.func)
-			fence_remove_callback(fence, &cb[idx].base);
-	}
-
-err_free_cb:
-	kfree(cb);
-
-	return t;
-}
-
const struct fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
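
The driver-private wait removed above duplicated the generic callback-plus-schedule_timeout() pattern, and the patch switches its one caller to the fence_wait_any_timeout() helper in the dma-buf core. One difference shows up in the hunk below: judging by the new packing loop and the count check, the core helper neither skips NULL entries nor accepts an empty array, so the sa-manager code now compacts the array and handles the no-fence case itself. A minimal sketch of that calling pattern follows, assuming the 4.4-era struct fence API; wait_first_fence() is a hypothetical wrapper for illustration, not part of this patch.

#include <linux/fence.h>
#include <linux/sched.h>	/* MAX_SCHEDULE_TIMEOUT */

/* Sketch only: pack out the NULL slots, then let the core helper block
 * until the first remaining fence signals. */
static signed long wait_first_fence(struct fence **fences, unsigned total)
{
	unsigned i, count;

	/* the core helper does not tolerate NULL entries or count == 0 */
	for (i = 0, count = 0; i < total; ++i)
		if (fences[i])
			fences[count++] = fences[i];

	if (!count)
		return -ENOENT;	/* nothing to wait for */

	/* uninterruptible, unbounded wait, as in the hunk below */
	return fence_wait_any_timeout(fences, count, false,
				      MAX_SCHEDULE_TIMEOUT);
}

Returning -ENOENT for the empty case mirrors the convention the removed caller code below checked with "if (r == -ENOENT)".
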
{
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
+	unsigned count;
	int i, r;
	signed long t;
			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
-		spin_unlock(&sa_manager->wq.lock);
-		t = amdgpu_fence_wait_any(fences, AMDGPU_MAX_RINGS,
-					  false, MAX_SCHEDULE_TIMEOUT);
-		r = (t > 0) ? 0 : t;
-		spin_lock(&sa_manager->wq.lock);
-		/* if we have nothing to wait for block */
-		if (r == -ENOENT) {
+		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
+			if (fences[i])
+				fences[count++] = fences[i];
+
+		if (count) {
+			spin_unlock(&sa_manager->wq.lock);
+			t = fence_wait_any_timeout(fences, count, false,
+						   MAX_SCHEDULE_TIMEOUT);
+			r = (t > 0) ? 0 : t;
+			spin_lock(&sa_manager->wq.lock);
+		} else {
+			/* if we have nothing to wait for block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)