// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>
#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
/*
 * fence context counter: each execution context should have its own
 * fence context, this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
 */
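
/*
 * Illustrative sketch (not part of this file's API): the smallest
 * possible fence lifecycle using the functions named above. All
 * example_* names are hypothetical; a real driver embeds the fence in
 * its job structure and signals it from a completion interrupt.
 */
static const char *example_fence_get_name(struct dma_fence *fence)
{
	return "example";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = example_fence_get_name,
	.get_timeline_name = example_fence_get_name,
};

static DEFINE_SPINLOCK(example_fence_lock);

static struct dma_fence *__maybe_unused
example_fence_create(u64 context, u64 seqno)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return ERR_PTR(-ENOMEM);

	/* One reference is held by the caller; dma_fence_put() drops it. */
	dma_fence_init(fence, &example_fence_ops, &example_fence_lock,
		       context, seqno);
	return fence;
}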
/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there's no deadlocks of dma_fence_wait() against other locks
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 *   This means any code required for fence completion cannot acquire a
 *   &dma_resv lock. Note that this also pulls in the entire established
 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   respectively &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail.
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */
static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};
/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's
 * timestamp corresponds to the first time after boot this
 * function is called.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);
		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(void)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return ERR_PTR(-ENOMEM);

	dma_fence_init(fence,
		       &dma_fence_stub_ops,
		       &dma_fence_stub_lock,
		       0, 0);
	dma_fence_signal(fence);

	return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);
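
/*
 * Usage sketch (illustrative only, hypothetical caller): code paths
 * that must hand out a fence but have no asynchronous work pending can
 * return a private stub instead of special-casing a NULL fence.
 */
static struct dma_fence *__maybe_unused example_no_op_fence(void)
{
	struct dma_fence *fence = dma_fence_allocate_private_stub();

	/* On allocation failure this is ERR_PTR(-ENOMEM). */
	if (IS_ERR(fence))
		return fence;

	/* Already signaled: any waiter returns immediately. */
	return fence;
}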
/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);
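
/*
 * Usage sketch (hypothetical engine structure): allocate one context
 * per engine at init time, then hand out monotonically increasing
 * seqnos so all fences on that engine are totally ordered.
 */
struct example_engine {
	u64 fence_context;
	u64 next_seqno;		/* protected by job submission locking */
	spinlock_t fence_lock;
};

static void __maybe_unused example_engine_init(struct example_engine *engine)
{
	engine->fence_context = dma_fence_context_alloc(1);
	engine->next_seqno = 1;
	spin_lock_init(&engine->fence_lock);
}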
/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *    cookie = dma_fence_begin_signalling();
 *    lock(A);
 *    unlock(A);
 *    dma_fence_signal(B);
 *    dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exceptions are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as long
 *   as the overall locking hierarchy is consistent. The annotations also work
 *   both in interrupt and process context. Due to implementation details this
 *   requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};
/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections are annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive readlock */
	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);

	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);
/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
	if (cookie)
		return;

	lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);
void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
#endif
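
/*
 * Annotation usage sketch (hypothetical job structure): everything
 * between publishing a fence and signalling it runs inside a
 * signalling section, so lockdep can verify the cross-driver contract.
 */
struct example_job {
	struct dma_fence *fence;
};

static void __maybe_unused example_complete_job(struct example_job *job)
{
	bool cookie = dma_fence_begin_signalling();

	/*
	 * Code here must obey the signalling rules: no GFP_KERNEL
	 * allocations, no dma_resv_lock(), no waiting on other fences
	 * that might depend on this one.
	 */
	dma_fence_signal(job->fence);
	dma_fence_end_signalling(cookie);
}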
/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;
	int ret;

	if (!fence)
		return -EINVAL;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence, this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (!fence)
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);
/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
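
/*
 * Usage sketch for the tri-state return value above. msecs_to_jiffies()
 * is assumed available via the existing includes; returning -ETIMEDOUT
 * is this example's own convention, not something
 * dma_fence_wait_timeout() does itself.
 */
static int __maybe_unused example_wait_one_second(struct dma_fence *fence)
{
	signed long ret;

	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out, fence unsignaled */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS or driver error */
	return 0;			/* signaled, with jiffies to spare */
}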
/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);
/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);
static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}
/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request for sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
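
/*
 * Callback usage sketch (hypothetical tracking structure). The
 * callback may fire from irq context, so it only does atomic work;
 * the embedded dma_fence_cb needs no initialization by the caller.
 */
struct example_tracker {
	struct dma_fence_cb cb;
	atomic_t pending;
};

static void example_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct example_tracker *t = container_of(cb, struct example_tracker, cb);

	atomic_dec(&t->pending);
}

static void __maybe_unused example_track(struct example_tracker *t,
					 struct dma_fence *fence)
{
	atomic_inc(&t->pending);
	/* -ENOENT means the fence already signaled; undo immediately. */
	if (dma_fence_add_callback(fence, &t->cb, example_fence_cb))
		atomic_dec(&t->pending);
}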
/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);
/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}
/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return ret;

	spin_lock_irqsave(fence->lock, flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (!__dma_fence_enable_signaling(fence))
		goto out;

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}
/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *	positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronous waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
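
/*
 * Usage sketch: wait for whichever of two fences signals first. The
 * caller must hold references to both fences for the whole wait.
 */
static int __maybe_unused example_wait_either(struct dma_fence *a,
					      struct dma_fence *b)
{
	struct dma_fence *fences[] = { a, b };
	uint32_t first;
	signed long ret;

	ret = dma_fence_wait_any_timeout(fences, ARRAY_SIZE(fences), true,
					 MAX_SCHEDULE_TIMEOUT, &first);
	if (ret < 0)
		return ret;

	return first;	/* index of the fence that signaled */
}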
/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   fence->ops->get_driver_name(fence),
		   fence->ops->get_timeline_name(fence), fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);
/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
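
/*
 * Comparison sketch: the context/seqno pair set up by dma_fence_init()
 * is what makes dma_fence_later() meaningful. Only fences initialized
 * on the same context can be ordered this way.
 */
static struct dma_fence *__maybe_unused
example_pick_later(struct dma_fence *a, struct dma_fence *b)
{
	WARN_ON(a->context != b->context);
	return dma_fence_later(a, b);
}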