// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;
/*
 * fence context counter: each execution context should have its own
 * fence context; this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
 */
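/*
 * Example: a minimal sketch of explicit fencing, exporting a fence to
 * userspace as a &sync_file. Error handling is omitted and the submission
 * helper is hypothetical::
 *
 *	struct dma_fence *fence = my_driver_submit_job(job);
 *	struct sync_file *sync_file = sync_file_create(fence);
 *	int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *	fd_install(fd, sync_file->file);
 *	return fd;
 */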
/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second-guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there are no deadlocks of dma_fence_wait() against other locks,
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 *   This means any code required for fence completion cannot acquire a
 *   &dma_resv lock. Note that this also pulls in the entire established
 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier or
 *   &mmu_interval_notifier callbacks. This means any code required for fence
 *   completion cannot allocate memory with GFP_NOFS or GFP_NOIO. Only
 *   GFP_ATOMIC is permissible, which might fail.
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */
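/*
 * Example: a minimal sketch of hang recovery honouring the contract above,
 * force-completing all in-flight fences with an error. Everything except
 * dma_fence_set_error() and dma_fence_signal() is hypothetical; a real
 * driver must also stop the hardware and block further submission first::
 *
 *	struct my_job {
 *		struct dma_fence *fence;
 *		struct list_head node;
 *	};
 *
 *	static void my_driver_timeout(struct my_ring *ring)
 *	{
 *		struct my_job *job;
 *
 *		list_for_each_entry(job, &ring->in_flight, node) {
 *			dma_fence_set_error(job->fence, -ETIMEDOUT);
 *			dma_fence_signal(job->fence);
 *		}
 *	}
 */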
static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};
/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's
 * timestamp corresponds to the first time after boot this
 * function is called.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	spin_lock(&dma_fence_stub_lock);
	if (!dma_fence_stub.ops) {
		dma_fence_init(&dma_fence_stub,
			       &dma_fence_stub_ops,
			       &dma_fence_stub_lock,
			       0, 0);

		set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			&dma_fence_stub.flags);

		dma_fence_signal_locked(&dma_fence_stub);
	}
	spin_unlock(&dma_fence_stub_lock);

	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
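/*
 * Example: a plausible use of the stub, returning an already-signaled fence
 * when an operation completed synchronously and there is nothing left to
 * wait for. The surrounding helpers are hypothetical::
 *
 *	struct dma_fence *my_driver_get_completion_fence(struct my_job *job)
 *	{
 *		if (my_job_already_done(job))
 *			return dma_fence_get_stub();
 *
 *		return dma_fence_get(job->fence);
 *	}
 */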
/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(void)
{
	struct dma_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return ERR_PTR(-ENOMEM);

	dma_fence_init(fence,
		       &dma_fence_stub_ops,
		       &dma_fence_stub_lock,
		       0, 0);

	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
		&fence->flags);

	dma_fence_signal(fence);

	return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);
/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);
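/*
 * Example: a driver would typically allocate one context per timeline at
 * init time and hand out monotonically increasing seqnos on it. All names
 * except dma_fence_context_alloc() and dma_fence_init() are hypothetical::
 *
 *	ring->fence_context = dma_fence_context_alloc(1);
 *	ring->fence_seqno = 0;
 *
 *	// later, once per submission, under ring->fence_lock:
 *	dma_fence_init(&job->fence, &my_ring_fence_ops, &ring->fence_lock,
 *		       ring->fence_context, ++ring->fence_seqno);
 */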
/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *    cookie = dma_fence_begin_signalling();
 *    lock(A);
 *    unlock(A);
 *    dma_fence_signal(B);
 *    dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exceptions are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as long
 *   as the overall locking hierarchy is consistent. The annotations also work
 *   both in interrupt and process context. Due to implementation details this
 *   requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross-driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
 */
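/*
 * Example: a minimal sketch of an annotated completion worker. Everything
 * except the annotation helpers and dma_fence_signal() is hypothetical::
 *
 *	static void my_job_done_worker(struct work_struct *work)
 *	{
 *		struct my_job *job = container_of(work, struct my_job,
 *						  done_work);
 *		bool cookie = dma_fence_begin_signalling();
 *
 *		my_hw_cleanup(job);		// must not take dma_resv locks
 *		dma_fence_signal(job->fence);	// or allocate with GFP_KERNEL
 *		dma_fence_end_signalling(cookie);
 *	}
 */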
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};
/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections is annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive readlock */
	lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);

	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);
/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
	if (cookie)
		return;

	lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);
void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
}
#endif
/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				      ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return -EINVAL;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}

	return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;
	int ret;

	if (WARN_ON(!fence))
		return -EINVAL;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);
/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
	return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);
/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	int ret;
	bool tmp;

	if (WARN_ON(!fence))
		return -EINVAL;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);

	return ret;
}
EXPORT_SYMBOL(dma_fence_signal);
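/*
 * Example: signalling from hard irq context is fine because &dma_fence.lock
 * is an irqsafe spinlock and dma_fence_signal() takes it with
 * spin_lock_irqsave(). The handler below is a hypothetical sketch::
 *
 *	static irqreturn_t my_job_done_irq(int irq, void *data)
 *	{
 *		struct my_ring *ring = data;
 *		struct dma_fence *fence = my_ring_pop_done_fence(ring);
 *
 *		if (fence) {
 *			dma_fence_signal(fence);
 *			dma_fence_put(fence);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */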
/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	trace_dma_fence_wait_start(fence);
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	trace_dma_fence_wait_end(fence);
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
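/*
 * Example: interpreting the tri-state return value of a bounded wait. The
 * fence pointer is assumed to be valid and referenced by the caller::
 *
 *	signed long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// wait timed out
 *	else if (ret < 0)
 *		return ret;		// e.g. -ERESTARTSYS on a signal
 *	// ret > 0: fence signaled, ret is the remaining timeout in jiffies
 */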
/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	trace_dma_fence_destroy(fence);

	if (WARN(!list_empty(&fence->cb_list) &&
		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
		 fence->ops->get_driver_name(fence),
		 fence->ops->get_timeline_name(fence),
		 fence->context, fence->seqno)) {
		unsigned long flags;

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);
/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);
static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}
/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This requests sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If the fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);
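/*
 * Example: bridging a fence to a &completion via a callback. The wrapper
 * struct and function names are hypothetical::
 *
 *	struct my_waiter {
 *		struct dma_fence_cb cb;
 *		struct completion done;
 *	};
 *
 *	static void my_fence_cb(struct dma_fence *fence,
 *				struct dma_fence_cb *cb)
 *	{
 *		struct my_waiter *waiter =
 *			container_of(cb, struct my_waiter, cb);
 *
 *		complete(&waiter->done);	// may run in irq context
 *	}
 *
 *	// caller side:
 *	init_completion(&waiter.done);
 *	if (dma_fence_add_callback(fence, &waiter.cb, my_fence_cb))
 *		complete(&waiter.done);		// -ENOENT: already signaled
 */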
/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);
/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);
struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}
/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;

	spin_lock_irqsave(fence->lock, flags);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);
static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}
/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 * positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}
		return 0;
	}

	cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);
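/*
 * Example: waiting for whichever of several fences signals first. @fences
 * is assumed to be an array of @count referenced fences::
 *
 *	uint32_t first;
 *	signed long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, count, true,
 *					 MAX_SCHEDULE_TIMEOUT, &first);
 *	if (ret > 0)
 *		pr_debug("fence %u signaled first\n", first);
 */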
/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
		   fence->ops->get_driver_name(fence),
		   fence->ops->get_timeline_name(fence), fence->seqno,
		   dma_fence_is_signaled(fence) ? "" : "un");
}
EXPORT_SYMBOL(dma_fence_describe);
/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = 0UL;
	fence->error = 0;

	trace_dma_fence_init(fence);
}
EXPORT_SYMBOL(dma_fence_init);
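/*
 * Example: about the smallest possible &dma_fence_ops implementation. Only
 * the two name callbacks are mandatory; without a &dma_fence_ops.wait hook
 * dma_fence_wait() falls back to dma_fence_default_wait(), and without a
 * &dma_fence_ops.release hook dma_fence_release() calls dma_fence_free().
 * The driver and timeline names are hypothetical::
 *
 *	static const char *my_fence_get_driver_name(struct dma_fence *fence)
 *	{
 *		return "my_driver";
 *	}
 *
 *	static const char *my_fence_get_timeline_name(struct dma_fence *fence)
 *	{
 *		return "my_timeline";
 *	}
 *
 *	static const struct dma_fence_ops my_fence_ops = {
 *		.get_driver_name = my_fence_get_driver_name,
 *		.get_timeline_name = my_fence_get_timeline_name,
 *	};
 */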