/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

/**
 * struct dma_resv_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the table
 * @shared_max: allocated size of the table, for growing it
 * @shared: shared fence table
 */
struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 * @lock: update side lock
 * @seq: sequence count for managing RCU read-side synchronization
 * @fence_excl: the exclusive fence, if there is one currently
 * @fence: list of current shared fences
 */
struct dma_resv {
	struct ww_mutex lock;
	seqcount_ww_mutex_t seq;

	struct dma_fence __rcu *fence_excl;
	struct dma_resv_list __rcu *fence;
};

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_shared_max(struct dma_resv *obj);
#else
static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqcount is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}
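
/*
 * A minimal usage sketch (not part of this header): with @ctx == NULL
 * there is no deadlock avoidance, so this form is only safe when no other
 * reservation object is held. "obj" is a caller-provided struct dma_resv
 * pointer.
 *
 *	if (dma_resv_lock(obj, NULL) == 0) {
 *		... update fences under the lock ...
 *		dma_resv_unlock(obj);
 *	}
 */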

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification; the
 * sleep is interruptible by signals. Note that the lock is only against
 * other writers; readers will run concurrently with a writer under RCU. The
 * seqcount is used to notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case, i.e. after
 * dma_resv_lock() failed with -EDEADLK. This function will sleep until the
 * lock becomes available. See dma_resv_lock() as well.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case, sleeping interruptibly
 * until the lock becomes available. See dma_resv_lock_interruptible() as
 * well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}
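
/*
 * A sketch of the usual wound/wait pattern for locking two reservation
 * objects in undefined order ("obj1" and "obj2" are caller-provided, not
 * part of this header). On -EDEADLK the contended lock is taken on the
 * slowpath and the sequence is retried; real code keeps retrying if the
 * relock returns -EDEADLK again:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(obj1, &ctx);
 *	if (!ret) {
 *		ret = dma_resv_lock(obj2, &ctx);
 *		if (ret == -EDEADLK) {
 *			dma_resv_unlock(obj1);
 *			dma_resv_lock_slow(obj2, &ctx);
 *			ret = dma_resv_lock(obj1, &ctx);
 *		}
 *	}
 *	if (!ret)
 *		ww_acquire_done(&ctx);
 */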

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqcount is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock);
}
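
/*
 * A sketch of opportunistic locking, e.g. from a shrinker or eviction
 * path where blocking on the lock is undesirable ("obj" is
 * caller-provided):
 *
 *	if (dma_resv_trylock(obj)) {
 *		... do the fast-path work ...
 *		dma_resv_unlock(obj);
 *	}
 */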

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_shared_max(obj);
	ww_mutex_unlock(&obj->lock);
}

/**
 * dma_resv_excl_fence - return the object's exclusive fence
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any). The caller must either hold the
 * object's lock through dma_resv_lock() or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}
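
/*
 * A locked read sketch ("obj" is caller-provided): while the reservation
 * lock is held the pointer cannot be swapped out, so no extra reference
 * is needed for the duration of the lock:
 *
 *	struct dma_fence *fence;
 *
 *	dma_resv_lock(obj, NULL);
 *	fence = dma_resv_excl_fence(obj);
 *	if (fence)
 *		dma_fence_wait(fence, true);
 *	dma_resv_unlock(obj);
 */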

/**
 * dma_resv_get_excl_unlocked - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments its
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct dma_fence *
dma_resv_get_excl_unlocked(struct dma_resv *obj)
{
	struct dma_fence *fence;

	if (!rcu_access_pointer(obj->fence_excl))
		return NULL;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
	rcu_read_unlock();

	return fence;
}

/**
 * dma_resv_shared_list - get the reservation object's shared fence list
 * @obj: the reservation object
 *
 * Returns the shared fence list. The caller must either hold the object's
 * lock through dma_resv_lock() or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 */
static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}
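
/*
 * A sketch of walking the shared fence table with the reservation lock
 * held ("obj" is caller-provided). Under dma_resv_lock() the list cannot
 * be reallocated, so plain indexing is safe:
 *
 *	struct dma_resv_list *list;
 *	unsigned int i;
 *
 *	dma_resv_assert_held(obj);
 *	list = dma_resv_shared_list(obj);
 *	for (i = 0; list && i < list->shared_count; ++i) {
 *		struct dma_fence *fence =
 *			rcu_dereference_protected(list->shared[i],
 *						  dma_resv_held(obj));
 *		... inspect fence ...
 *	}
 */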

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
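
/*
 * A usage sketch for dma_resv_get_fences() ("obj" is caller-provided):
 * on success it hands back a referenced exclusive fence and a kmalloc'ed
 * array of referenced shared fences, both of which the caller must
 * release:
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int count, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
 *	if (!ret) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);
 *	}
 */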

#endif /* _LINUX_RESERVATION_H */