/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Typically called by the IRQ handler.
 */
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant;
	int found = 0;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	struct list_head *list, *prev;
	drm_fence_object_t *fence;

	if (list_empty(&fm->ring))
		return;

	/*
	 * Find the first fence on the ring that the hardware has not yet
	 * passed, using a wrap-safe modular comparison.
	 */

	list_for_each_entry(fence, &fm->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff > driver->wrap_diff) {
			found = 1;
			break;
		}
	}

	list = (found) ? fence->ring.prev : fm->ring.prev;
	prev = list->prev;

	for (; list != &fm->ring; list = prev, prev = list->prev) {
		fence = list_entry(list, drm_fence_object_t, ring);

		type |= fence->native_type;
		relevant = type & fence->type;

		if ((fence->signaled | relevant) != fence->signaled) {
			fence->signaled |= relevant;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled);
			fence->submitted_flush |= relevant;
			wake = 1;
		}

		relevant = fence->flush_mask &
		    ~(fence->signaled | fence->submitted_flush);

		if (relevant) {
			fm->pending_flush |= relevant;
			fence->submitted_flush = fence->flush_mask;
		}

		if (!(fence->type & ~fence->signaled)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
		}
	}

	if (wake) {
		DRM_WAKEUP(&fm->fence_queue);
	}
}

EXPORT_SYMBOL(drm_fence_handler);
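/*
 * Example (not part of the original file): a minimal sketch of how a
 * driver interrupt path might feed a completed breadcrumb into
 * drm_fence_handler(). drm_fence_handler() touches the fence ring
 * without taking fm->lock itself, so the caller holds it for writing.
 * The helper example_read_seq() is hypothetical; a real driver reads
 * its own hardware status page or registers.
 *
 *	static void example_fence_irq(drm_device_t * dev)
 *	{
 *		drm_fence_manager_t *fm = &dev->fm;
 *		uint32_t sequence;
 *
 *		write_lock(&fm->lock);
 *		sequence = example_read_seq(dev);
 *		drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
 *		write_unlock(&fm->lock);
 *	}
 */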
static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
	drm_fence_manager_t *fm = &dev->fm;
	unsigned long flags;

	write_lock_irqsave(&fm->lock, flags);
	list_del_init(ring);
	write_unlock_irqrestore(&fm->lock, flags);
}
void drm_fence_usage_deref_locked(drm_device_t * dev,
				  drm_fence_object_t * fence)
{
	if (atomic_dec_and_test(&fence->usage)) {
		drm_fence_unring(dev, &fence->ring);
		DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
			  fence->base.hash.key);
		kmem_cache_free(drm_cache.fence_object, fence);
	}
}
void drm_fence_usage_deref_unlocked(drm_device_t * dev,
				    drm_fence_object_t * fence)
{
	if (atomic_dec_and_test(&fence->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&fence->usage) == 0) {
			drm_fence_unring(dev, &fence->ring);
			kmem_cache_free(drm_cache.fence_object, fence);
		}
		mutex_unlock(&dev->struct_mutex);
	}
}
static void drm_fence_object_destroy(drm_file_t * priv,
				     drm_user_object_t * base)
{
	drm_device_t *dev = priv->head->dev;
	drm_fence_object_t *fence =
	    drm_user_object_entry(base, drm_fence_object_t, base);

	drm_fence_usage_deref_locked(dev, fence);
}
static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
			  uint32_t mask, int poke_flush)
{
	unsigned long flags;
	int signaled;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;

	if (poke_flush)
		driver->poke_flush(dev);
	read_lock_irqsave(&fm->lock, flags);
	signaled =
	    (fence->type & mask & fence->signaled) == (fence->type & mask);
	read_unlock_irqrestore(&fm->lock, flags);

	return signaled;
}
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
				drm_fence_driver_t * driver, uint32_t sequence)
{
	uint32_t diff;

	if (!fm->pending_exe_flush) {
		struct list_head *list;

		/*
		 * Last_exe_flush is invalid. Find oldest sequence.
		 */

/*		list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
		list = &fm->ring;
		if (list->next == &fm->ring) {
			return;
		} else {
			drm_fence_object_t *fence =
			    list_entry(list->next, drm_fence_object_t, ring);
			fm->last_exe_flush = (fence->sequence - 1) &
			    driver->sequence_mask;
		}
		diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
		if (diff >= driver->wrap_diff)
			return;
		fm->exe_flush_sequence = sequence;
		fm->pending_exe_flush = 1;
	} else {
		diff =
		    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
		if (diff < driver->wrap_diff) {
			fm->exe_flush_sequence = sequence;
		}
	}
}
int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
{
	return ((fence->signaled & type) == type);
}
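/*
 * Example (added for illustration): fence types are bitmasks, so with
 * fence->signaled == 0x3 (DRM_FENCE_TYPE_EXE plus one driver flush
 * type), drm_fence_object_signaled(fence, 0x1) returns 1, while a
 * query for a type that has not signaled, e.g. 0x4, returns 0.
 */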
/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */
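/*
 * Worked example (added for clarity): hardware breadcrumb counters are
 * narrow and wrap. With a 24-bit counter, driver->sequence_mask is
 * 0x00ffffff and driver->wrap_diff is typically half the sequence
 * space, e.g. 0x00800000. For a fence emitted at sequence 0x00000005
 * and a reported hardware sequence of 0x00000003,
 * (0x00000003 - 0x00000005) & 0x00ffffff == 0x00fffffe, which exceeds
 * wrap_diff, so the fence is correctly treated as not yet passed. Once
 * the counter reaches 0x00000005 the masked difference drops below
 * wrap_diff and the fence counts as reached. Flushing and signaling
 * old fences keeps this modular comparison unambiguous.
 */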
int drm_fence_object_flush(drm_device_t * dev,
			   drm_fence_object_t * fence, uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;

	if (type & ~fence->type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->type);
		return -EINVAL;
	}

	write_lock_irqsave(&fm->lock, flags);
	fence->flush_mask |= type;
	if (fence->submitted_flush == fence->signaled) {
		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
			drm_fence_flush_exe(fm, driver, fence->sequence);
			fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
		} else {
			fm->pending_flush |= (fence->flush_mask &
					      ~fence->submitted_flush);
			fence->submitted_flush = fence->flush_mask;
		}
	}
	write_unlock_irqrestore(&fm->lock, flags);
	driver->poke_flush(dev);
	return 0;
}
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	uint32_t old_sequence;
	unsigned long flags;
	drm_fence_object_t *fence;
	uint32_t diff;

	mutex_lock(&dev->struct_mutex);
	read_lock_irqsave(&fm->lock, flags);
	if (fm->ring.next == &fm->ring) {
		read_unlock_irqrestore(&fm->lock, flags);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
	fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
	atomic_inc(&fence->usage);
	mutex_unlock(&dev->struct_mutex);
	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
	read_unlock_irqrestore(&fm->lock, flags);
	if (diff < driver->wrap_diff) {
		drm_fence_object_flush(dev, fence, fence->type);
	}
	drm_fence_usage_deref_unlocked(dev, fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);
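/*
 * Usage sketch (an assumption, not from the original file): a driver
 * would call drm_fence_flush_old() periodically, for example from its
 * emit path or a timer, passing the current breadcrumb so that fences
 * more than driver->flush_diff sequence numbers old are flushed and
 * signaled before the hardware counter wraps:
 *
 *	uint32_t cur = example_read_seq(dev);	// hypothetical helper
 *	drm_fence_flush_old(dev, cur);
 */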
int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
			  int lazy, int ignore_signals, uint32_t mask)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	int ret = 0;
	unsigned long _end;
	int signaled;

	if (mask & ~fence->type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->type);
		return -EINVAL;
	}

	if (fence_signaled(dev, fence, mask, 0))
		return 0;

	_end = jiffies + 3 * DRM_HZ;

	drm_fence_object_flush(dev, fence, mask);

	if (lazy && driver->lazy_capable) {

		do {
			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
				    fence_signaled(dev, fence, mask, 1));
			if (time_after_eq(jiffies, _end))
				break;
		} while (ret == -EINTR && ignore_signals);
		if (time_after_eq(jiffies, _end) && (ret != 0))
			ret = -EBUSY;
		if (ret) {
			if (ret == -EBUSY)
				DRM_ERROR("Fence timeout. GPU lockup.\n");
			return ((ret == -EINTR) ? -EAGAIN : ret);
		}
	} else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
		   driver->lazy_capable) {

		/*
		 * We use IRQ wait for EXE fence if available to gain
		 * CPU in some cases.
		 */

		do {
			DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
				    fence_signaled(dev, fence,
						   DRM_FENCE_TYPE_EXE, 1));
			if (time_after_eq(jiffies, _end))
				break;
		} while (ret == -EINTR && ignore_signals);
		if (time_after_eq(jiffies, _end) && (ret != 0))
			ret = -EBUSY;
		if (ret)
			return ((ret == -EINTR) ? -EAGAIN : ret);
	}

	if (fence_signaled(dev, fence, mask, 0))
		return 0;

	/*
	 * Avoid kernel-space busy-waits.
	 */
	if (!ignore_signals)
		return -EAGAIN;

	do {
		schedule();
		signaled = fence_signaled(dev, fence, mask, 1);
	} while (!signaled && !time_after_eq(jiffies, _end));

	if (!signaled)
		return -EBUSY;

	return 0;
}
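/*
 * Usage sketch (added; not original code): in-kernel throttling on a
 * fence typically uses a lazy wait and retries -EAGAIN, which this
 * function returns in place of -EINTR when a signal interrupts the
 * wait:
 *
 *	do {
 *		ret = drm_fence_object_wait(dev, fence, 1, 0, fence->type);
 *	} while (ret == -EAGAIN);
 *	if (ret == -EBUSY)
 *		;	// timed out: likely GPU lockup
 */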
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
			  uint32_t fence_flags, uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;
	uint32_t sequence;
	uint32_t native_type;
	int ret;

	drm_fence_unring(dev, &fence->ring);
	ret = driver->emit(dev, fence_flags, &sequence, &native_type);
	if (ret)
		return ret;

	write_lock_irqsave(&fm->lock, flags);
	fence->type = type;
	fence->flush_mask = 0x00;
	fence->submitted_flush = 0x00;
	fence->signaled = 0x00;
	fence->sequence = sequence;
	fence->native_type = native_type;
	list_add_tail(&fence->ring, &fm->ring);
	write_unlock_irqrestore(&fm->lock, flags);
	return 0;
}
static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
				 uint32_t fence_flags,
				 drm_fence_object_t * fence)
{
	int ret = 0;
	unsigned long flags;
	drm_fence_manager_t *fm = &dev->fm;

	mutex_lock(&dev->struct_mutex);
	atomic_set(&fence->usage, 1);
	mutex_unlock(&dev->struct_mutex);

	write_lock_irqsave(&fm->lock, flags);
	INIT_LIST_HEAD(&fence->ring);
	fence->class = 0;
	fence->type = type;
	fence->flush_mask = 0;
	fence->submitted_flush = 0;
	fence->signaled = 0;
	fence->sequence = 0;
	write_unlock_irqrestore(&fm->lock, flags);
	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
		ret = drm_fence_object_emit(dev, fence, fence_flags, type);
	}
	return ret;
}
int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
			      int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &fence->base, shareable);
	mutex_unlock(&dev->struct_mutex);
	if (ret)
		return ret;
	fence->base.type = drm_fence_type;
	fence->base.remove = &drm_fence_object_destroy;
	DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
	return 0;
}

EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t type,
			    unsigned flags, drm_fence_object_t ** c_fence)
{
	drm_fence_object_t *fence;
	int ret;

	fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	ret = drm_fence_object_init(dev, type, flags, fence);
	if (ret) {
		drm_fence_usage_deref_unlocked(dev, fence);
		return ret;
	}
	*c_fence = fence;
	return 0;
}

EXPORT_SYMBOL(drm_fence_object_create);
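/*
 * Example (a sketch against this file's API, not original code): create
 * a fence that covers execution, emit it immediately via
 * DRM_FENCE_FLAG_EMIT, wait for it lazily, and drop the reference:
 *
 *	drm_fence_object_t *fence;
 *	int ret;
 *
 *	ret = drm_fence_object_create(dev, DRM_FENCE_TYPE_EXE,
 *				      DRM_FENCE_FLAG_EMIT, &fence);
 *	if (!ret) {
 *		ret = drm_fence_object_wait(dev, fence, 1, 0,
 *					    DRM_FENCE_TYPE_EXE);
 *		drm_fence_usage_deref_unlocked(dev, fence);
 *	}
 */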
void drm_fence_manager_init(drm_device_t * dev)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *fed = dev->driver->fence_driver;
	int i;

	fm->lock = RW_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&fm->ring);
	fm->pending_flush = 0;
	DRM_INIT_WAITQUEUE(&fm->fence_queue);
	fm->initialized = 0;
	if (fed) {
		fm->initialized = 1;
		for (i = 0; i < fed->no_types; ++i) {
			fm->fence_types[i] = &fm->ring;
		}
	}
}
void drm_fence_manager_takedown(drm_device_t * dev)
{
}
drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_user_object_t *uo;
	drm_fence_object_t *fence;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, handle);
	if (!uo || (uo->type != drm_fence_type)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	fence = drm_user_object_entry(uo, drm_fence_object_t, base);
	atomic_inc(&fence->usage);
	mutex_unlock(&dev->struct_mutex);
	return fence;
}
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	int ret;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_arg_t arg;
	drm_fence_object_t *fence;
	drm_user_object_t *uo;
	unsigned long flags;
	ret = 0;

	if (!fm->initialized) {
		DRM_ERROR("The DRM driver does not support fencing.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
	switch (arg.op) {
	case drm_fence_create:
		if (arg.flags & DRM_FENCE_FLAG_EMIT)
			LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_object_create(dev, arg.type,
					      arg.flags & DRM_FENCE_FLAG_EMIT,
					      &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret) {
			drm_fence_usage_deref_unlocked(dev, fence);
			return ret;
		}

		/*
		 * usage > 0. No need to lock dev->struct_mutex;
		 */

		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	case drm_fence_destroy:
		mutex_lock(&dev->struct_mutex);
		uo = drm_lookup_user_object(priv, arg.handle);
		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}
		ret = drm_remove_user_object(priv, uo);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	case drm_fence_reference:
		ret =
		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
		if (ret)
			return ret;
		fence = drm_lookup_fence_object(priv, arg.handle);
		break;
	case drm_fence_unreference:
		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
		return ret;
	case drm_fence_signaled:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		break;
	case drm_fence_flush:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_flush(dev, fence, arg.type);
		break;
	case drm_fence_wait:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret =
		    drm_fence_object_wait(dev, fence,
					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
					  0, arg.type);
		break;
	case drm_fence_emit:
		LOCK_TEST_WITH_RETURN(dev, filp);
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
		break;
	case drm_fence_buffers:
		if (!dev->bm.initialized) {
			DRM_ERROR("Buffer object manager is not initialized\n");
			return -EINVAL;
		}
		LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
					       NULL, &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret)
			return ret;
		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	default:
		return -EINVAL;
	}
	read_lock_irqsave(&fm->lock, flags);
	arg.class = fence->class;
	arg.type = fence->type;
	arg.signaled = fence->signaled;
	read_unlock_irqrestore(&fm->lock, flags);
	drm_fence_usage_deref_unlocked(dev, fence);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return ret;
}
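/*
 * User-space sketch (an illustration only; the ioctl request number and
 * the exact drm_fence_arg_t layout are assumptions based on this
 * handler): every fence operation is multiplexed through one argument
 * struct, so creating an emitted fence and then waiting on it looks
 * roughly like:
 *
 *	drm_fence_arg_t arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.op = drm_fence_create;
 *	arg.type = DRM_FENCE_TYPE_EXE;
 *	arg.flags = DRM_FENCE_FLAG_EMIT;
 *	ioctl(fd, DRM_IOCTL_FENCE, &arg);  // handle returned in arg.handle
 *
 *	arg.op = drm_fence_wait;
 *	ioctl(fd, DRM_IOCTL_FENCE, &arg);  // arg.signaled updated on return
 */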