1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Typically called by the IRQ handler.
/*
 * drm_fence_handler - report that fence sequence number `sequence` of the
 * given fence class has passed for the hardware types in `type`.
 * Typically called from the IRQ handler.  Walks the class ring, marks every
 * fence at or before `sequence` as signalled for the relevant type bits, and
 * removes completely-signalled fences from the ring.
 * NOTE(review): this view of the file elides some lines (braces, some locals
 * and the lock acquisition); comments describe only the visible code.
 */
37 void drm_fence_handler(struct drm_device * dev, uint32_t class,
38 uint32_t sequence, uint32_t type)
43 struct drm_fence_manager *fm = &dev->fm;
44 struct drm_fence_class_manager *fc = &fm->class[class];
45 struct drm_fence_driver *driver = dev->driver->fence_driver;
46 struct list_head *head;
47 struct drm_fence_object *fence, *next;
49 int is_exe = (type & DRM_FENCE_TYPE_EXE);
/* Wrap-safe comparison: a masked difference below wrap_diff means
 * `sequence` is at or past the compared sequence number. */
52 diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
/* The pending EXE flush has been satisfied by this sequence. */
54 if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
55 fc->pending_exe_flush = 0;
57 diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
58 ge_last_exe = diff < driver->wrap_diff;
/* These type bits have now been flushed; clear them from the pending set. */
61 fc->pending_flush &= ~type;
63 if (is_exe && ge_last_exe) {
64 fc->last_exe_flush = sequence;
67 if (list_empty(&fc->ring))
/* Forward scan: find the first fence on the ring that is NEWER than
 * `sequence` (masked diff beyond wrap_diff). */
70 list_for_each_entry(fence, &fc->ring, ring) {
71 diff = (sequence - fence->sequence) & driver->sequence_mask;
72 if (diff > driver->wrap_diff) {
/* Walk backwards from that point (or from the whole ring if none was
 * found), signalling every fence at or before `sequence`. */
78 head = (found) ? &fence->ring : &fc->ring;
80 list_for_each_entry_safe_reverse(fence, next, head, ring) {
81 if (&fence->ring == &fc->ring)
/* Fold in types the hardware signals natively for this fence. */
84 type |= fence->native_type;
85 relevant = type & fence->type;
87 if ((fence->signaled | relevant) != fence->signaled) {
88 fence->signaled |= relevant;
89 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
90 fence->base.hash.key, fence->signaled);
91 fence->submitted_flush |= relevant;
/* Requested flushes not yet signalled or submitted become pending. */
95 relevant = fence->flush_mask &
96 ~(fence->signaled | fence->submitted_flush);
99 fc->pending_flush |= relevant;
100 fence->submitted_flush = fence->flush_mask;
/* All of the fence's type bits are signalled: retire it from the ring. */
103 if (!(fence->type & ~fence->signaled)) {
104 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
105 fence->base.hash.key);
106 list_del_init(&fence->ring);
/* Wake anyone sleeping in drm_fence_lazy_wait() on this class. */
111 DRM_WAKEUP(&fc->fence_queue);
115 EXPORT_SYMBOL(drm_fence_handler);
/*
 * drm_fence_unring - detach a fence from its class ring under the fence
 * manager's write lock.  NOTE(review): the actual list removal statement is
 * not visible in this view (presumably a list_del_init(ring) between the
 * lock/unlock pair) — confirm against the full file.
 */
117 static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
119 struct drm_fence_manager *fm = &dev->fm;
122 write_lock_irqsave(&fm->lock, flags);
124 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_usage_deref_locked - drop one usage reference on *fence.
 * Caller must hold dev->struct_mutex (asserted below).  On the last
 * reference the fence is removed from its ring, the manager count is
 * decremented, and the object is freed.
 */
127 void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
129 struct drm_fence_object *tmp_fence = *fence;
130 struct drm_device *dev = tmp_fence->dev;
131 struct drm_fence_manager *fm = &dev->fm;
133 DRM_ASSERT_LOCKED(&dev->struct_mutex);
135 if (atomic_dec_and_test(&tmp_fence->usage)) {
136 drm_fence_unring(dev, &tmp_fence->ring);
137 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
138 tmp_fence->base.hash.key);
139 atomic_dec(&fm->count);
/* A fence still on a user-object list must never be freed. */
140 BUG_ON(!list_empty(&tmp_fence->base.list));
141 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
/*
 * drm_fence_usage_deref_unlocked - drop one usage reference without the
 * caller holding dev->struct_mutex.  If the count hits zero the mutex is
 * taken and the count re-checked before destruction, guarding against a
 * concurrent re-reference between the decrement and the lock.
 */
145 void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
147 struct drm_fence_object *tmp_fence = *fence;
148 struct drm_device *dev = tmp_fence->dev;
149 struct drm_fence_manager *fm = &dev->fm;
152 if (atomic_dec_and_test(&tmp_fence->usage)) {
153 mutex_lock(&dev->struct_mutex);
/* Re-check under the mutex: someone may have re-referenced the fence. */
154 if (atomic_read(&tmp_fence->usage) == 0) {
155 drm_fence_unring(dev, &tmp_fence->ring);
156 atomic_dec(&fm->count);
157 BUG_ON(!list_empty(&tmp_fence->base.list));
158 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
160 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_reference_locked - take an extra usage reference on src.
 * Caller must hold src->dev->struct_mutex (asserted).  Returns the
 * referenced fence (return statement elided from this view).
 */
164 struct drm_fence_object
165 *drm_fence_reference_locked(struct drm_fence_object *src)
167 DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
169 atomic_inc(&src->usage);
/*
 * drm_fence_reference_unlocked - take an extra usage reference on src,
 * acquiring struct_mutex itself; presumably also stores src into *dst
 * (the assignment is not visible in this view — confirm).
 */
173 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
174 struct drm_fence_object *src)
176 mutex_lock(&src->dev->struct_mutex);
178 atomic_inc(&src->usage);
179 mutex_unlock(&src->dev->struct_mutex);
/*
 * drm_fence_object_destroy - user-object removal callback (installed as
 * fence->base.remove): recovers the fence from its embedded user object and
 * drops the reference held on behalf of userspace.
 */
183 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
185 struct drm_fence_object *fence =
186 drm_user_object_entry(base, struct drm_fence_object, base);
188 drm_fence_usage_deref_locked(&fence);
/*
 * drm_fence_object_signaled - return whether all type bits in `mask` that
 * the fence carries are signalled.  When poke_flush is set the driver's
 * poke_flush hook is invoked first to prod the hardware (the conditional
 * guarding that call is elided from this view).  The signalled mask is read
 * under the fence manager's read lock.
 */
191 int drm_fence_object_signaled(struct drm_fence_object * fence,
192 uint32_t mask, int poke_flush)
196 struct drm_device *dev = fence->dev;
197 struct drm_fence_manager *fm = &dev->fm;
198 struct drm_fence_driver *driver = dev->driver->fence_driver;
201 driver->poke_flush(dev, fence->class);
202 read_lock_irqsave(&fm->lock, flags);
/* True iff every bit of (type & mask) is already set in `signaled`. */
204 (fence->type & mask & fence->signaled) == (fence->type & mask);
205 read_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_flush_exe - request an EXE flush up to `sequence`.  Records the
 * sequence if no flush is pending; otherwise extends an already-pending
 * flush when `sequence` is newer (wrap-safe masked comparison).
 * Caller holds the fence manager write lock.
 */
210 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
211 struct drm_fence_driver * driver, uint32_t sequence)
215 if (!fc->pending_exe_flush) {
216 fc->exe_flush_sequence = sequence;
217 fc->pending_exe_flush = 1;
220 (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
/* `sequence` is ahead of the recorded flush point: move it forward. */
221 if (diff < driver->wrap_diff) {
222 fc->exe_flush_sequence = sequence;
/*
 * drm_fence_object_flush - make sure flushes are submitted so the fence
 * eventually signals the requested `type` bits.  Rejects masks that would
 * extend the fence beyond the types it was emitted with.  Finishes by
 * poking the driver so the flush actually reaches the hardware.
 */
227 int drm_fence_object_flush(struct drm_fence_object * fence,
230 struct drm_device *dev = fence->dev;
231 struct drm_fence_manager *fm = &dev->fm;
232 struct drm_fence_class_manager *fc = &fm->class[fence->class];
233 struct drm_fence_driver *driver = dev->driver->fence_driver;
/* A flush can only target type bits the fence was created with. */
236 if (type & ~fence->type) {
237 DRM_ERROR("Flush trying to extend fence type, "
238 "0x%x, 0x%x\n", type, fence->type);
242 write_lock_irqsave(&fm->lock, flags);
243 fence->flush_mask |= type;
/* Only submit new flushes when nothing beyond the already-signalled
 * bits has been submitted yet. */
244 if (fence->submitted_flush == fence->signaled) {
245 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
246 !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
247 drm_fence_flush_exe(fc, driver, fence->sequence);
248 fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
250 fc->pending_flush |= (fence->flush_mask &
251 ~fence->submitted_flush);
252 fence->submitted_flush = fence->flush_mask;
255 write_unlock_irqrestore(&fm->lock, flags);
/* Prod the driver so the pending flush is actually executed. */
256 driver->poke_flush(dev, fence->class);
261 * Make sure old fence objects are signaled before their fence sequences are
262 * wrapped around and reused.
/*
 * drm_fence_flush_old - make sure old fence objects are signalled before
 * their sequence numbers wrap around and get reused.  Schedules an EXE
 * flush behind `sequence` if needed, then flushes the oldest fence on the
 * ring when it has fallen behind old_sequence.
 * NOTE(review): the visible lock ordering releases fm->lock (L163) after
 * struct_mutex was dropped (L161) while fence->sequence was read in
 * between — the elided lines may change this picture; verify in full file.
 */
265 void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence)
267 struct drm_fence_manager *fm = &dev->fm;
268 struct drm_fence_class_manager *fc = &fm->class[class];
269 struct drm_fence_driver *driver = dev->driver->fence_driver;
270 uint32_t old_sequence;
272 struct drm_fence_object *fence;
275 write_lock_irqsave(&fm->lock, flags);
/* The sequence flush_diff steps behind the current one. */
276 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
277 diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
279 if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
280 fc->pending_exe_flush = 1;
281 fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
283 write_unlock_irqrestore(&fm->lock, flags);
285 mutex_lock(&dev->struct_mutex);
286 read_lock_irqsave(&fm->lock, flags);
288 if (list_empty(&fc->ring)) {
289 read_unlock_irqrestore(&fm->lock, flags);
290 mutex_unlock(&dev->struct_mutex);
/* Oldest fence is at the head of the ring; take a reference to it. */
293 fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
294 mutex_unlock(&dev->struct_mutex);
295 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
296 read_unlock_irqrestore(&fm->lock, flags);
/* The oldest fence is at or behind old_sequence: flush all its types. */
297 if (diff < driver->wrap_diff) {
298 drm_fence_object_flush(fence, fence->type);
300 drm_fence_usage_deref_unlocked(&fence);
303 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * drm_fence_lazy_wait - sleep on the class wait queue until the fence
 * signals the bits in `mask`, retrying across signals when ignore_signals
 * is set, with an overall timeout of roughly 3 * DRM_HZ jiffies.  A final
 * timeout is treated as a GPU lockup / broken fence driver and reported.
 * Returns 0 on success, -EBUSY-style error paths are elided from this view;
 * -EINTR is translated to -EAGAIN for the caller.
 */
305 static int drm_fence_lazy_wait(struct drm_fence_object *fence,
309 struct drm_device *dev = fence->dev;
310 struct drm_fence_manager *fm = &dev->fm;
311 struct drm_fence_class_manager *fc = &fm->class[fence->class];
313 unsigned long _end = jiffies + 3*DRM_HZ;
317 DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
318 (signaled = drm_fence_object_signaled(fence, mask, 1)));
321 if (time_after_eq(jiffies, _end))
/* Keep waiting across interrupts only when the caller asked us to. */
323 } while (ret == -EINTR && ignore_signals);
324 if (drm_fence_object_signaled(fence, mask, 0))
326 if (time_after_eq(jiffies, _end))
330 DRM_ERROR("Fence timeout. "
331 "GPU lockup or fence driver was "
334 return ((ret == -EINTR) ? -EAGAIN : ret);
/*
 * drm_fence_object_wait - wait until the fence signals the bits in `mask`.
 * Prefers interrupt-driven lazy waits (driver lazy_capable, or per-type
 * has_irq support), falling back to a polling loop bounded by ~3 * DRM_HZ.
 * Rejects masks extending beyond the fence's type, and returns immediately
 * if the fence is already signalled.
 */
339 int drm_fence_object_wait(struct drm_fence_object * fence,
340 int lazy, int ignore_signals, uint32_t mask)
342 struct drm_device *dev = fence->dev;
343 struct drm_fence_driver *driver = dev->driver->fence_driver;
348 if (mask & ~fence->type) {
349 DRM_ERROR("Wait trying to extend fence type"
350 " 0x%08x 0x%08x\n", mask, fence->type);
/* Fast path: already signalled, nothing to wait for. */
354 if (drm_fence_object_signaled(fence, mask, 0))
357 _end = jiffies + 3 * DRM_HZ;
359 drm_fence_object_flush(fence, mask);
361 if (lazy && driver->lazy_capable) {
363 ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
/* Non-lazy-capable drivers: lazily wait for EXE first if it has an
 * IRQ, then for the remaining type bits if they have one too. */
369 if (driver->has_irq(dev, fence->class,
370 DRM_FENCE_TYPE_EXE)) {
371 ret = drm_fence_lazy_wait(fence, ignore_signals,
377 if (driver->has_irq(dev, fence->class,
378 mask & ~DRM_FENCE_TYPE_EXE)) {
379 ret = drm_fence_lazy_wait(fence, ignore_signals,
385 if (drm_fence_object_signaled(fence, mask, 0))
389 * Avoid kernel-space busy-waits.
/* Polling fallback: repeatedly poke and re-check until signalled or
 * the deadline passes. */
397 signaled = drm_fence_object_signaled(fence, mask, 1);
398 } while (!signaled && !time_after_eq(jiffies, _end));
/*
 * drm_fence_object_emit - (re)emit a fence: take it off any ring it was on,
 * ask the driver to emit a new hardware sequence number, reset the fence's
 * flush/signal state under the write lock, and append it to the tail of its
 * class ring.
 */
406 int drm_fence_object_emit(struct drm_fence_object * fence,
407 uint32_t fence_flags, uint32_t class, uint32_t type)
409 struct drm_device *dev = fence->dev;
410 struct drm_fence_manager *fm = &dev->fm;
411 struct drm_fence_driver *driver = dev->driver->fence_driver;
412 struct drm_fence_class_manager *fc = &fm->class[fence->class];
415 uint32_t native_type;
418 drm_fence_unring(dev, &fence->ring);
419 ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
423 write_lock_irqsave(&fm->lock, flags);
424 fence->class = class;
426 fence->flush_mask = 0x00;
427 fence->submitted_flush = 0x00;
428 fence->signaled = 0x00;
429 fence->sequence = sequence;
430 fence->native_type = native_type;
/* Empty ring: seed last_exe_flush just behind the new sequence so the
 * wrap-safe comparisons in drm_fence_handler() start out consistent. */
431 if (list_empty(&fc->ring))
432 fc->last_exe_flush = sequence - 1;
433 list_add_tail(&fence->ring, &fc->ring);
434 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_object_init - initialize a freshly allocated fence object:
 * usage count of 1, empty ring linkage, class and cleared flush state,
 * then optionally emit it immediately when DRM_FENCE_FLAG_EMIT is set.
 * The base.list init below avoids hitting BUG() in the deref paths for
 * kernel-only fences that never become user objects.
 */
438 static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
440 uint32_t fence_flags,
441 struct drm_fence_object * fence)
445 struct drm_fence_manager *fm = &dev->fm;
447 mutex_lock(&dev->struct_mutex);
448 atomic_set(&fence->usage, 1);
449 mutex_unlock(&dev->struct_mutex);
451 write_lock_irqsave(&fm->lock, flags);
452 INIT_LIST_HEAD(&fence->ring);
455 * Avoid hitting BUG() for kernel-only fence objects.
458 INIT_LIST_HEAD(&fence->base.list);
459 fence->class = class;
461 fence->flush_mask = 0;
462 fence->submitted_flush = 0;
466 write_unlock_irqrestore(&fm->lock, flags);
467 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
468 ret = drm_fence_object_emit(fence, fence_flags,
/*
 * drm_fence_add_user_object - register the fence as a (possibly shareable)
 * user object so userspace can refer to it by handle; takes an extra usage
 * reference on behalf of the user object and installs the destroy callback.
 */
474 int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
477 struct drm_device *dev = priv->head->dev;
480 mutex_lock(&dev->struct_mutex);
481 ret = drm_add_user_object(priv, &fence->base, shareable);
484 atomic_inc(&fence->usage);
485 fence->base.type = drm_fence_type;
486 fence->base.remove = &drm_fence_object_destroy;
487 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
489 mutex_unlock(&dev->struct_mutex);
492 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * drm_fence_object_create - allocate and initialize a fence object,
 * returning it through *c_fence.  On init failure the (only) reference is
 * dropped, which frees the object; on success the manager's fence count is
 * bumped.
 */
494 int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
495 unsigned flags, struct drm_fence_object ** c_fence)
497 struct drm_fence_object *fence;
499 struct drm_fence_manager *fm = &dev->fm;
501 fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
504 ret = drm_fence_object_init(dev, class, type, flags, fence);
506 drm_fence_usage_deref_unlocked(&fence);
510 atomic_inc(&fm->count);
515 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * drm_fence_manager_init - set up the device's fence manager at load time:
 * init the rwlock, then for each fence class declared by the driver set up
 * an empty ring, a clear pending-flush mask and a wait queue.  The class
 * count is sanity-checked against the _DRM_FENCE_CLASSES array bound.
 */
517 void drm_fence_manager_init(struct drm_device * dev)
519 struct drm_fence_manager *fm = &dev->fm;
520 struct drm_fence_class_manager *class;
521 struct drm_fence_driver *fed = dev->driver->fence_driver;
525 rwlock_init(&fm->lock);
526 write_lock_irqsave(&fm->lock, flags);
532 fm->num_classes = fed->num_classes;
533 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
535 for (i=0; i<fm->num_classes; ++i) {
536 class = &fm->class[i];
538 INIT_LIST_HEAD(&class->ring);
539 class->pending_flush = 0;
540 DRM_INIT_WAITQUEUE(&class->fence_queue);
543 atomic_set(&fm->count, 0);
545 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_manager_takedown - teardown counterpart of
 * drm_fence_manager_init.  No body is visible in this view; it appears to
 * be an (intentionally) empty hook — confirm against the full file.
 */
548 void drm_fence_manager_takedown(struct drm_device * dev)
/*
 * drm_lookup_fence_object - resolve a userspace handle to a referenced
 * fence object, or fail when the handle is unknown or names a different
 * user-object type.  The returned fence carries an extra usage reference;
 * the caller must drop it with drm_fence_usage_deref_unlocked().
 */
552 struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
554 struct drm_device *dev = priv->head->dev;
555 struct drm_user_object *uo;
556 struct drm_fence_object *fence;
558 mutex_lock(&dev->struct_mutex);
559 uo = drm_lookup_user_object(priv, handle);
560 if (!uo || (uo->type != drm_fence_type)) {
561 mutex_unlock(&dev->struct_mutex);
564 fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
565 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_create_ioctl - ioctl: create a fence object (optionally
 * emitting it, which requires the hardware lock), publish it as a user
 * object, and copy its handle/class/type/signaled state back to userspace.
 * On user-object failure the creation reference is dropped, destroying the
 * fence.
 */
569 int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
572 struct drm_fence_manager *fm = &dev->fm;
573 struct drm_fence_arg *arg = data;
574 struct drm_fence_object *fence;
578 if (!fm->initialized) {
579 DRM_ERROR("The DRM driver does not support fencing.\n");
/* Emitting touches the hardware ring: the caller must hold the lock. */
583 if (arg->flags & DRM_FENCE_FLAG_EMIT)
584 LOCK_TEST_WITH_RETURN(dev, file_priv);
585 ret = drm_fence_object_create(dev, arg->class,
586 arg->type, arg->flags, &fence);
589 ret = drm_fence_add_user_object(file_priv, fence,
591 DRM_FENCE_FLAG_SHAREABLE);
593 drm_fence_usage_deref_unlocked(&fence);
598 * usage > 0. No need to lock dev->struct_mutex;
601 arg->handle = fence->base.hash.key;
603 read_lock_irqsave(&fm->lock, flags);
604 arg->class = fence->class;
605 arg->type = fence->type;
606 arg->signaled = fence->signaled;
607 read_unlock_irqrestore(&fm->lock, flags);
608 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_destroy_ioctl - ioctl: remove the caller's user object for the
 * given fence handle.  Only the owner may destroy it; the lookup and the
 * removal both happen under struct_mutex.
 */
613 int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
616 struct drm_fence_manager *fm = &dev->fm;
617 struct drm_fence_arg *arg = data;
618 struct drm_user_object *uo;
621 if (!fm->initialized) {
622 DRM_ERROR("The DRM driver does not support fencing.\n");
626 mutex_lock(&dev->struct_mutex);
627 uo = drm_lookup_user_object(file_priv, arg->handle);
628 if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) {
629 mutex_unlock(&dev->struct_mutex);
632 ret = drm_remove_user_object(file_priv, uo);
633 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_reference_ioctl - ioctl: take an additional userspace reference
 * on a fence handle and report its current class/type/signaled state.
 * The extra lookup reference taken for the state read is dropped before
 * returning.
 */
638 int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
641 struct drm_fence_manager *fm = &dev->fm;
642 struct drm_fence_arg *arg = data;
643 struct drm_fence_object *fence;
644 struct drm_user_object *uo;
648 if (!fm->initialized) {
649 DRM_ERROR("The DRM driver does not support fencing.\n");
653 ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
656 fence = drm_lookup_fence_object(file_priv, arg->handle);
658 read_lock_irqsave(&fm->lock, flags);
659 arg->class = fence->class;
660 arg->type = fence->type;
661 arg->signaled = fence->signaled;
662 read_unlock_irqrestore(&fm->lock, flags);
663 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_unreference_ioctl - ioctl: drop one userspace reference on a
 * fence handle; thin wrapper around drm_user_object_unref().
 */
669 int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
672 struct drm_fence_manager *fm = &dev->fm;
673 struct drm_fence_arg *arg = data;
676 if (!fm->initialized) {
677 DRM_ERROR("The DRM driver does not support fencing.\n");
681 return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
/*
 * drm_fence_signaled_ioctl - ioctl: report the current class, type and
 * signalled mask of the fence named by arg->handle.  State is read under
 * the fence manager's read lock; the lookup reference is dropped before
 * returning.
 */
684 int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
687 struct drm_fence_manager *fm = &dev->fm;
688 struct drm_fence_arg *arg = data;
689 struct drm_fence_object *fence;
693 if (!fm->initialized) {
694 DRM_ERROR("The DRM driver does not support fencing.\n");
698 fence = drm_lookup_fence_object(file_priv, arg->handle);
702 read_lock_irqsave(&fm->lock, flags);
703 arg->class = fence->class;
704 arg->type = fence->type;
705 arg->signaled = fence->signaled;
706 read_unlock_irqrestore(&fm->lock, flags);
707 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_flush_ioctl - ioctl: submit a flush for the type bits in
 * arg->type on the fence named by arg->handle, then return the fence's
 * updated class/type/signaled state.
 */
712 int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
715 struct drm_fence_manager *fm = &dev->fm;
716 struct drm_fence_arg *arg = data;
717 struct drm_fence_object *fence;
721 if (!fm->initialized) {
722 DRM_ERROR("The DRM driver does not support fencing.\n");
726 fence = drm_lookup_fence_object(file_priv, arg->handle);
729 ret = drm_fence_object_flush(fence, arg->type);
731 read_lock_irqsave(&fm->lock, flags);
732 arg->class = fence->class;
733 arg->type = fence->type;
734 arg->signaled = fence->signaled;
735 read_unlock_irqrestore(&fm->lock, flags);
736 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_wait_ioctl - ioctl: block until the fence named by arg->handle
 * signals (lazy waiting when DRM_FENCE_FLAG_WAIT_LAZY is set), then return
 * its final class/type/signaled state.
 */
742 int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
745 struct drm_fence_manager *fm = &dev->fm;
746 struct drm_fence_arg *arg = data;
747 struct drm_fence_object *fence;
751 if (!fm->initialized) {
752 DRM_ERROR("The DRM driver does not support fencing.\n");
756 fence = drm_lookup_fence_object(file_priv, arg->handle);
759 ret = drm_fence_object_wait(fence,
760 arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
763 read_lock_irqsave(&fm->lock, flags);
764 arg->class = fence->class;
765 arg->type = fence->type;
766 arg->signaled = fence->signaled;
767 read_unlock_irqrestore(&fm->lock, flags);
768 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_emit_ioctl - ioctl: re-emit an existing fence (new hardware
 * sequence number) under the hardware lock, then return its updated state.
 */
774 int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
777 struct drm_fence_manager *fm = &dev->fm;
778 struct drm_fence_arg *arg = data;
779 struct drm_fence_object *fence;
783 if (!fm->initialized) {
784 DRM_ERROR("The DRM driver does not support fencing.\n");
/* Emitting touches the hardware ring: the caller must hold the lock. */
788 LOCK_TEST_WITH_RETURN(dev, file_priv);
789 fence = drm_lookup_fence_object(file_priv, arg->handle);
792 ret = drm_fence_object_emit(fence, arg->flags, arg->class,
795 read_lock_irqsave(&fm->lock, flags);
796 arg->class = fence->class;
797 arg->type = fence->type;
798 arg->signaled = fence->signaled;
799 read_unlock_irqrestore(&fm->lock, flags);
800 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_buffers_ioctl - ioctl: fence all buffer objects of the calling
 * file (drm_fence_buffer_objects), publish the resulting fence as a user
 * object, and return its handle and state.  Requires both the fence
 * manager and the buffer-object manager to be initialized, plus the
 * hardware lock.
 */
805 int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
808 struct drm_fence_manager *fm = &dev->fm;
809 struct drm_fence_arg *arg = data;
810 struct drm_fence_object *fence;
814 if (!fm->initialized) {
815 DRM_ERROR("The DRM driver does not support fencing.\n");
819 if (!dev->bm.initialized) {
820 DRM_ERROR("Buffer object manager is not initialized\n");
823 LOCK_TEST_WITH_RETURN(dev, file_priv);
824 ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags,
828 ret = drm_fence_add_user_object(file_priv, fence,
830 DRM_FENCE_FLAG_SHAREABLE);
834 arg->handle = fence->base.hash.key;
836 read_lock_irqsave(&fm->lock, flags);
837 arg->class = fence->class;
838 arg->type = fence->type;
839 arg->signaled = fence->signaled;
840 read_unlock_irqrestore(&fm->lock, flags);
841 drm_fence_usage_deref_unlocked(&fence);