1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
35 * Convenience function to be called by fence::wait methods that
/*
 * drm_fence_wait_polling - wait (by polling) until @fence has signaled all
 * bits in @mask, or until @end_jiffies passes.  Convenience helper for
 * driver fence::wait methods.
 * NOTE(review): this extract is missing lines; the loop structure, the
 * declaration of `count`, and the return statements are not visible here.
 */
39 int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
40 int interruptible, uint32_t mask,
41 unsigned long end_jiffies)
43 struct drm_device *dev = fence->dev;
44 struct drm_fence_manager *fm = &dev->fm;
45 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
/* Register on the per-class wait queue so fence signaling can wake us. */
49 DECLARE_WAITQUEUE(entry, current);
50 add_wait_queue(&fc->fence_queue, &entry);
55 __set_current_state((interruptible) ?
57 TASK_UNINTERRUPTIBLE);
/* Done as soon as every bit in @mask is signaled. */
58 if (drm_fence_object_signaled(fence, mask))
/* Caller-supplied deadline; time_after_eq() is jiffies-wrap safe. */
60 if (time_after_eq(jiffies, end_jiffies)) {
/* Every 16th iteration go briefly runnable, presumably to allow
 * scheduling while busy-polling — TODO confirm against full source. */
66 else if ((++count & 0x0F) == 0){
67 __set_current_state(TASK_RUNNING);
69 __set_current_state((interruptible) ?
71 TASK_UNINTERRUPTIBLE);
/* Abort the wait if a signal is pending and we are interruptible
 * (return value not visible in this extract). */
73 if (interruptible && signal_pending(current)) {
/* Restore task state and unhook from the wait queue before returning. */
78 __set_current_state(TASK_RUNNING);
79 remove_wait_queue(&fc->fence_queue, &entry);
82 EXPORT_SYMBOL(drm_fence_wait_polling);
85 * Typically called by the IRQ handler.
/*
 * drm_fence_handler - process a newly signaled @sequence for @fence_class.
 * Typically called by the IRQ handler.  Marks fences on fc->ring not newer
 * than @sequence as signaled for @type, records @error, unlinks fully
 * signaled fences, reinstates waiting types lost by the early mask-clear,
 * and wakes all waiters on the class queue.
 * NOTE(review): extract is missing lines (declarations of diff/new_type/
 * found, loop braces, early returns); comments describe visible code only.
 */
88 void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
89 uint32_t sequence, uint32_t type, uint32_t error)
93 uint32_t relevant_type;
95 struct drm_fence_manager *fm = &dev->fm;
96 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
97 struct drm_fence_driver *driver = dev->driver->fence_driver;
98 struct list_head *head;
99 struct drm_fence_object *fence, *next;
/* Nothing queued for this class — nothing to signal. */
102 if (list_empty(&fc->ring))
/* Find the first fence strictly newer than @sequence.  The masked
 * subtraction makes the age comparison robust against sequence wrap. */
105 list_for_each_entry(fence, &fc->ring, ring) {
106 diff = (sequence - fence->sequence) & driver->sequence_mask;
107 if (diff > driver->wrap_diff) {
/* Optimistically clear @type from the waiting mask; any still-waiting
 * bits are reinstated further below. */
113 fc->waiting_types &= ~type;
114 head = (found) ? &fence->ring : &fc->ring;
/* Walk backwards (oldest first) over all fences at or before the cut. */
116 list_for_each_entry_safe_reverse(fence, next, head, ring) {
117 if (&fence->ring == &fc->ring)
/* Error path (condition not visible): propagate @error and treat the
 * fence as fully signaled — presumably so waiters see completion. */
121 fence->error = error;
122 fence->signaled_types = fence->type;
123 list_del_init(&fence->ring);
/* An EXE signal implies the fence's driver-native types as well. */
128 if (type & DRM_FENCE_TYPE_EXE)
129 type |= fence->native_types;
/* new_type = bits of @type that apply to this fence and were not
 * already signaled. */
131 relevant_type = type & fence->type;
132 new_type = (fence->signaled_types | relevant_type) ^
133 fence->signaled_types;
136 fence->signaled_types |= new_type;
137 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
138 fence->base.hash.key, fence->signaled_types);
/* Let the driver request an additional flush for this fence. */
140 if (driver->needed_flush)
141 fc->pending_flush |= driver->needed_flush(fence);
143 if (new_type & fence->waiting_types)
/* Keep class-level waiting mask in sync with unsignaled waits. */
147 fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
/* Fully signaled: remove from the ring. */
149 if (!(fence->type & ~fence->signaled_types)) {
150 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
151 fence->base.hash.key);
152 list_del_init(&fence->ring);
/*
 * Reinstate lost waiting types.
 */
160 if ((fc->waiting_types & type) != type) {
162 list_for_each_entry(fence, head, ring) {
163 if (&fence->ring == &fc->ring)
/* Only fences up to the highest waiting sequence matter. */
165 diff = (fc->highest_waiting_sequence - fence->sequence) &
166 driver->sequence_mask;
167 if (diff > driver->wrap_diff)
170 fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
/* Wake everyone polling/sleeping on this fence class. */
175 wake_up_all(&fc->fence_queue);
177 EXPORT_SYMBOL(drm_fence_handler);
/*
 * drm_fence_unring - detach a fence from its class ring under the
 * fence-manager write lock.  (The list-removal statement itself is in a
 * line missing from this extract.)
 */
179 static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
181 struct drm_fence_manager *fm = &dev->fm;
184 write_lock_irqsave(&fm->lock, flags);
186 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_usage_deref_locked - drop one reference on *@fence with
 * dev->struct_mutex held (asserted below).  On the last reference the
 * fence is unringed, accounted, and freed.
 */
189 void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
191 struct drm_fence_object *tmp_fence = *fence;
192 struct drm_device *dev = tmp_fence->dev;
193 struct drm_fence_manager *fm = &dev->fm;
195 DRM_ASSERT_LOCKED(&dev->struct_mutex);
197 if (atomic_dec_and_test(&tmp_fence->usage)) {
/* Last reference: remove from the class ring before freeing. */
198 drm_fence_unring(dev, &tmp_fence->ring);
199 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
200 tmp_fence->base.hash.key);
201 atomic_dec(&fm->count);
/* A live user-object entry here would mean someone can still look
 * the fence up — that must never happen at destruction time. */
202 BUG_ON(!list_empty(&tmp_fence->base.list));
203 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
206 EXPORT_SYMBOL(drm_fence_usage_deref_locked);
/*
 * drm_fence_usage_deref_unlocked - drop one reference on *@fence without
 * the struct mutex held.  Uses a decrement-then-recheck-under-mutex
 * pattern: after the count hits zero, struct_mutex is taken and the count
 * re-read to guard against a concurrent re-reference before freeing.
 */
208 void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
210 struct drm_fence_object *tmp_fence = *fence;
211 struct drm_device *dev = tmp_fence->dev;
212 struct drm_fence_manager *fm = &dev->fm;
215 if (atomic_dec_and_test(&tmp_fence->usage)) {
216 mutex_lock(&dev->struct_mutex);
/* Re-check: someone may have taken a reference between the decrement
 * and acquiring the mutex. */
217 if (atomic_read(&tmp_fence->usage) == 0) {
218 drm_fence_unring(dev, &tmp_fence->ring);
219 atomic_dec(&fm->count);
220 BUG_ON(!list_empty(&tmp_fence->base.list));
221 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
223 mutex_unlock(&dev->struct_mutex);
226 EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
/*
 * drm_fence_reference_locked - take an additional reference on @src.
 * Caller must hold dev->struct_mutex (asserted).  Presumably returns
 * @src (the return statement is in a line missing from this extract).
 */
228 struct drm_fence_object
229 *drm_fence_reference_locked(struct drm_fence_object *src)
231 DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
233 atomic_inc(&src->usage);
/*
 * drm_fence_reference_unlocked - take a reference on @src under
 * dev->struct_mutex.  The assignment to *@dst is presumably performed in
 * a line missing from this extract — TODO confirm against full source.
 */
237 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
238 struct drm_fence_object *src)
240 mutex_lock(&src->dev->struct_mutex);
242 atomic_inc(&src->usage);
243 mutex_unlock(&src->dev->struct_mutex);
245 EXPORT_SYMBOL(drm_fence_reference_unlocked);
/*
 * drm_fence_object_destroy - user-object "remove" callback; recovers the
 * fence from its embedded base object and drops the reference held by the
 * user-object table (caller context is expected to hold struct_mutex,
 * since the locked deref variant is used).
 */
247 static void drm_fence_object_destroy(struct drm_file *priv,
248 struct drm_user_object *base)
250 struct drm_fence_object *fence =
251 drm_user_object_entry(base, struct drm_fence_object, base);
253 drm_fence_usage_deref_locked(&fence);
/*
 * drm_fence_object_signaled - test whether all bits of @mask are signaled
 * on @fence.  First does a cheap check under the read lock; if that fails
 * and the driver provides a poll hook, polls the hardware under the write
 * lock and re-tests.  Returns the boolean result (return statement is in
 * a line missing from this extract).
 */
256 int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
260 struct drm_device *dev = fence->dev;
261 struct drm_fence_manager *fm = &dev->fm;
262 struct drm_fence_driver *driver = dev->driver->fence_driver;
/* Fast path: just read the signaled mask. */
265 read_lock_irqsave(&fm->lock, flags);
266 signaled = (mask & fence->signaled_types) == mask;
267 read_unlock_irqrestore(&fm->lock, flags);
/* Slow path: ask the driver to poll hardware state, which may update
 * signaled_types, then re-check under the write lock. */
268 if (!signaled && driver->poll) {
269 write_lock_irqsave(&fm->lock, flags);
270 driver->poll(dev, fence->fence_class, mask);
271 signaled = (mask & fence->signaled_types) == mask;
272 write_unlock_irqrestore(&fm->lock, flags);
276 EXPORT_SYMBOL(drm_fence_object_signaled);
/*
 * drm_fence_object_flush - request that @fence's given types get flushed
 * toward signaling.  Records the types as "waiting", tracks the highest
 * waiting sequence for the class (wrap-safe), asks the driver whether a
 * flush is needed, and invokes the driver flush hook outside the lock.
 * NOTE(review): extract is missing lines (declarations of diff/call_flush,
 * the `type` parameter declaration, return statements).
 */
279 int drm_fence_object_flush(struct drm_fence_object *fence,
282 struct drm_device *dev = fence->dev;
283 struct drm_fence_manager *fm = &dev->fm;
284 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
285 struct drm_fence_driver *driver = dev->driver->fence_driver;
286 unsigned long irq_flags;
287 uint32_t saved_pending_flush;
/* A flush may only target types the fence was created with. */
291 if (type & ~fence->type) {
292 DRM_ERROR("Flush trying to extend fence type, "
293 "0x%x, 0x%x\n", type, fence->type);
297 write_lock_irqsave(&fm->lock, irq_flags);
298 fence->waiting_types |= type;
299 fc->waiting_types |= fence->waiting_types;
/* Wrap-safe comparison: bump the class's highest waiting sequence if
 * this fence is ahead of the current record. */
300 diff = (fence->sequence - fc->highest_waiting_sequence) &
301 driver->sequence_mask;
303 if (diff < driver->wrap_diff)
304 fc->highest_waiting_sequence = fence->sequence;
/*
 * fence->waiting_types has changed. Determine whether
 * we need to initiate some kind of flush as a result of this.
 */
311 saved_pending_flush = fc->pending_flush;
312 if (driver->needed_flush)
313 fc->pending_flush |= driver->needed_flush(fence);
/* Poll is presumably conditional on driver->poll — guard line missing
 * from this extract. */
316 driver->poll(dev, fence->fence_class, fence->waiting_types);
318 call_flush = fc->pending_flush;
319 write_unlock_irqrestore(&fm->lock, irq_flags);
/* Driver flush hook is called without the fence-manager lock held. */
321 if (call_flush && driver->flush)
322 driver->flush(dev, fence->fence_class);
326 EXPORT_SYMBOL(drm_fence_object_flush);
329 * Make sure old fence objects are signaled before their fence sequences are
330 * wrapped around and reused.
/*
 * drm_fence_flush_old - make sure fences older than @sequence are flushed
 * toward signaling before their sequence numbers wrap around and get
 * reused.  Walks the class ring oldest-first, promotes each old fence's
 * full type mask to "waiting", and kicks the driver flush if required.
 * NOTE(review): this extract is missing lines (declarations of diff and
 * call_flush, the @sequence parameter declaration, loop braces).
 */
333 void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
336 struct drm_fence_manager *fm = &dev->fm;
337 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
338 struct drm_fence_object *fence;
339 unsigned long irq_flags;
340 struct drm_fence_driver *driver = dev->driver->fence_driver;
345 write_lock_irqsave(&fm->lock, irq_flags);
347 list_for_each_entry_reverse(fence, &fc->ring, ring) {
/* Wrap-safe age test: stop once fences are within the flush window. */
348 diff = (sequence - fence->sequence) & driver->sequence_mask;
349 if (diff <= driver->flush_diff)
/* Old fence: wait for all of its types to signal. */
352 fence->waiting_types = fence->type;
353 fc->waiting_types |= fence->type;
355 if (driver->needed_flush)
356 fc->pending_flush |= driver->needed_flush(fence);
360 driver->poll(dev, fence_class, fc->waiting_types);
362 call_flush = fc->pending_flush;
363 write_unlock_irqrestore(&fm->lock, irq_flags);
365 if (call_flush && driver->flush)
/*
 * BUGFIX: use the fence_class parameter here.  The previous code
 * dereferenced the loop cursor (fence->fence_class), which is invalid
 * once list_for_each_entry_reverse() completes — at that point `fence`
 * points at the container-of the list head, not a real fence — and is
 * also wrong if the ring was empty and the loop never ran.
 */
366 driver->flush(dev, fence_class);
/*
 * FIXME: Should we implement a wait here for really old fences?
 */
373 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * drm_fence_object_wait - wait for @fence to signal the bits in @mask.
 * Prefers a driver-provided wait hook; otherwise flushes, then either
 * sleeps on the class wait queue when the driver has IRQ support for the
 * mask, or falls back to drm_fence_wait_polling().  Overall budget is
 * 3 * DRM_HZ jiffies.
 * NOTE(review): extract is missing lines (ret declaration, wait-queue
 * argument lines, the timeout expressions, returns).
 */
375 int drm_fence_object_wait(struct drm_fence_object *fence,
376 int lazy, int ignore_signals, uint32_t mask)
378 struct drm_device *dev = fence->dev;
379 struct drm_fence_driver *driver = dev->driver->fence_driver;
380 struct drm_fence_manager *fm = &dev->fm;
381 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
383 unsigned long _end = 3 * DRM_HZ;
/* Reject waits on types the fence does not carry. */
385 if (mask & ~fence->type) {
386 DRM_ERROR("Wait trying to extend fence type"
387 " 0x%08x 0x%08x\n", mask, fence->type);
/* Fast path: delegate entirely to the driver's wait implementation
 * (guard condition is in a line missing from this extract). */
393 return driver->wait(fence, lazy, !ignore_signals, mask);
396 drm_fence_object_flush(fence, mask);
/* IRQ-driven wait: sleep until the handler signals the mask. */
397 if (driver->has_irq(dev, fence->fence_class, mask)) {
399 ret = wait_event_interruptible_timeout
401 drm_fence_object_signaled(fence, mask),
404 ret = wait_event_timeout
406 drm_fence_object_signaled(fence, mask),
409 if (unlikely(ret == -ERESTARTSYS))
/* ret == 0 means the wait timed out. */
412 if (unlikely(ret == 0))
/* No IRQ support: poll until signaled or the deadline expires. */
418 return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
421 EXPORT_SYMBOL(drm_fence_object_wait);
/*
 * drm_fence_object_emit - (re)emit @fence into the command stream for
 * @fence_class.  Detaches any previous ring membership, asks the driver to
 * emit and return a new sequence number plus native types, then resets the
 * fence's bookkeeping under the write lock and queues it at the ring tail.
 * NOTE(review): extract is missing lines (declarations of ret/flags/
 * sequence, error-return after emit, fence->type assignment, return).
 */
425 int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
426 uint32_t fence_class, uint32_t type)
428 struct drm_device *dev = fence->dev;
429 struct drm_fence_manager *fm = &dev->fm;
430 struct drm_fence_driver *driver = dev->driver->fence_driver;
431 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
434 uint32_t native_types;
/* Remove from any ring it may currently be on before re-emitting. */
437 drm_fence_unring(dev, &fence->ring);
438 ret = driver->emit(dev, fence_class, fence_flags, &sequence,
443 write_lock_irqsave(&fm->lock, flags);
444 fence->fence_class = fence_class;
/* Fresh emission: nothing signaled or waited on yet. */
446 fence->waiting_types = 0;
447 fence->signaled_types = 0;
449 fence->sequence = sequence;
450 fence->native_types = native_types;
/* Empty ring: seed the waiting-sequence tracker just behind us. */
451 if (list_empty(&fc->ring))
452 fc->highest_waiting_sequence = sequence - 1;
453 list_add_tail(&fence->ring, &fc->ring);
454 fc->latest_queued_sequence = sequence;
455 write_unlock_irqrestore(&fm->lock, flags);
458 EXPORT_SYMBOL(drm_fence_object_emit);
/*
 * drm_fence_object_init - initialize a freshly allocated fence object:
 * usage count 1, empty ring/base lists, class and cleared type state;
 * optionally emits immediately when DRM_FENCE_FLAG_EMIT is set.
 * NOTE(review): extract is missing lines (ret/flags declarations, the
 * `type` parameter, fence->dev / fence->type assignments, return).
 */
460 static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
462 uint32_t fence_flags,
463 struct drm_fence_object *fence)
467 struct drm_fence_manager *fm = &dev->fm;
/* The initial reference is taken under struct_mutex. */
469 mutex_lock(&dev->struct_mutex);
470 atomic_set(&fence->usage, 1);
471 mutex_unlock(&dev->struct_mutex);
473 write_lock_irqsave(&fm->lock, flags);
474 INIT_LIST_HEAD(&fence->ring);
/*
 * Avoid hitting BUG() for kernel-only fence objects.
 */
480 INIT_LIST_HEAD(&fence->base.list);
481 fence->fence_class = fence_class;
483 fence->signaled_types = 0;
484 fence->waiting_types = 0;
488 write_unlock_irqrestore(&fm->lock, flags);
/* Optionally push the fence into the command stream right away. */
489 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
490 ret = drm_fence_object_emit(fence, fence_flags,
491 fence->fence_class, type);
/*
 * drm_fence_add_user_object - expose @fence to userspace: register its
 * embedded base as a user object (optionally @shareable), take the
 * reference owned by the user-object table, and install the destroy hook.
 * NOTE(review): extract is missing lines (ret declaration, error goto,
 * return statement).
 */
496 int drm_fence_add_user_object(struct drm_file *priv,
497 struct drm_fence_object *fence, int shareable)
499 struct drm_device *dev = priv->minor->dev;
502 mutex_lock(&dev->struct_mutex);
503 ret = drm_add_user_object(priv, &fence->base, shareable);
/* The user-object table now holds its own reference. */
506 atomic_inc(&fence->usage);
507 fence->base.type = drm_fence_type;
508 fence->base.remove = &drm_fence_object_destroy;
509 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
511 mutex_unlock(&dev->struct_mutex);
514 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * drm_fence_object_create - allocate and initialize a new fence object,
 * returning it through @c_fence.  On init failure the allocation is
 * released via the unlocked deref path.
 * NOTE(review): extract is missing lines (ret declaration, -ENOMEM and
 * error returns, *c_fence assignment).
 */
516 int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
517 uint32_t type, unsigned flags,
518 struct drm_fence_object **c_fence)
520 struct drm_fence_object *fence;
522 struct drm_fence_manager *fm = &dev->fm;
/* Zeroed allocation from the DRM memory accounting pool. */
524 fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
526 DRM_ERROR("Out of memory creating fence object\n");
529 ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
/* Init failed: drop the initial reference, which frees the object. */
531 drm_fence_usage_deref_unlocked(&fence);
/* Account one more live fence object. */
535 atomic_inc(&fm->count);
539 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * drm_fence_manager_init - set up the per-device fence manager: the
 * rwlock, one ring + wait queue per driver-reported fence class, and a
 * zeroed object count.
 * NOTE(review): extract is missing lines (i/flags declarations, the
 * fm->initialized handling presumably set from fed — TODO confirm).
 */
541 void drm_fence_manager_init(struct drm_device *dev)
543 struct drm_fence_manager *fm = &dev->fm;
544 struct drm_fence_class_manager *fence_class;
545 struct drm_fence_driver *fed = dev->driver->fence_driver;
549 rwlock_init(&fm->lock);
550 write_lock_irqsave(&fm->lock, flags);
556 fm->num_classes = fed->num_classes;
/* The static fence_class array must be able to hold every class. */
557 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
559 for (i = 0; i < fm->num_classes; ++i) {
560 fence_class = &fm->fence_class[i];
562 memset(fence_class, 0, sizeof(*fence_class));
563 INIT_LIST_HEAD(&fence_class->ring);
564 DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
567 atomic_set(&fm->count, 0);
569 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_fill_arg - snapshot the fence's user-visible state (handle,
 * class, type, signaled mask, error, sequence) into the ioctl argument
 * structure under the fence-manager read lock.
 */
572 void drm_fence_fill_arg(struct drm_fence_object *fence,
573 struct drm_fence_arg *arg)
575 struct drm_device *dev = fence->dev;
576 struct drm_fence_manager *fm = &dev->fm;
577 unsigned long irq_flags;
579 read_lock_irqsave(&fm->lock, irq_flags);
580 arg->handle = fence->base.hash.key;
581 arg->fence_class = fence->fence_class;
582 arg->type = fence->type;
583 arg->signaled = fence->signaled_types;
584 arg->error = fence->error;
585 arg->sequence = fence->sequence;
586 read_unlock_irqrestore(&fm->lock, irq_flags);
588 EXPORT_SYMBOL(drm_fence_fill_arg);
/*
 * drm_fence_manager_takedown - tear down the fence manager at device
 * shutdown.  Body not visible in this extract (presumably empty or
 * elided) — TODO confirm against full source.
 */
590 void drm_fence_manager_takedown(struct drm_device *dev)
/*
 * drm_lookup_fence_object - resolve a userspace @handle to a referenced
 * fence object, or NULL-equivalent on failure (error return is in a line
 * missing from this extract).  Validates the user-object type and takes a
 * reference under struct_mutex before returning.
 */
594 struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
597 struct drm_device *dev = priv->minor->dev;
598 struct drm_user_object *uo;
599 struct drm_fence_object *fence;
601 mutex_lock(&dev->struct_mutex);
602 uo = drm_lookup_user_object(priv, handle);
/* Reject missing handles and handles of the wrong object type. */
603 if (!uo || (uo->type != drm_fence_type)) {
604 mutex_unlock(&dev->struct_mutex);
/* Reference is taken while still holding struct_mutex. */
607 fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
608 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_create_ioctl - ioctl: create a fence object, optionally emit
 * it (requires the HW lock), expose it as a user object, and copy its
 * state back to userspace.  The fence reference taken by lookup/creation
 * is dropped before returning.
 * NOTE(review): extract is missing lines (ret declaration, error returns,
 * return statement).
 */
612 int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
615 struct drm_fence_manager *fm = &dev->fm;
616 struct drm_fence_arg *arg = data;
617 struct drm_fence_object *fence;
620 if (!fm->initialized) {
621 DRM_ERROR("The DRM driver does not support fencing.\n");
/* Emitting touches the command stream, so the HW lock is required. */
625 if (arg->flags & DRM_FENCE_FLAG_EMIT)
626 LOCK_TEST_WITH_RETURN(dev, file_priv);
627 ret = drm_fence_object_create(dev, arg->fence_class,
628 arg->type, arg->flags, &fence);
631 ret = drm_fence_add_user_object(file_priv, fence,
633 DRM_FENCE_FLAG_SHAREABLE);
/* Failed to expose to userspace: drop our creation reference. */
635 drm_fence_usage_deref_unlocked(&fence);
/*
 * usage > 0. No need to lock dev->struct_mutex;
 */
643 arg->handle = fence->base.hash.key;
645 drm_fence_fill_arg(fence, arg);
646 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_reference_ioctl - ioctl: take a userspace reference on a
 * fence handle, then report the fence's current state back via @arg.
 * NOTE(review): extract is missing lines (ret declaration, error returns,
 * return statement).
 */
651 int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
654 struct drm_fence_manager *fm = &dev->fm;
655 struct drm_fence_arg *arg = data;
656 struct drm_fence_object *fence;
657 struct drm_user_object *uo;
660 if (!fm->initialized) {
661 DRM_ERROR("The DRM driver does not support fencing.\n");
/* Bump the per-file reference on the user object itself. */
665 ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
/* Temporary kernel reference only for filling the arg; dropped below. */
668 fence = drm_lookup_fence_object(file_priv, arg->handle);
669 drm_fence_fill_arg(fence, arg);
670 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_unreference_ioctl - ioctl: drop a userspace reference on a
 * fence handle via the generic user-object unref path.
 */
676 int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
679 struct drm_fence_manager *fm = &dev->fm;
680 struct drm_fence_arg *arg = data;
683 if (!fm->initialized) {
684 DRM_ERROR("The DRM driver does not support fencing.\n");
688 return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
/*
 * drm_fence_signaled_ioctl - ioctl: report the current signaled state of
 * a fence handle back to userspace (drm_fence_fill_arg snapshots it).
 * NOTE(review): extract is missing lines (ret declaration, error returns,
 * return statement).
 */
691 int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
694 struct drm_fence_manager *fm = &dev->fm;
695 struct drm_fence_arg *arg = data;
696 struct drm_fence_object *fence;
699 if (!fm->initialized) {
700 DRM_ERROR("The DRM driver does not support fencing.\n");
704 fence = drm_lookup_fence_object(file_priv, arg->handle);
708 drm_fence_fill_arg(fence, arg);
709 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_flush_ioctl - ioctl: flush the requested types on a fence
 * handle, then return its updated state to userspace.
 * NOTE(review): extract is missing lines (ret declaration, error returns,
 * return statement).
 */
714 int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
717 struct drm_fence_manager *fm = &dev->fm;
718 struct drm_fence_arg *arg = data;
719 struct drm_fence_object *fence;
722 if (!fm->initialized) {
723 DRM_ERROR("The DRM driver does not support fencing.\n");
727 fence = drm_lookup_fence_object(file_priv, arg->handle);
730 ret = drm_fence_object_flush(fence, arg->type);
732 drm_fence_fill_arg(fence, arg);
733 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_wait_ioctl - ioctl: block until the fence signals the
 * requested mask (lazy flag taken from arg->flags), then return its
 * updated state to userspace.
 * NOTE(review): extract is missing lines (ret declaration, the remaining
 * drm_fence_object_wait arguments, error returns, return statement).
 */
739 int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
742 struct drm_fence_manager *fm = &dev->fm;
743 struct drm_fence_arg *arg = data;
744 struct drm_fence_object *fence;
747 if (!fm->initialized) {
748 DRM_ERROR("The DRM driver does not support fencing.\n");
752 fence = drm_lookup_fence_object(file_priv, arg->handle);
755 ret = drm_fence_object_wait(fence,
756 arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
759 drm_fence_fill_arg(fence, arg);
760 drm_fence_usage_deref_unlocked(&fence);
/*
 * drm_fence_emit_ioctl - ioctl: (re)emit an existing fence handle into
 * the command stream.  Requires the HW lock, then returns the updated
 * fence state to userspace.
 * NOTE(review): extract is missing lines (ret declaration, the type
 * argument to emit, error returns, return statement).
 */
766 int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
769 struct drm_fence_manager *fm = &dev->fm;
770 struct drm_fence_arg *arg = data;
771 struct drm_fence_object *fence;
774 if (!fm->initialized) {
775 DRM_ERROR("The DRM driver does not support fencing.\n");
/* Emission writes to the command stream: caller must hold the HW lock. */
779 LOCK_TEST_WITH_RETURN(dev, file_priv);
780 fence = drm_lookup_fence_object(file_priv, arg->handle);
783 ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
786 drm_fence_fill_arg(fence, arg);
787 drm_fence_usage_deref_unlocked(&fence);
792 int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
795 struct drm_fence_manager *fm = &dev->fm;
796 struct drm_fence_arg *arg = data;
797 struct drm_fence_object *fence;
800 if (!fm->initialized) {
801 DRM_ERROR("The DRM driver does not support fencing.\n");
805 if (!dev->bm.initialized) {
806 DRM_ERROR("Buffer object manager is not initialized\n");
809 LOCK_TEST_WITH_RETURN(dev, file_priv);
810 ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
815 if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
816 ret = drm_fence_add_user_object(file_priv, fence,
818 DRM_FENCE_FLAG_SHAREABLE);
823 arg->handle = fence->base.hash.key;
825 drm_fence_fill_arg(fence, arg);
826 drm_fence_usage_deref_unlocked(&fence);