1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
35 * Convenience function to be called by fence::wait methods that
/*
 * NOTE(review): gapped listing — braces, local declarations, the polling
 * loop header and the return paths are not visible here.
 *
 * Poll-wait until @fence has signaled all type bits in @mask, or until
 * @end_jiffies has passed.  @lazy apparently selects sleeping between
 * polls over busy-polling; @interruptible lets a pending signal abort
 * the wait.  Presumably returns 0 on success and a negative errno on
 * timeout or signal — TODO confirm against the unabridged source.
 */
39 int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
40 int interruptible, uint32_t mask,
41 unsigned long end_jiffies)
43 struct drm_device *dev = fence->dev;
44 struct drm_fence_manager *fm = &dev->fm;
45 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
/* Register on the per-class wait queue; drm_fence_handler() wakes it. */
49 DECLARE_WAITQUEUE(entry, current);
50 add_wait_queue(&fc->fence_queue, &entry);
55 __set_current_state((interruptible) ?
57 TASK_UNINTERRUPTIBLE);
58 if (drm_fence_object_signaled(fence, mask))
/* Deadline passed before all bits in @mask were signaled. */
60 if (time_after_eq(jiffies, end_jiffies)) {
/* Busy-poll mode: yield the CPU once every 16 iterations. */
66 else if ((++count & 0x0F) == 0){
67 __set_current_state(TASK_RUNNING);
69 __set_current_state((interruptible) ?
71 TASK_UNINTERRUPTIBLE);
73 if (interruptible && signal_pending(current)) {
/* Done: restore run state and drop off the wait queue. */
78 __set_current_state(TASK_RUNNING);
79 remove_wait_queue(&fc->fence_queue, &entry);
85 * Typically called by the IRQ handler.
/*
 * NOTE(review): gapped listing — loop bodies, break/continue statements
 * and several assignments are missing from view.
 *
 * Process a hardware fence completion for @fence_class: walk the class
 * ring, mark every fence at or before @sequence as signaled for @type,
 * record @error, retire fully-signaled fences off the ring, and wake all
 * waiters on the class wait queue.
 * Presumably must be called with the fence manager lock held (callers
 * visible here take fm->lock as a write/irqsave lock) — TODO confirm.
 */
88 void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
89 uint32_t sequence, uint32_t type, uint32_t error)
93 uint32_t relevant_type;
95 struct drm_fence_manager *fm = &dev->fm;
96 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
97 struct drm_fence_driver *driver = dev->driver->fence_driver;
98 struct list_head *head;
99 struct drm_fence_object *fence, *next;
102 if (list_empty(&fc->ring))
/*
 * First pass: locate the oldest fence NOT covered by @sequence, using
 * wrap-safe sequence arithmetic (masked difference vs. wrap_diff).
 */
105 list_for_each_entry(fence, &fc->ring, ring) {
106 diff = (sequence - fence->sequence) & driver->sequence_mask;
107 if (diff > driver->wrap_diff) {
113 fc->waiting_types &= ~type;
114 head = (found) ? &fence->ring : &fc->ring;
/* Second pass: walk backwards (oldest first) over the covered fences. */
116 list_for_each_entry_safe_reverse(fence, next, head, ring) {
117 if (&fence->ring == &fc->ring)
121 fence->error = error;
122 fence->signaled_types = fence->type;
123 list_del_init(&fence->ring);
/* An EXE signal implies the driver's native types as well. */
128 if (type & DRM_FENCE_TYPE_EXE)
129 type |= fence->native_types;
131 relevant_type = type & fence->type;
/* new_type = bits in relevant_type that were not already signaled. */
132 new_type = (fence->signaled_types | relevant_type) ^
133 fence->signaled_types;
136 fence->signaled_types |= new_type;
137 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
138 fence->base.hash.key, fence->signaled_types);
140 if (driver->needed_flush)
141 fc->pending_flush |= driver->needed_flush(fence);
143 if (new_type & fence->waiting_types)
147 fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
/* All type bits signaled: retire the fence from the ring. */
149 if (!(fence->type & ~fence->signaled_types)) {
150 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
151 fence->base.hash.key);
152 list_del_init(&fence->ring);
157 * Reinstate lost waiting types.
160 if ((fc->waiting_types & type) != type) {
162 list_for_each_entry(fence, head, ring) {
163 if (&fence->ring == &fc->ring)
165 diff = (fc->highest_waiting_sequence - fence->sequence) &
166 driver->sequence_mask;
167 if (diff > driver->wrap_diff)
170 fc->waiting_types |= fence->waiting_types & ~fence->signaled_types;
/* Wake everyone sleeping in drm_fence_wait_polling() / object_wait(). */
175 wake_up_all(&fc->fence_queue);
/*
 * Detach a fence from its class ring under the fence-manager write lock.
 * NOTE(review): the list operation itself (presumably list_del_init(ring))
 * is not visible in this gapped listing — TODO confirm.
 */
179 static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
181 struct drm_fence_manager *fm = &dev->fm;
184 write_lock_irqsave(&fm->lock, flags);
186 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Drop a usage reference on *@fence; caller must hold dev->struct_mutex
 * (asserted below).  When the count reaches zero the fence is removed
 * from its ring, the manager count is decremented, and the object freed.
 * NOTE(review): the line clearing *@fence to NULL for the caller is not
 * visible in this gapped listing — TODO confirm.
 */
189 void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
191 struct drm_fence_object *tmp_fence = *fence;
192 struct drm_device *dev = tmp_fence->dev;
193 struct drm_fence_manager *fm = &dev->fm;
195 DRM_ASSERT_LOCKED(&dev->struct_mutex);
197 if (atomic_dec_and_test(&tmp_fence->usage)) {
198 drm_fence_unring(dev, &tmp_fence->ring);
199 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
200 tmp_fence->base.hash.key);
201 atomic_dec(&fm->count);
/* A fence still on a user-object list must never be freed. */
202 BUG_ON(!list_empty(&tmp_fence->base.list));
203 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
/*
 * Drop a usage reference on *@fence without holding dev->struct_mutex.
 * If the count hits zero, the mutex is taken and the count re-checked
 * before freeing — guards against a racing drm_fence_reference_locked()
 * resurrecting the object between the dec and the lock.
 */
208 void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
210 struct drm_fence_object *tmp_fence = *fence;
211 struct drm_device *dev = tmp_fence->dev;
212 struct drm_fence_manager *fm = &dev->fm;
215 if (atomic_dec_and_test(&tmp_fence->usage)) {
216 mutex_lock(&dev->struct_mutex);
/* Re-check under the mutex: only free if nobody re-referenced it. */
217 if (atomic_read(&tmp_fence->usage) == 0) {
218 drm_fence_unring(dev, &tmp_fence->ring);
219 atomic_dec(&fm->count);
220 BUG_ON(!list_empty(&tmp_fence->base.list));
221 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
223 mutex_unlock(&dev->struct_mutex);
/*
 * Take an extra usage reference on @src; caller must hold
 * dev->struct_mutex (asserted).  Presumably returns @src for chaining
 * (see drm_lookup_fence_object) — the return line is not visible in
 * this gapped listing.
 */
228 struct drm_fence_object
229 *drm_fence_reference_locked(struct drm_fence_object *src)
231 DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
233 atomic_inc(&src->usage);
/*
 * Reference @src into *@dst, taking dev->struct_mutex itself.
 * NOTE(review): the assignment to *@dst is not visible in this gapped
 * listing — presumably performed between the lock and the atomic_inc.
 */
237 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
238 struct drm_fence_object *src)
240 mutex_lock(&src->dev->struct_mutex);
242 atomic_inc(&src->usage);
243 mutex_unlock(&src->dev->struct_mutex);
/*
 * drm_user_object removal callback (installed as base.remove in
 * drm_fence_add_user_object): drops the user-object's usage reference.
 * Runs with dev->struct_mutex held, hence the _locked deref.
 */
247 static void drm_fence_object_destroy(struct drm_file *priv,
248 struct drm_user_object *base)
250 struct drm_fence_object *fence =
251 drm_user_object_entry(base, struct drm_fence_object, base);
253 drm_fence_usage_deref_locked(&fence);
/*
 * Return nonzero when all type bits in @mask are already signaled on
 * @fence.  First checks under the read lock; if not yet signaled and the
 * driver provides a poll hook, re-polls the hardware under the write
 * lock and checks again.
 */
256 int drm_fence_object_signaled(struct drm_fence_object *fence, uint32_t mask)
260 struct drm_device *dev = fence->dev;
261 struct drm_fence_manager *fm = &dev->fm;
262 struct drm_fence_driver *driver = dev->driver->fence_driver;
265 read_lock_irqsave(&fm->lock, flags);
266 signaled = (mask & fence->signaled_types) == mask;
267 read_unlock_irqrestore(&fm->lock, flags);
/* Not signaled yet: let the driver poll hardware state (may update
 * signaled_types via drm_fence_handler), then re-test. */
268 if (!signaled && driver->poll) {
269 write_lock_irqsave(&fm->lock, flags);
270 driver->poll(dev, fence->fence_class, mask);
271 signaled = (mask & fence->signaled_types) == mask;
272 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Register interest in @type bits on @fence and kick off whatever flush
 * mechanism the driver needs to get them signaled.  Rejects (with
 * DRM_ERROR) a @type that is not a subset of the fence's type mask.
 * NOTE(review): gapped listing — the second parameter's declaration line
 * and some returns are not visible.
 */
279 int drm_fence_object_flush(struct drm_fence_object *fence,
282 struct drm_device *dev = fence->dev;
283 struct drm_fence_manager *fm = &dev->fm;
284 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
285 struct drm_fence_driver *driver = dev->driver->fence_driver;
286 unsigned long irq_flags;
287 uint32_t saved_pending_flush;
/* A flush may only wait on types the fence was created with. */
291 if (type & ~fence->type) {
292 DRM_ERROR("Flush trying to extend fence type, "
293 "0x%x, 0x%x\n", type, fence->type);
297 write_lock_irqsave(&fm->lock, irq_flags);
298 fence->waiting_types |= type;
299 fc->waiting_types |= fence->waiting_types;
/* Wrap-safe check: advance highest_waiting_sequence if this fence
 * is newer than the current record holder. */
300 diff = (fence->sequence - fc->highest_waiting_sequence) &
301 driver->sequence_mask;
303 if (diff < driver->wrap_diff)
304 fc->highest_waiting_sequence = fence->sequence;
307 * fence->waiting_types has changed. Determine whether
308 * we need to initiate some kind of flush as a result of this.
311 saved_pending_flush = fc->pending_flush;
312 if (driver->needed_flush)
313 fc->pending_flush |= driver->needed_flush(fence);
316 driver->poll(dev, fence->fence_class, fence->waiting_types);
318 call_flush = fc->pending_flush;
319 write_unlock_irqrestore(&fm->lock, irq_flags);
/* Call the driver flush hook outside the irqsave lock. */
321 if (call_flush && driver->flush)
322 driver->flush(dev, fence->fence_class);
329 * Make sure old fence objects are signaled before their fence sequences are
330 * wrapped around and reused.
/*
 * Walk the class ring from the oldest end and force flushes for fences
 * whose sequence lags @sequence by more than driver->flush_diff.
 * NOTE(review): gapped listing — the @sequence parameter declaration,
 * loop break and closing braces are not visible here.
 */
333 void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
336 struct drm_fence_manager *fm = &dev->fm;
337 struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
338 struct drm_fence_object *fence;
339 unsigned long irq_flags;
340 struct drm_fence_driver *driver = dev->driver->fence_driver;
345 write_lock_irqsave(&fm->lock, irq_flags);
347 list_for_each_entry_reverse(fence, &fc->ring, ring) {
/* Wrap-safe age test: stop once fences are recent enough. */
348 diff = (sequence - fence->sequence) & driver->sequence_mask;
349 if (diff <= driver->flush_diff)
/* Old fence: wait for everything it can signal. */
352 fence->waiting_types = fence->type;
353 fc->waiting_types |= fence->type;
355 if (driver->needed_flush)
356 fc->pending_flush |= driver->needed_flush(fence);
360 driver->poll(dev, fence_class, fc->waiting_types);
362 call_flush = fc->pending_flush;
363 write_unlock_irqrestore(&fm->lock, irq_flags);
365 if (call_flush && driver->flush)
/*
 * NOTE(review): `fence` is the list cursor after the walk above; if the
 * loop ran to completion it points at the list head, not a fence, so
 * fence->fence_class looks unsafe here — the @fence_class parameter
 * would be the right argument.  Cannot verify/fix in this gapped view.
 */
366 driver->flush(dev, fence->fence_class);
369 * FIXME: Should we implement a wait here for really old fences?
/*
 * Wait (up to a ~3 * DRM_HZ deadline) for @fence to signal the bits in
 * @mask.  Strategy, as far as visible in this gapped listing:
 *  - driver-provided wait hook, if any;
 *  - otherwise flush, then IRQ-driven wait_event when the driver has an
 *    IRQ for this class/mask (interruptible unless @ignore_signals);
 *  - otherwise fall back to drm_fence_wait_polling().
 * Rejects a @mask that extends the fence's type set.
 */
375 int drm_fence_object_wait(struct drm_fence_object *fence,
376 int lazy, int ignore_signals, uint32_t mask)
378 struct drm_device *dev = fence->dev;
379 struct drm_fence_driver *driver = dev->driver->fence_driver;
380 struct drm_fence_manager *fm = &dev->fm;
381 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
383 unsigned long _end = 3 * DRM_HZ;
385 if (mask & ~fence->type) {
386 DRM_ERROR("Wait trying to extend fence type"
387 " 0x%08x 0x%08x\n", mask, fence->type);
/* Driver supplies its own wait implementation — delegate. */
393 return driver->wait(fence, lazy, !ignore_signals, mask);
396 drm_fence_object_flush(fence, mask);
397 if (driver->has_irq(dev, fence->fence_class, mask)) {
399 ret = wait_event_interruptible_timeout
401 drm_fence_object_signaled(fence, mask),
404 ret = wait_event_timeout
406 drm_fence_object_signaled(fence, mask),
/* Interrupted by a signal: restartable. */
409 if (unlikely(ret == -ERESTARTSYS))
/* Timed out without signaling. */
412 if (unlikely(ret == 0))
/* No IRQ available: poll until the deadline. */
418 return drm_fence_wait_polling(fence, lazy, !ignore_signals, mask,
/*
 * (Re)emit @fence into the command stream via the driver's emit hook:
 * detach it from any previous ring position, obtain a new hardware
 * sequence number, reset its signaled/waiting state, and queue it at
 * the tail of the target class ring.
 * NOTE(review): gapped listing — error check after driver->emit and the
 * return statement are not visible.
 */
425 int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
426 uint32_t fence_class, uint32_t type)
428 struct drm_device *dev = fence->dev;
429 struct drm_fence_manager *fm = &dev->fm;
430 struct drm_fence_driver *driver = dev->driver->fence_driver;
431 struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
434 uint32_t native_types;
/* Remove from the old ring before re-emitting under a new sequence. */
437 drm_fence_unring(dev, &fence->ring);
438 ret = driver->emit(dev, fence_class, fence_flags, &sequence,
443 write_lock_irqsave(&fm->lock, flags);
444 fence->fence_class = fence_class;
446 fence->waiting_types = 0;
447 fence->signaled_types = 0;
448 fence->sequence = sequence;
449 fence->native_types = native_types;
/* First fence on an empty ring: seed the waiting-sequence tracker. */
450 if (list_empty(&fc->ring))
451 fc->highest_waiting_sequence = sequence - 1;
452 list_add_tail(&fence->ring, &fc->ring);
453 fc->latest_queued_sequence = sequence;
454 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Initialize a freshly allocated fence object: usage count 1, empty
 * ring/list heads, class and cleared signal state; optionally emit it
 * immediately when DRM_FENCE_FLAG_EMIT is set.
 * NOTE(review): gapped listing — the @type parameter declaration and the
 * fence->type assignment are not visible here.
 */
459 static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
461 uint32_t fence_flags,
462 struct drm_fence_object *fence)
466 struct drm_fence_manager *fm = &dev->fm;
468 mutex_lock(&dev->struct_mutex);
469 atomic_set(&fence->usage, 1);
470 mutex_unlock(&dev->struct_mutex);
472 write_lock_irqsave(&fm->lock, flags);
473 INIT_LIST_HEAD(&fence->ring);
476 * Avoid hitting BUG() for kernel-only fence objects.
/* base.list must be a valid empty list for the BUG_ON in the deref
 * paths even when no user object is ever attached. */
479 INIT_LIST_HEAD(&fence->base.list);
480 fence->fence_class = fence_class;
482 fence->signaled_types = 0;
483 fence->waiting_types = 0;
486 write_unlock_irqrestore(&fm->lock, flags);
487 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
488 ret = drm_fence_object_emit(fence, fence_flags,
489 fence->fence_class, type);
/*
 * Expose @fence to user space as a drm_user_object (optionally
 * @shareable).  On success takes an extra usage reference owned by the
 * user object and installs drm_fence_object_destroy as its remover.
 */
494 int drm_fence_add_user_object(struct drm_file *priv,
495 struct drm_fence_object *fence, int shareable)
497 struct drm_device *dev = priv->minor->dev;
500 mutex_lock(&dev->struct_mutex);
501 ret = drm_add_user_object(priv, &fence->base, shareable);
/* The user object now holds its own reference on the fence. */
504 atomic_inc(&fence->usage);
505 fence->base.type = drm_fence_type;
506 fence->base.remove = &drm_fence_object_destroy;
507 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
509 mutex_unlock(&dev->struct_mutex);
/*
 * Allocate and initialize a fence object of @type in @fence_class,
 * returning it through *@c_fence.  On init failure the half-built
 * object is released via the unlocked deref path.
 * NOTE(review): allocation-failure check and returns are not visible in
 * this gapped listing.
 */
514 int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
515 uint32_t type, unsigned flags,
516 struct drm_fence_object **c_fence)
518 struct drm_fence_object *fence;
520 struct drm_fence_manager *fm = &dev->fm;
522 fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
525 ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
527 drm_fence_usage_deref_unlocked(&fence);
/* Account the live fence with the manager. */
531 atomic_inc(&fm->count);
/*
 * One-time setup of the per-device fence manager: init the rwlock, zero
 * and initialize each driver-declared fence class (ring list + wait
 * queue), and reset the live-fence counter.
 */
537 void drm_fence_manager_init(struct drm_device *dev)
539 struct drm_fence_manager *fm = &dev->fm;
540 struct drm_fence_class_manager *fence_class;
541 struct drm_fence_driver *fed = dev->driver->fence_driver;
545 rwlock_init(&fm->lock);
546 write_lock_irqsave(&fm->lock, flags);
552 fm->num_classes = fed->num_classes;
/* fence_class[] is a fixed-size array; the driver must fit in it. */
553 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
555 for (i = 0; i < fm->num_classes; ++i) {
556 fence_class = &fm->fence_class[i];
558 memset(fence_class, 0, sizeof(*fence_class));
559 INIT_LIST_HEAD(&fence_class->ring);
560 DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
563 atomic_set(&fm->count, 0);
565 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Snapshot @fence state into the ioctl argument struct @arg under the
 * fence-manager read lock, so user space sees a consistent view of
 * handle/class/type/signaled/error/sequence.
 */
568 void drm_fence_fill_arg(struct drm_fence_object *fence,
569 struct drm_fence_arg *arg)
571 struct drm_device *dev = fence->dev;
572 struct drm_fence_manager *fm = &dev->fm;
573 unsigned long irq_flags;
575 read_lock_irqsave(&fm->lock, irq_flags);
576 arg->handle = fence->base.hash.key;
577 arg->fence_class = fence->fence_class;
578 arg->type = fence->type;
579 arg->signaled = fence->signaled_types;
580 arg->error = fence->error;
581 arg->sequence = fence->sequence;
582 read_unlock_irqrestore(&fm->lock, irq_flags);
/*
 * Fence manager teardown hook.  Body not visible in this gapped
 * listing — presumably empty or minimal; TODO confirm against the
 * unabridged source.
 */
590 struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
/*
 * Resolve a user-space @handle to a referenced fence object, or NULL-ish
 * failure when the handle is absent or names a different object type.
 * The returned fence carries a reference taken under struct_mutex; the
 * caller releases it with drm_fence_usage_deref_unlocked().
 * NOTE(review): gapped listing — the @handle parameter line and the
 * returns are not visible here.
 */
593 struct drm_device *dev = priv->minor->dev;
594 struct drm_user_object *uo;
595 struct drm_fence_object *fence;
597 mutex_lock(&dev->struct_mutex);
598 uo = drm_lookup_user_object(priv, handle);
599 if (!uo || (uo->type != drm_fence_type)) {
600 mutex_unlock(&dev->struct_mutex);
/* Reference while still holding struct_mutex, then drop the lock. */
603 fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
604 mutex_unlock(&dev->struct_mutex);
/*
 * DRM_IOCTL fence-create: allocate a fence per @arg (class/type/flags),
 * attach a user object for it, and fill @arg with the resulting state.
 * Requires the HW lock when DRM_FENCE_FLAG_EMIT is requested.
 */
608 int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
611 struct drm_fence_manager *fm = &dev->fm;
612 struct drm_fence_arg *arg = data;
613 struct drm_fence_object *fence;
616 if (!fm->initialized) {
617 DRM_ERROR("The DRM driver does not support fencing.\n");
621 if (arg->flags & DRM_FENCE_FLAG_EMIT)
622 LOCK_TEST_WITH_RETURN(dev, file_priv);
623 ret = drm_fence_object_create(dev, arg->fence_class,
624 arg->type, arg->flags, &fence);
627 ret = drm_fence_add_user_object(file_priv, fence,
629 DRM_FENCE_FLAG_SHAREABLE);
/* Failed to create the user object: drop our creation reference. */
631 drm_fence_usage_deref_unlocked(&fence);
636 * usage > 0. No need to lock dev->struct_mutex;
639 arg->handle = fence->base.hash.key;
641 drm_fence_fill_arg(fence, arg);
/* Drop the lookup/creation reference; the user object keeps its own. */
642 drm_fence_usage_deref_unlocked(&fence);
/*
 * DRM_IOCTL fence-reference: add a per-file reference to the fence named
 * by @arg->handle, then report its current state back in @arg.
 * NOTE(review): error-path returns are not visible in this gapped
 * listing.
 */
647 int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
650 struct drm_fence_manager *fm = &dev->fm;
651 struct drm_fence_arg *arg = data;
652 struct drm_fence_object *fence;
653 struct drm_user_object *uo;
656 if (!fm->initialized) {
657 DRM_ERROR("The DRM driver does not support fencing.\n");
661 ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
664 fence = drm_lookup_fence_object(file_priv, arg->handle);
665 drm_fence_fill_arg(fence, arg);
666 drm_fence_usage_deref_unlocked(&fence);
/*
 * DRM_IOCTL fence-unreference: drop a per-file reference on the fence
 * user object named by @arg->handle.
 */
672 int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
675 struct drm_fence_manager *fm = &dev->fm;
676 struct drm_fence_arg *arg = data;
679 if (!fm->initialized) {
680 DRM_ERROR("The DRM driver does not support fencing.\n");
684 return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
/*
 * DRM_IOCTL fence-signaled: look up the fence by handle and report its
 * current signaled state in @arg (drm_fence_fill_arg copies
 * signaled_types; lookup also triggers a driver poll via
 * drm_fence_object_signaled paths elsewhere — not visible here).
 */
687 int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
690 struct drm_fence_manager *fm = &dev->fm;
691 struct drm_fence_arg *arg = data;
692 struct drm_fence_object *fence;
695 if (!fm->initialized) {
696 DRM_ERROR("The DRM driver does not support fencing.\n");
700 fence = drm_lookup_fence_object(file_priv, arg->handle);
704 drm_fence_fill_arg(fence, arg);
705 drm_fence_usage_deref_unlocked(&fence);
/*
 * DRM_IOCTL fence-flush: look up the fence by handle, initiate a flush
 * for @arg->type via drm_fence_object_flush(), and return the updated
 * fence state in @arg.
 */
710 int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
713 struct drm_fence_manager *fm = &dev->fm;
714 struct drm_fence_arg *arg = data;
715 struct drm_fence_object *fence;
718 if (!fm->initialized) {
719 DRM_ERROR("The DRM driver does not support fencing.\n");
723 fence = drm_lookup_fence_object(file_priv, arg->handle);
726 ret = drm_fence_object_flush(fence, arg->type);
728 drm_fence_fill_arg(fence, arg);
729 drm_fence_usage_deref_unlocked(&fence);
/*
 * DRM_IOCTL fence-wait: look up the fence by handle and block in
 * drm_fence_object_wait() (lazy per DRM_FENCE_FLAG_WAIT_LAZY), then
 * return the updated state in @arg.
 * NOTE(review): the remaining wait arguments (ignore_signals, mask) are
 * on lines not visible in this gapped listing.
 */
735 int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
738 struct drm_fence_manager *fm = &dev->fm;
739 struct drm_fence_arg *arg = data;
740 struct drm_fence_object *fence;
743 if (!fm->initialized) {
744 DRM_ERROR("The DRM driver does not support fencing.\n");
748 fence = drm_lookup_fence_object(file_priv, arg->handle);
751 ret = drm_fence_object_wait(fence,
752 arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
755 drm_fence_fill_arg(fence, arg);
756 drm_fence_usage_deref_unlocked(&fence);
/*
 * DRM_IOCTL fence-emit: requires the HW lock; re-emits the fence named
 * by @arg->handle into the command stream with @arg's flags/class/type
 * and returns the updated state in @arg.
 */
762 int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
765 struct drm_fence_manager *fm = &dev->fm;
766 struct drm_fence_arg *arg = data;
767 struct drm_fence_object *fence;
770 if (!fm->initialized) {
771 DRM_ERROR("The DRM driver does not support fencing.\n");
775 LOCK_TEST_WITH_RETURN(dev, file_priv);
776 fence = drm_lookup_fence_object(file_priv, arg->handle);
779 ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
782 drm_fence_fill_arg(fence, arg);
783 drm_fence_usage_deref_unlocked(&fence);
788 int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
791 struct drm_fence_manager *fm = &dev->fm;
792 struct drm_fence_arg *arg = data;
793 struct drm_fence_object *fence;
796 if (!fm->initialized) {
797 DRM_ERROR("The DRM driver does not support fencing.\n");
801 if (!dev->bm.initialized) {
802 DRM_ERROR("Buffer object manager is not initialized\n");
805 LOCK_TEST_WITH_RETURN(dev, file_priv);
806 ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
811 if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
812 ret = drm_fence_add_user_object(file_priv, fence,
814 DRM_FENCE_FLAG_SHAREABLE);
819 arg->handle = fence->base.hash.key;
821 drm_fence_fill_arg(fence, arg);
822 drm_fence_usage_deref_unlocked(&fence);