1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Typically called by the IRQ handler.
/*
 * drm_fence_handler - process a hardware-reported fence completion for one
 * fence class.
 *
 * @dev:      DRM device.
 * @class:    fence class index into fm->class[].
 * @sequence: hardware sequence number that has been reached.
 * @type:     fence-type bits reported completed (DRM_FENCE_TYPE_EXE, ...).
 *
 * Updates the per-class flush bookkeeping, then walks the class ring and
 * ORs the newly-completed type bits into each reached fence's ->signaled
 * mask.  Fully signaled fences are unlinked from the ring and waiters on
 * the class wait queue are woken.
 *
 * All sequence comparisons are wrap-safe: "(a - b) & sequence_mask" is
 * compared against driver->wrap_diff rather than comparing raw values.
 *
 * NOTE(review): presumably runs with fm->lock write-held by the caller
 * (IRQ path) -- the locking is not visible in this excerpt; confirm
 * against the callers.
 */
37 void drm_fence_handler(drm_device_t * dev, uint32_t class,
38 uint32_t sequence, uint32_t type)
43 drm_fence_manager_t *fm = &dev->fm;
44 drm_fence_class_manager_t *fc = &fm->class[class];
45 drm_fence_driver_t *driver = dev->driver->fence_driver;
46 struct list_head *head;
47 drm_fence_object_t *fence, *next;
49 int is_exe = (type & DRM_FENCE_TYPE_EXE);
/* Wrap-safe test: has `sequence` reached the pending EXE flush request? */
52 diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
54 if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
55 fc->pending_exe_flush = 0;
/* Wrap-safe test: is `sequence` at or beyond the last seen EXE sequence? */
57 diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
58 ge_last_exe = diff < driver->wrap_diff;
/* These type bits have now been flushed; stop asking the driver for them. */
61 fc->pending_flush &= ~type;
63 if (is_exe && ge_last_exe) {
64 fc->last_exe_flush = sequence;
67 if (list_empty(&fc->ring))
/* Find the first (oldest-first ring) fence NOT yet reached by `sequence`. */
70 list_for_each_entry(fence, &fc->ring, ring) {
71 diff = (sequence - fence->sequence) & driver->sequence_mask;
72 if (diff > driver->wrap_diff) {
78 head = (found) ? &fence->ring : &fc->ring;
/* Walk backwards over the reached fences; _safe because entries are
 * unlinked while iterating. */
80 list_for_each_entry_safe_reverse(fence, next, head, ring) {
81 if (&fence->ring == &fc->ring)
/* A fence may carry driver-native type bits that complete with it. */
84 type |= fence->native_type;
85 relevant = type & fence->type;
/* Only touch the fence if this completion adds new signaled bits. */
87 if ((fence->signaled | relevant) != fence->signaled) {
88 fence->signaled |= relevant;
89 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
90 fence->base.hash.key, fence->signaled);
91 fence->submitted_flush |= relevant;
/* Bits still wanted by a flush request but neither signaled nor already
 * submitted to the driver -- queue them as pending class flushes. */
95 relevant = fence->flush_mask &
96 ~(fence->signaled | fence->submitted_flush);
99 fc->pending_flush |= relevant;
100 fence->submitted_flush = fence->flush_mask;
/* Every type bit signaled: retire the fence from the ring. */
103 if (!(fence->type & ~fence->signaled)) {
104 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
105 fence->base.hash.key);
106 list_del_init(&fence->ring);
/* Wake anyone sleeping in drm_fence_lazy_wait() on this class. */
111 DRM_WAKEUP(&fc->fence_queue);
115 EXPORT_SYMBOL(drm_fence_handler);
/*
 * drm_fence_unring - unlink a fence's ring entry under the fence-manager
 * write lock, so the IRQ-side drm_fence_handler() cannot walk it while it
 * is being removed.
 * NOTE(review): the list-removal statement itself is not visible in this
 * excerpt; only the lock/unlock pair is shown.
 */
117 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
119 drm_fence_manager_t *fm = &dev->fm;
122 write_lock_irqsave(&fm->lock, flags);
124 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_usage_deref_locked - drop one reference on a fence object and
 * destroy it when the count reaches zero (unring, decrement the manager's
 * object count, free the storage).
 * NOTE(review): the "_locked" suffix suggests the caller holds
 * dev->struct_mutex -- not visible in this excerpt; confirm at call sites.
 */
127 void drm_fence_usage_deref_locked(drm_device_t * dev,
128 drm_fence_object_t * fence)
130 drm_fence_manager_t *fm = &dev->fm;
132 if (atomic_dec_and_test(&fence->usage)) {
133 drm_fence_unring(dev, &fence->ring);
134 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
135 fence->base.hash.key);
136 atomic_dec(&fm->count);
137 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
/*
 * drm_fence_usage_deref_unlocked - drop one reference on a fence object,
 * taking dev->struct_mutex itself before destruction.
 *
 * The usage count is re-read after acquiring the mutex: another thread may
 * have re-referenced the fence between the decrement and the lock, in which
 * case destruction is skipped.
 */
141 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
142 drm_fence_object_t * fence)
144 drm_fence_manager_t *fm = &dev->fm;
146 if (atomic_dec_and_test(&fence->usage)) {
147 mutex_lock(&dev->struct_mutex);
/* Recheck under the mutex: only destroy if nobody re-referenced it. */
148 if (atomic_read(&fence->usage) == 0) {
149 drm_fence_unring(dev, &fence->ring);
150 atomic_dec(&fm->count);
151 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
153 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_object_destroy - user-object "remove" callback: recover the
 * fence from its embedded drm_user_object_t and drop the reference via the
 * locked deref path (installed as fence->base.remove in
 * drm_fence_add_user_object()).
 */
157 static void drm_fence_object_destroy(drm_file_t * priv,
158 drm_user_object_t * base)
160 drm_device_t *dev = priv->head->dev;
161 drm_fence_object_t *fence =
162 drm_user_object_entry(base, drm_fence_object_t, base);
164 drm_fence_usage_deref_locked(dev, fence);
/*
 * fence_signaled - test, under the fence-manager read lock, whether every
 * bit of (fence->type & mask) is already signaled.
 *
 * @poke_flush: when set, presumably gates the driver->poke_flush() call so
 * the driver can nudge completion along -- the conditional itself is not
 * visible in this excerpt; confirm.
 */
167 static int fence_signaled(drm_device_t * dev,
168 drm_fence_object_t * fence,
169 uint32_t mask, int poke_flush)
173 drm_fence_manager_t *fm = &dev->fm;
174 drm_fence_driver_t *driver = dev->driver->fence_driver;
177 driver->poke_flush(dev, fence->class);
178 read_lock_irqsave(&fm->lock, flags);
/* True iff all requested-and-relevant type bits are signaled. */
180 (fence->type & mask & fence->signaled) == (fence->type & mask);
181 read_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_flush_exe - record a request for an EXE flush up to `sequence`
 * on a fence class.  If no EXE flush is pending, start one; otherwise only
 * move the target forward (wrap-safe compare), never backward.
 * NOTE(review): presumably called with fm->lock write-held -- not visible
 * in this excerpt.
 */
186 static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
187 drm_fence_driver_t * driver, uint32_t sequence)
191 if (!fc->pending_exe_flush) {
192 fc->exe_flush_sequence = sequence;
193 fc->pending_exe_flush = 1;
/* Wrap-safe "sequence > exe_flush_sequence": extend the pending target. */
196 (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
197 if (diff < driver->wrap_diff) {
198 fc->exe_flush_sequence = sequence;
/*
 * drm_fence_object_signaled - lockless check that every bit of `type` is
 * already set in fence->signaled.
 * NOTE(review): reads ->signaled without fm->lock, unlike fence_signaled();
 * presumably acceptable as an opportunistic check -- confirm.
 */
203 int drm_fence_object_signaled(drm_fence_object_t * fence,
206 return ((fence->signaled & type) == type);
/*
 * drm_fence_object_flush - ask the driver to flush a fence so that the
 * `type` bits eventually signal.
 *
 * Rejects requests for type bits the fence was never created with.  Under
 * the fence-manager write lock it widens fence->flush_mask, schedules an
 * EXE flush for the fence's sequence if one has not been submitted yet,
 * and publishes any remaining unsubmitted bits as pending class flushes.
 * Finally pokes the driver outside the lock.
 */
209 int drm_fence_object_flush(drm_device_t * dev,
210 drm_fence_object_t * fence,
213 drm_fence_manager_t *fm = &dev->fm;
214 drm_fence_class_manager_t *fc = &fm->class[fence->class];
215 drm_fence_driver_t *driver = dev->driver->fence_driver;
/* A flush may not ask for bits outside the fence's own type mask. */
218 if (type & ~fence->type) {
219 DRM_ERROR("Flush trying to extend fence type, "
220 "0x%x, 0x%x\n", type, fence->type);
224 write_lock_irqsave(&fm->lock, flags);
225 fence->flush_mask |= type;
226 if (fence->submitted_flush == fence->signaled) {
/* EXE flush for this fence's sequence has not been submitted yet. */
227 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
228 !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
229 drm_fence_flush_exe(fc, driver, fence->sequence);
230 fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
/* Hand the still-unsubmitted flush bits to the class bookkeeping. */
232 fc->pending_flush |= (fence->flush_mask &
233 ~fence->submitted_flush);
234 fence->submitted_flush = fence->flush_mask;
237 write_unlock_irqrestore(&fm->lock, flags);
/* Nudge the driver outside the lock so it can act on the new flushes. */
238 driver->poke_flush(dev, fence->class);
243 * Make sure old fence objects are signaled before their fence sequences are
244 * wrapped around and reused.
/*
 * drm_fence_flush_old - proactively flush the oldest fence on a class when
 * its sequence is about to be wrapped and reused.
 *
 * Computes old_sequence = sequence - flush_diff (wrap-safe) and, if the
 * class has not flushed that far yet, schedules an EXE flush half a
 * flush_diff behind the current sequence.  Then, if the oldest ring entry
 * is older than old_sequence, takes a reference and flushes it for its
 * full type mask.
 */
247 void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
249 drm_fence_manager_t *fm = &dev->fm;
250 drm_fence_class_manager_t *fc = &fm->class[class];
251 drm_fence_driver_t *driver = dev->driver->fence_driver;
252 uint32_t old_sequence;
254 drm_fence_object_t *fence;
257 write_lock_irqsave(&fm->lock, flags);
258 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
259 diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
261 if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
262 fc->pending_exe_flush = 1;
/* Aim the flush halfway into the flush window behind `sequence`. */
263 fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
265 write_unlock_irqrestore(&fm->lock, flags);
267 mutex_lock(&dev->struct_mutex);
268 read_lock_irqsave(&fm->lock, flags);
270 if (list_empty(&fc->ring)) {
271 read_unlock_irqrestore(&fm->lock, flags);
272 mutex_unlock(&dev->struct_mutex);
/* Oldest fence is at the head of the ring. */
275 fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
/* Hold a reference across the flush; dropped below. */
276 atomic_inc(&fence->usage);
277 mutex_unlock(&dev->struct_mutex);
278 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
279 read_unlock_irqrestore(&fm->lock, flags);
/* Flush only if the fence has fallen behind the wrap window. */
280 if (diff < driver->wrap_diff) {
281 drm_fence_object_flush(dev, fence, fence->type);
283 drm_fence_usage_deref_unlocked(dev, fence);
286 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * drm_fence_lazy_wait - sleep on the class wait queue until the requested
 * `mask` bits of the fence signal, a ~3*DRM_HZ deadline expires, or a
 * signal interrupts the wait (retried while `ignore_signals` is set).
 * Returns 0 on success; a timeout is reported with an error after logging.
 */
288 static int drm_fence_lazy_wait(drm_device_t *dev,
289 drm_fence_object_t *fence,
293 drm_fence_manager_t *fm = &dev->fm;
294 drm_fence_class_manager_t *fc = &fm->class[fence->class];
296 unsigned long _end = jiffies + 3*DRM_HZ;
/* Woken by DRM_WAKEUP() in drm_fence_handler(); re-checks signaled state. */
300 DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
301 (signaled = fence_signaled(dev, fence, mask, 1)));
304 if (time_after_eq(jiffies, _end))
/* Keep retrying interrupted waits when the caller asked to ignore signals. */
306 } while (ret == -EINTR && ignore_signals);
307 if (fence_signaled(dev, fence, mask, 0))
309 if (time_after_eq(jiffies, _end))
313 DRM_ERROR("Fence timeout. "
314 "GPU lockup or fence driver was "
/* Map an interrupted wait to -EAGAIN so callers can retry. */
317 return ((ret == -EINTR) ? -EAGAIN : ret);
/*
 * drm_fence_object_wait - wait until the `mask` bits of a fence signal.
 *
 * @lazy:           prefer sleeping (IRQ-driven) waits when the driver is
 *                  lazy-capable.
 * @ignore_signals: keep waiting across interrupted sleeps.
 *
 * Rejects masks outside fence->type, fast-paths an already-signaled fence,
 * flushes the requested bits, then either sleeps via drm_fence_lazy_wait()
 * (whole mask, or EXE first then the rest, depending on which types the
 * driver has IRQs for) or falls back to a bounded polling loop.
 */
322 int drm_fence_object_wait(drm_device_t * dev,
323 drm_fence_object_t * fence,
324 int lazy, int ignore_signals, uint32_t mask)
326 drm_fence_driver_t *driver = dev->driver->fence_driver;
/* A wait may not ask for bits outside the fence's own type mask. */
331 if (mask & ~fence->type) {
332 DRM_ERROR("Wait trying to extend fence type"
333 " 0x%08x 0x%08x\n", mask, fence->type);
/* Fast path: already signaled, nothing to wait for. */
337 if (fence_signaled(dev, fence, mask, 0))
340 _end = jiffies + 3 * DRM_HZ;
342 drm_fence_object_flush(dev, fence, mask);
344 if (lazy && driver->lazy_capable) {
346 ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
/* No full lazy capability: sleep for EXE completion if it has an IRQ... */
352 if (driver->has_irq(dev, fence->class,
353 DRM_FENCE_TYPE_EXE)) {
354 ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
/* ...and separately for the remaining non-EXE bits if those do too. */
360 if (driver->has_irq(dev, fence->class,
361 mask & ~DRM_FENCE_TYPE_EXE)) {
362 ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
368 if (drm_fence_object_signaled(fence, mask))
372 * Avoid kernel-space busy-waits.
/* Last resort: poll (with poke_flush) until signaled or the deadline. */
380 signaled = fence_signaled(dev, fence, mask, 1);
381 } while (!signaled && !time_after_eq(jiffies, _end));
/*
 * drm_fence_object_emit - (re)emit a fence to the hardware.
 *
 * Unlinks the fence from any ring it is on, asks the driver to emit a new
 * sequence (which also reports driver-native type bits), then under the
 * write lock resets the fence's flush/signal state and appends it to the
 * tail of its class ring.
 */
389 int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
390 uint32_t fence_flags, uint32_t class, uint32_t type)
392 drm_fence_manager_t *fm = &dev->fm;
393 drm_fence_driver_t *driver = dev->driver->fence_driver;
394 drm_fence_class_manager_t *fc = &fm->class[fence->class];
397 uint32_t native_type;
/* Detach from the old ring before reusing the fence object. */
400 drm_fence_unring(dev, &fence->ring);
401 ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
405 write_lock_irqsave(&fm->lock, flags);
406 fence->class = class;
/* Fresh emission: no flushes requested or submitted, nothing signaled. */
408 fence->flush_mask = 0x00;
409 fence->submitted_flush = 0x00;
410 fence->signaled = 0x00;
411 fence->sequence = sequence;
412 fence->native_type = native_type;
/* First entry on an empty ring seeds last_exe_flush just behind it. */
413 if (list_empty(&fc->ring))
414 fc->last_exe_flush = sequence - 1;
415 list_add_tail(&fence->ring, &fc->ring);
416 write_unlock_irqrestore(&fm->lock, flags);
/*
 * drm_fence_object_init - initialize a freshly allocated fence object:
 * reference count of 1, empty ring linkage, class and cleared flush state
 * set under the write lock; optionally emits immediately when
 * DRM_FENCE_FLAG_EMIT is requested.
 */
420 static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
422 uint32_t fence_flags,
423 drm_fence_object_t * fence)
427 drm_fence_manager_t *fm = &dev->fm;
429 mutex_lock(&dev->struct_mutex);
/* The creator holds the initial reference. */
430 atomic_set(&fence->usage, 1);
431 mutex_unlock(&dev->struct_mutex);
433 write_lock_irqsave(&fm->lock, flags);
434 INIT_LIST_HEAD(&fence->ring);
435 fence->class = class;
437 fence->flush_mask = 0;
438 fence->submitted_flush = 0;
441 write_unlock_irqrestore(&fm->lock, flags);
/* Emit right away if the caller asked for it. */
442 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
443 ret = drm_fence_object_emit(dev, fence, fence_flags,
/*
 * drm_fence_add_user_object - register a fence as a per-file user object
 * (handle-addressable), marking its type and installing the destroy
 * callback.  The add is done under dev->struct_mutex.
 */
449 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
452 drm_device_t *dev = priv->head->dev;
455 mutex_lock(&dev->struct_mutex);
456 ret = drm_add_user_object(priv, &fence->base, shareable);
457 mutex_unlock(&dev->struct_mutex);
460 fence->base.type = drm_fence_type;
/* Called when the user object is removed; drops the fence reference. */
461 fence->base.remove = &drm_fence_object_destroy;
462 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
466 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * drm_fence_object_create - allocate and initialize a new fence object,
 * returning it through *c_fence.  On init failure the allocation is
 * released via the unlocked deref path; on success the manager's object
 * count is bumped.
 */
468 int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
469 unsigned flags, drm_fence_object_t ** c_fence)
471 drm_fence_object_t *fence;
473 drm_fence_manager_t *fm = &dev->fm;
475 fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
478 ret = drm_fence_object_init(dev, class, type, flags, fence);
/* Init failed: drop the creator reference, which frees the object. */
480 drm_fence_usage_deref_unlocked(dev, fence);
484 atomic_inc(&fm->count);
489 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * drm_fence_manager_init - set up the per-device fence manager: init the
 * rwlock, copy the driver's class count (bounded by _DRM_FENCE_CLASSES),
 * and initialize each class's ring, pending-flush mask and wait queue.
 */
491 void drm_fence_manager_init(drm_device_t * dev)
493 drm_fence_manager_t *fm = &dev->fm;
494 drm_fence_class_manager_t *class;
495 drm_fence_driver_t *fed = dev->driver->fence_driver;
498 rwlock_init(&fm->lock);
499 write_lock(&fm->lock);
505 fm->num_classes = fed->num_classes;
/* The driver may not declare more classes than the manager can hold. */
506 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
508 for (i=0; i<fm->num_classes; ++i) {
509 class = &fm->class[i];
511 INIT_LIST_HEAD(&class->ring);
512 class->pending_flush = 0;
513 DRM_INIT_WAITQUEUE(&class->fence_queue);
516 atomic_set(&fm->count, 0);
518 write_unlock(&fm->lock);
/*
 * drm_fence_manager_takedown - counterpart to drm_fence_manager_init().
 * NOTE(review): the body is not visible in this excerpt.
 */
521 void drm_fence_manager_takedown(drm_device_t * dev)
/*
 * drm_lookup_fence_object - resolve a user handle to a fence object under
 * dev->struct_mutex, verifying the user object's type and taking a
 * reference before returning.  Non-fence or unknown handles fail.
 */
525 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
527 drm_device_t *dev = priv->head->dev;
528 drm_user_object_t *uo;
529 drm_fence_object_t *fence;
531 mutex_lock(&dev->struct_mutex);
532 uo = drm_lookup_user_object(priv, handle);
/* Reject missing handles and handles that are not fence objects. */
533 if (!uo || (uo->type != drm_fence_type)) {
534 mutex_unlock(&dev->struct_mutex);
537 fence = drm_user_object_entry(uo, drm_fence_object_t, base);
/* Caller receives a referenced fence; it must deref when done. */
538 atomic_inc(&fence->usage);
539 mutex_unlock(&dev->struct_mutex);
/*
 * drm_fence_ioctl - userspace entry point dispatching fence operations
 * (create / destroy / reference / unreference / signaled / flush / wait /
 * emit / buffers) on the argument copied in from user space, and copying
 * the (class, type, signaled, handle) results back out.
 * NOTE(review): the function continues past the end of this excerpt; the
 * switch statement and final return are only partially visible.
 */
543 int drm_fence_ioctl(DRM_IOCTL_ARGS)
547 drm_fence_manager_t *fm = &dev->fm;
549 drm_fence_object_t *fence;
550 drm_user_object_t *uo;
/* Fencing is optional per driver; bail out early if unsupported. */
554 if (!fm->initialized) {
555 DRM_ERROR("The DRM driver does not support fencing.\n");
559 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
/* Create a new fence, optionally emitting it (which needs the HW lock). */
561 case drm_fence_create:
562 if (arg.flags & DRM_FENCE_FLAG_EMIT)
563 LOCK_TEST_WITH_RETURN(dev, filp);
564 ret = drm_fence_object_create(dev, arg.class,
565 arg.type, arg.flags, &fence);
568 ret = drm_fence_add_user_object(priv, fence,
570 DRM_FENCE_FLAG_SHAREABLE);
/* User-object registration failed: drop the creation reference. */
572 drm_fence_usage_deref_unlocked(dev, fence);
577 * usage > 0. No need to lock dev->struct_mutex;
/* Extra reference for the status copy-out below. */
580 atomic_inc(&fence->usage);
581 arg.handle = fence->base.hash.key;
/* Destroy: only the owning file may remove its fence user object. */
583 case drm_fence_destroy:
584 mutex_lock(&dev->struct_mutex);
585 uo = drm_lookup_user_object(priv, arg.handle);
586 if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
587 mutex_unlock(&dev->struct_mutex);
590 ret = drm_remove_user_object(priv, uo);
591 mutex_unlock(&dev->struct_mutex);
593 case drm_fence_reference:
595 drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
598 fence = drm_lookup_fence_object(priv, arg.handle);
600 case drm_fence_unreference:
601 ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
603 case drm_fence_signaled:
604 fence = drm_lookup_fence_object(priv, arg.handle);
608 case drm_fence_flush:
609 fence = drm_lookup_fence_object(priv, arg.handle);
612 ret = drm_fence_object_flush(dev, fence, arg.type);
615 fence = drm_lookup_fence_object(priv, arg.handle);
619 drm_fence_object_wait(dev, fence,
620 arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
/* Emit touches the hardware ring; requires the HW lock. */
624 LOCK_TEST_WITH_RETURN(dev, filp);
625 fence = drm_lookup_fence_object(priv, arg.handle);
628 ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
631 case drm_fence_buffers:
632 if (!dev->bm.initialized) {
633 DRM_ERROR("Buffer object manager is not initialized\n");
636 LOCK_TEST_WITH_RETURN(dev, filp);
637 ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
641 ret = drm_fence_add_user_object(priv, fence,
643 DRM_FENCE_FLAG_SHAREABLE);
646 atomic_inc(&fence->usage);
647 arg.handle = fence->base.hash.key;
/* Snapshot fence status under the read lock for the copy-out. */
652 read_lock_irqsave(&fm->lock, flags);
653 arg.class = fence->class;
654 arg.type = fence->type;
655 arg.signaled = fence->signaled;
656 read_unlock_irqrestore(&fm->lock, flags);
/* Drop the per-operation reference taken above. */
657 drm_fence_usage_deref_unlocked(dev, fence);
659 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));