1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Typically called by the IRQ handler.
/*
 * Process a fence-completion event for a fence class: mark every fence on
 * the class ring whose sequence number has been reached as signaled for the
 * given type bits, retire fully-signaled fences from the ring, and wake
 * waiters.  Typically called from the driver's IRQ handler.
 *
 * NOTE(review): this excerpt is missing several original lines (the opening
 * brace, the declarations of 'diff'/'relevant'/'found', the early return for
 * an empty ring, the break out of the search loop, and the closing braces) —
 * the comments below describe the visible logic only.
 *
 * Presumably called with fm->lock already write-held by the caller (no
 * locking is visible here) — TODO confirm against callers.
 */
37 void drm_fence_handler(drm_device_t * dev, uint32_t class,
38 uint32_t sequence, uint32_t type)
43 drm_fence_manager_t *fm = &dev->fm;
44 drm_fence_class_manager_t *fc = &fm->class[class];
45 drm_fence_driver_t *driver = dev->driver->fence_driver;
46 struct list_head *list, *prev;
47 drm_fence_object_t *fence;
/* Nothing to do if no fences are outstanding on this class. */
50 if (list_empty(&fc->ring))
/*
 * Walk the ring to find the oldest fence that is NOT yet covered by
 * 'sequence'.  The subtraction is done modulo the driver's sequence mask so
 * that hardware sequence-counter wraparound is handled correctly: a
 * difference larger than wrap_diff means 'sequence' is actually older.
 */
53 list_for_each_entry(fence, &fc->ring, ring) {
54 diff = (sequence - fence->sequence) & driver->sequence_mask;
55 if (diff > driver->wrap_diff) {
/*
 * Start from the newest completed fence (or the ring tail if none was found
 * newer) and walk backwards, signaling each older fence.
 */
61 list = (found) ? fence->ring.prev : fc->ring.prev;
64 for (; list != &fc->ring; list = prev, prev = list->prev) {
65 fence = list_entry(list, drm_fence_object_t, ring);
/* Accumulate the fence's native type bits into the signaled type set. */
67 type |= fence->native_type;
68 relevant = type & fence->type;
/* Only touch the fence if this event adds new signaled bits. */
70 if ((fence->signaled | relevant) != fence->signaled) {
71 fence->signaled |= relevant;
72 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
73 fence->base.hash.key, fence->signaled);
/* Signaled bits no longer need an explicit flush. */
74 fence->submitted_flush |= relevant;
/* Flush bits that were requested but are neither signaled nor submitted. */
78 relevant = fence->flush_mask &
79 ~(fence->signaled | fence->submitted_flush);
82 fc->pending_flush |= relevant;
83 fence->submitted_flush = fence->flush_mask;
/* All requested type bits signaled: the fence is done — retire it. */
86 if (!(fence->type & ~fence->signaled)) {
87 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
88 fence->base.hash.key);
89 list_del_init(&fence->ring);
/* Wake anyone sleeping in drm_fence_lazy_wait() on this class. */
95 DRM_WAKEUP(&fc->fence_queue);
99 EXPORT_SYMBOL(drm_fence_handler);
/*
 * Detach a fence's ring entry from its class ring under the fence-manager
 * write lock.
 *
 * NOTE(review): the actual list removal (presumably list_del_init(ring))
 * falls in a line missing from this excerpt, between the lock and unlock.
 */
101 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
103 drm_fence_manager_t *fm = &dev->fm;
106 write_lock_irqsave(&fm->lock, flags);
108 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Drop a reference on a fence object; caller holds dev->struct_mutex
 * (implied by the "_locked" suffix — confirm against callers).  When the
 * last reference goes away the fence is unhooked from its ring, the global
 * fence count is decremented and the object is freed.
 */
111 void drm_fence_usage_deref_locked(drm_device_t * dev,
112 drm_fence_object_t * fence)
114 drm_fence_manager_t *fm = &dev->fm;
116 if (atomic_dec_and_test(&fence->usage)) {
117 drm_fence_unring(dev, &fence->ring);
118 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
119 fence->base.hash.key);
120 atomic_dec(&fm->count);
121 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
/*
 * Drop a reference on a fence object without dev->struct_mutex held.
 * If this was the last reference, the mutex is taken and the usage count
 * re-checked under it: another thread may have re-referenced the object
 * between the atomic_dec_and_test and the mutex_lock, in which case the
 * object must not be freed here.
 */
125 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
126 drm_fence_object_t * fence)
128 drm_fence_manager_t *fm = &dev->fm;
130 if (atomic_dec_and_test(&fence->usage)) {
131 mutex_lock(&dev->struct_mutex);
/* Re-check under the mutex: only destroy if nobody re-referenced it. */
132 if (atomic_read(&fence->usage) == 0) {
133 drm_fence_unring(dev, &fence->ring);
134 atomic_dec(&fm->count);
135 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
137 mutex_unlock(&dev->struct_mutex);
/*
 * User-object destructor callback (installed as fence->base.remove in
 * drm_fence_add_user_object): recover the fence from its embedded user
 * object and drop the reference it held.  Presumably invoked with
 * dev->struct_mutex held, matching the "_locked" deref — TODO confirm
 * against the user-object framework.
 */
141 static void drm_fence_object_destroy(drm_file_t * priv,
142 drm_user_object_t * base)
144 drm_device_t *dev = priv->head->dev;
145 drm_fence_object_t *fence =
146 drm_user_object_entry(base, drm_fence_object_t, base);
148 drm_fence_usage_deref_locked(dev, fence);
/*
 * Return nonzero if all 'mask' type bits of the fence (restricted to the
 * fence's own type) are signaled.  When poke_flush is set the driver's
 * poke_flush hook is called first, giving the driver a chance to update
 * signaled state (e.g. by reading hardware).
 *
 * NOTE(review): the declarations of the return variable and 'flags', the
 * poke_flush conditional, and the final return are in lines missing from
 * this excerpt.
 */
151 static int fence_signaled(drm_device_t * dev,
152 drm_fence_object_t * fence,
153 uint32_t mask, int poke_flush)
157 drm_fence_manager_t *fm = &dev->fm;
158 drm_fence_driver_t *driver = dev->driver->fence_driver;
161 driver->poke_flush(dev, fence->class);
/* Read fence->signaled consistently under the reader side of fm->lock. */
162 read_lock_irqsave(&fm->lock, flags);
164 (fence->type & mask & fence->signaled) == (fence->type & mask);
165 read_unlock_irqrestore(&fm->lock, flags);
/*
 * Request an EXE flush up to 'sequence' on a fence class.  Maintains
 * fc->exe_flush_sequence as the highest sequence for which an EXE flush is
 * pending, using wrap-safe modular comparisons against
 * driver->sequence_mask / driver->wrap_diff.
 *
 * Presumably called with fm->lock write-held (it mutates fc state with no
 * locking of its own) — TODO confirm against callers.
 *
 * NOTE(review): several lines are missing from this excerpt (the 'diff'
 * declaration, the branch bodies and closing braces), so the exact control
 * flow between the two cases cannot be fully read here.
 */
170 static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
171 drm_fence_driver_t * driver, uint32_t sequence)
/* No EXE flush pending yet: establish a baseline last_exe_flush. */
175 if (!fc->pending_exe_flush) {
176 struct list_head *list;
179 * Last_exe_flush is invalid. Find oldest sequence.
/* Empty ring: fall through (missing-line branch) ... */
183 if (list->next == &fc->ring) {
/* ... otherwise derive the baseline from the oldest fence on the ring. */
186 drm_fence_object_t *fence =
187 list_entry(list->next, drm_fence_object_t, ring);
188 fc->last_exe_flush = (fence->sequence - 1) &
189 driver->sequence_mask;
/* Skip the request if 'sequence' is not ahead of last_exe_flush. */
191 diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
192 if (diff >= driver->wrap_diff)
194 fc->exe_flush_sequence = sequence;
195 fc->pending_exe_flush = 1;
/* A flush is already pending: only advance the target sequence forward. */
198 (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
199 if (diff < driver->wrap_diff) {
200 fc->exe_flush_sequence = sequence;
/*
 * Lockless check: nonzero iff every bit of 'type' is already set in
 * fence->signaled.  (The second parameter — presumably 'uint32_t type' —
 * falls on a line missing from this excerpt.)
 */
205 int drm_fence_object_signaled(drm_fence_object_t * fence,
208 return ((fence->signaled & type) == type);
/*
 * Request that the given type bits of a fence be flushed/signaled.  Records
 * the request in fence->flush_mask, schedules an EXE flush if needed, pushes
 * the remaining bits into the class's pending_flush set, and finally pokes
 * the driver.  Rejects (with DRM_ERROR) any attempt to flush type bits the
 * fence was not created with.
 *
 * NOTE(review): the 'flags' declaration, the error-path return, and the
 * final return are in lines missing from this excerpt.
 */
211 int drm_fence_object_flush(drm_device_t * dev,
212 drm_fence_object_t * fence,
215 drm_fence_manager_t *fm = &dev->fm;
216 drm_fence_class_manager_t *fc = &fm->class[fence->class];
217 drm_fence_driver_t *driver = dev->driver->fence_driver;
/* A flush may only narrow to bits within the fence's own type. */
220 if (type & ~fence->type) {
221 DRM_ERROR("Flush trying to extend fence type, "
222 "0x%x, 0x%x\n", type, fence->type);
226 write_lock_irqsave(&fm->lock, flags);
227 fence->flush_mask |= type;
/* Only submit new flushes when nothing beyond the signaled set is pending. */
228 if (fence->submitted_flush == fence->signaled) {
/* EXE bit needs a dedicated exe-flush request the first time. */
229 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
230 !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
231 drm_fence_flush_exe(fc, driver, fence->sequence);
232 fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
/* Non-EXE bits go through the class's generic pending_flush set. */
234 fc->pending_flush |= (fence->flush_mask &
235 ~fence->submitted_flush);
236 fence->submitted_flush = fence->flush_mask;
239 write_unlock_irqrestore(&fm->lock, flags);
/* Kick the driver so it acts on the newly recorded flush request. */
240 driver->poke_flush(dev, fence->class);
245 * Make sure old fence objects are signaled before their fence sequences are
246 * wrapped around and reused.
/*
 * Given the sequence number about to be emitted, find the oldest fence on
 * the class ring and, if it is older than (sequence - flush_diff) modulo
 * the sequence mask, force a flush of it so it signals before its sequence
 * number can be reused.  Takes a temporary reference on the fence so it
 * survives after the locks are dropped.
 *
 * NOTE(review): the 'flags'/'diff' declarations and an early return after
 * the empty-ring unlock are in lines missing from this excerpt.  Also note
 * the visible ordering: mutex_unlock (orig. 269) happens before
 * read_unlock_irqrestore (orig. 271) while the diff computation sits
 * between them — whether that reflects the real file or the truncation
 * cannot be told from here.
 */
249 void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
251 drm_fence_manager_t *fm = &dev->fm;
252 drm_fence_class_manager_t *fc = &fm->class[class];
253 drm_fence_driver_t *driver = dev->driver->fence_driver;
254 uint32_t old_sequence;
256 drm_fence_object_t *fence;
259 mutex_lock(&dev->struct_mutex);
260 read_lock_irqsave(&fm->lock, flags);
/* Empty ring: nothing can be stale — drop locks and bail out. */
261 if (fc->ring.next == &fc->ring) {
262 read_unlock_irqrestore(&fm->lock, flags);
263 mutex_unlock(&dev->struct_mutex);
/* Threshold: anything older than this must be flushed now. */
266 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
267 fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
/* Hold a reference across the unlocked flush below. */
268 atomic_inc(&fence->usage);
269 mutex_unlock(&dev->struct_mutex);
270 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
271 read_unlock_irqrestore(&fm->lock, flags);
272 if (diff < driver->wrap_diff) {
273 drm_fence_object_flush(dev, fence, fence->type);
275 drm_fence_usage_deref_unlocked(dev, fence);
278 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * Sleep on the class wait queue until the requested mask bits of the fence
 * are signaled, with a ~3*DRM_HZ overall timeout.  When ignore_signals is
 * set, interrupted sleeps (-EINTR) are retried until the deadline.  On
 * timeout a "GPU lockup" error is logged; an interrupted wait is reported
 * to the caller as -EAGAIN.
 *
 * NOTE(review): the 'ret' declaration, the do{ opener, the break on
 * deadline, the timeout return value and the success return are in lines
 * missing from this excerpt.
 */
280 static int drm_fence_lazy_wait(drm_device_t *dev,
281 drm_fence_object_t *fence,
282 int ignore_signals, uint32_t mask)
284 drm_fence_manager_t *fm = &dev->fm;
285 drm_fence_class_manager_t *fc = &fm->class[fence->class];
287 unsigned long _end = jiffies + 3*DRM_HZ;
/* Sleep until signaled or timeout; fence_signaled(...,1) pokes the driver. */
291 DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
292 fence_signaled(dev, fence, mask, 1));
293 if (time_after_eq(jiffies, _end))
/* Keep retrying interrupted sleeps only if the caller asked us to. */
295 } while (ret == -EINTR && ignore_signals);
296 if (time_after_eq(jiffies, _end) && (ret != 0))
300 DRM_ERROR("Fence timeout. "
301 "GPU lockup or fence driver was "
/* Map an interrupted wait to -EAGAIN so callers can restart the ioctl. */
304 return ((ret == -EINTR) ? -EAGAIN : ret);
/*
 * Wait for the given mask bits of a fence to signal.  Strategy, in order:
 * fast path if already signaled; flush the fence; then either a lazy
 * (sleeping) wait when the driver supports it, an IRQ-driven wait for the
 * bits the driver has interrupts for, and finally a polling loop as a last
 * resort.  Rejects mask bits outside the fence's type with DRM_ERROR.
 *
 * NOTE(review): many lines are missing from this excerpt — declarations
 * ('ret', '_end', 'signaled'), early returns, the bodies between the
 * has_irq branches, and the final timeout/return handling — so the exact
 * fallback sequencing cannot be fully read here.
 */
309 int drm_fence_object_wait(drm_device_t * dev,
310 drm_fence_object_t * fence,
311 int lazy, int ignore_signals, uint32_t mask)
313 drm_fence_driver_t *driver = dev->driver->fence_driver;
/* Waiting may only cover bits the fence was created with. */
318 if (mask & ~fence->type) {
319 DRM_ERROR("Wait trying to extend fence type"
320 " 0x%08x 0x%08x\n", mask, fence->type);
/* Fast path: already signaled (no driver poke). */
324 if (fence_signaled(dev, fence, mask, 0))
327 _end = jiffies + 3 * DRM_HZ;
/* Make sure the hardware is actually asked to signal these bits. */
329 drm_fence_object_flush(dev, fence, mask);
/* Preferred: fully lazy wait when the driver can signal without polling. */
331 if (lazy && driver->lazy_capable) {
333 ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
/* Otherwise wait on the EXE bit via IRQ if the driver has one for it. */
339 if (driver->has_irq(dev, fence->class,
340 DRM_FENCE_TYPE_EXE)) {
341 ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
/* And on the remaining non-EXE bits if those have an IRQ too. */
347 if (driver->has_irq(dev, fence->class,
348 mask & ~DRM_FENCE_TYPE_EXE)) {
349 ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
355 if (fence_signaled(dev, fence, mask, 0))
/* Last resort: polling. Flagged loudly because it burns CPU. */
358 DRM_ERROR("Busy wait\n");
360 * Avoid kernel-space busy-waits.
368 signaled = fence_signaled(dev, fence, mask, 1);
369 } while (!signaled && !time_after_eq(jiffies, _end));
/*
 * (Re)emit a fence on the hardware: remove it from any ring it is on, ask
 * the driver to emit a new sequence number, reset all signaling state under
 * the fence-manager write lock, and append it to the tail of its class ring
 * (the ring is kept ordered oldest-first).
 *
 * NOTE(review): the declarations of 'ret'/'flags'/'sequence', the error
 * check on driver->emit's return, the fence->type assignment (orig. line
 * 394 is missing) and the final return fall outside this excerpt.
 */
377 int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
378 uint32_t fence_flags, uint32_t class, uint32_t type)
380 drm_fence_manager_t *fm = &dev->fm;
381 drm_fence_driver_t *driver = dev->driver->fence_driver;
384 uint32_t native_type;
/* Detach from the old ring before re-emitting with a new sequence. */
387 drm_fence_unring(dev, &fence->ring);
388 ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
392 write_lock_irqsave(&fm->lock, flags);
393 fence->class = class;
/* Fresh fence: no flushes requested/submitted, nothing signaled yet. */
395 fence->flush_mask = 0x00;
396 fence->submitted_flush = 0x00;
397 fence->signaled = 0x00;
398 fence->sequence = sequence;
399 fence->native_type = native_type;
400 list_add_tail(&fence->ring, &fm->class[class].ring);
401 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Initialize a freshly allocated fence object: set its refcount to 1,
 * zero its signaling state under the fence-manager write lock, and, when
 * DRM_FENCE_FLAG_EMIT is requested, immediately emit it on the hardware.
 *
 * NOTE(review): missing lines hide the 'type' parameter (orig. 406), the
 * 'ret'/'flags' declarations, the fence->type/signaled assignments
 * (orig. 421, 424-425) and the final return.
 */
405 static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
407 uint32_t fence_flags,
408 drm_fence_object_t * fence)
412 drm_fence_manager_t *fm = &dev->fm;
/* Refcount set under struct_mutex; starts at 1 for the caller. */
414 mutex_lock(&dev->struct_mutex);
415 atomic_set(&fence->usage, 1);
416 mutex_unlock(&dev->struct_mutex);
418 write_lock_irqsave(&fm->lock, flags);
419 INIT_LIST_HEAD(&fence->ring);
420 fence->class = class;
422 fence->flush_mask = 0;
423 fence->submitted_flush = 0;
426 write_unlock_irqrestore(&fm->lock, flags);
/* Optionally put the fence on the hardware right away. */
427 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
428 ret = drm_fence_object_emit(dev, fence, fence_flags,
/*
 * Expose a fence to userspace: register its embedded base as a DRM user
 * object (optionally shareable), tag it with the fence type, and install
 * drm_fence_object_destroy as its removal callback.
 *
 * NOTE(review): the 'shareable' parameter type, the 'ret' declaration, the
 * error check after drm_add_user_object and the final return are on lines
 * missing from this excerpt.
 */
434 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
437 drm_device_t *dev = priv->head->dev;
440 mutex_lock(&dev->struct_mutex);
441 ret = drm_add_user_object(priv, &fence->base, shareable);
442 mutex_unlock(&dev->struct_mutex);
445 fence->base.type = drm_fence_type;
/* Removal of the user object drops the fence reference it represents. */
446 fence->base.remove = &drm_fence_object_destroy;
447 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
451 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * Allocate and initialize a new fence object, returning it via *c_fence.
 * On init failure the half-built object is released through the unlocked
 * deref path.  On success the manager's global fence count is bumped.
 *
 * NOTE(review): the 'ret' declaration, the NULL check after
 * drm_ctl_alloc, the error return, the *c_fence assignment and the final
 * return are on lines missing from this excerpt.
 */
453 int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
454 unsigned flags, drm_fence_object_t ** c_fence)
456 drm_fence_object_t *fence;
458 drm_fence_manager_t *fm = &dev->fm;
460 fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
463 ret = drm_fence_object_init(dev, class, type, flags, fence);
/* Init failed: drop the initial reference, which frees the object. */
465 drm_fence_usage_deref_unlocked(dev, fence);
469 atomic_inc(&fm->count);
474 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * One-time setup of the per-device fence manager at driver load: initialize
 * the rwlock, take over the driver's class count (bounded by
 * _DRM_FENCE_CLASSES), and reset each class's ring, pending-flush state and
 * wait queue.  Runs under the write lock although no concurrency is
 * expected this early — presumably just for consistency.
 *
 * NOTE(review): missing lines hide the loop variable declaration, parts of
 * the per-class init (orig. 496, 500-501) and any fed==NULL handling
 * (orig. 486-490).
 */
476 void drm_fence_manager_init(drm_device_t * dev)
478 drm_fence_manager_t *fm = &dev->fm;
479 drm_fence_class_manager_t *class;
480 drm_fence_driver_t *fed = dev->driver->fence_driver;
484 fm->lock = RW_LOCK_UNLOCKED;
485 write_lock(&fm->lock);
491 fm->num_classes = fed->num_classes;
/* Driver must not declare more classes than the manager can hold. */
492 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
494 for (i=0; i<fm->num_classes; ++i) {
495 class = &fm->class[i];
497 INIT_LIST_HEAD(&class->ring);
498 class->pending_flush = 0;
499 DRM_INIT_WAITQUEUE(&class->fence_queue);
502 atomic_set(&fm->count, 0);
504 write_unlock(&fm->lock);
/*
 * Teardown counterpart of drm_fence_manager_init.
 * NOTE(review): the body (orig. lines 508-510) is entirely missing from
 * this excerpt; nothing can be said about what it does.
 */
507 void drm_fence_manager_takedown(drm_device_t * dev)
/*
 * Resolve a userspace handle to a fence object.  Looks the handle up in the
 * per-file user-object table under dev->struct_mutex, verifies it is a
 * fence, and returns it with an extra reference the caller must drop.
 *
 * NOTE(review): the NULL return on the failure path and the final
 * 'return fence;' are on lines missing from this excerpt.
 */
511 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
513 drm_device_t *dev = priv->head->dev;
514 drm_user_object_t *uo;
515 drm_fence_object_t *fence;
517 mutex_lock(&dev->struct_mutex);
518 uo = drm_lookup_user_object(priv, handle);
/* Unknown handle, or a handle of the wrong object type: reject. */
519 if (!uo || (uo->type != drm_fence_type)) {
520 mutex_unlock(&dev->struct_mutex);
523 fence = drm_user_object_entry(uo, drm_fence_object_t, base);
/* Caller receives a referenced fence; pairs with a later deref. */
524 atomic_inc(&fence->usage);
525 mutex_unlock(&dev->struct_mutex);
529 int drm_fence_ioctl(DRM_IOCTL_ARGS)
533 drm_fence_manager_t *fm = &dev->fm;
535 drm_fence_object_t *fence;
536 drm_user_object_t *uo;
540 if (!fm->initialized) {
541 DRM_ERROR("The DRM driver does not support fencing.\n");
545 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
547 case drm_fence_create:
548 if (arg.flags & DRM_FENCE_FLAG_EMIT)
549 LOCK_TEST_WITH_RETURN(dev, filp);
550 ret = drm_fence_object_create(dev, arg.class,
551 arg.type, arg.flags, &fence);
554 ret = drm_fence_add_user_object(priv, fence,
556 DRM_FENCE_FLAG_SHAREABLE);
558 drm_fence_usage_deref_unlocked(dev, fence);
563 * usage > 0. No need to lock dev->struct_mutex;
566 atomic_inc(&fence->usage);
567 arg.handle = fence->base.hash.key;
569 case drm_fence_destroy:
570 mutex_lock(&dev->struct_mutex);
571 uo = drm_lookup_user_object(priv, arg.handle);
572 if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
573 mutex_unlock(&dev->struct_mutex);
576 ret = drm_remove_user_object(priv, uo);
577 mutex_unlock(&dev->struct_mutex);
579 case drm_fence_reference:
581 drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
584 fence = drm_lookup_fence_object(priv, arg.handle);
586 case drm_fence_unreference:
587 ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
589 case drm_fence_signaled:
590 fence = drm_lookup_fence_object(priv, arg.handle);
594 case drm_fence_flush:
595 fence = drm_lookup_fence_object(priv, arg.handle);
598 ret = drm_fence_object_flush(dev, fence, arg.type);
601 fence = drm_lookup_fence_object(priv, arg.handle);
605 drm_fence_object_wait(dev, fence,
606 arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
610 LOCK_TEST_WITH_RETURN(dev, filp);
611 fence = drm_lookup_fence_object(priv, arg.handle);
614 ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
617 case drm_fence_buffers:
618 if (!dev->bm.initialized) {
619 DRM_ERROR("Buffer object manager is not initialized\n");
622 LOCK_TEST_WITH_RETURN(dev, filp);
623 ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
627 ret = drm_fence_add_user_object(priv, fence,
629 DRM_FENCE_FLAG_SHAREABLE);
632 atomic_inc(&fence->usage);
633 arg.handle = fence->base.hash.key;
638 read_lock_irqsave(&fm->lock, flags);
639 arg.class = fence->class;
640 arg.type = fence->type;
641 arg.signaled = fence->signaled;
642 read_unlock_irqrestore(&fm->lock, flags);
643 drm_fence_usage_deref_unlocked(dev, fence);
645 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));