1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 /* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> */
34 /* Typically called by the IRQ handler. */
37 void drm_fence_handler(drm_device_t * dev, uint32_t class,
38 uint32_t sequence, uint32_t type)
43 drm_fence_manager_t *fm = &dev->fm;
44 drm_fence_driver_t *driver = dev->driver->fence_driver;
45 struct list_head *list, *prev;
46 drm_fence_object_t *fence;
49 if (list_empty(&fm->ring))
52 list_for_each_entry(fence, &fm->ring, ring) {
53 diff = (sequence - fence->sequence) & driver->sequence_mask;
54 if (diff > driver->wrap_diff) {
60 list = (found) ? fence->ring.prev : fm->ring.prev;
63 for (; list != &fm->ring; list = prev, prev = list->prev) {
64 fence = list_entry(list, drm_fence_object_t, ring);
66 type |= fence->native_type;
67 relevant = type & fence->type;
69 if ((fence->signaled | relevant) != fence->signaled) {
70 fence->signaled |= relevant;
71 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
72 fence->base.hash.key, fence->signaled);
73 fence->submitted_flush |= relevant;
77 relevant = fence->flush_mask &
78 ~(fence->signaled | fence->submitted_flush);
81 fm->pending_flush |= relevant;
82 fence->submitted_flush = fence->flush_mask;
85 if (!(fence->type & ~fence->signaled)) {
86 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
87 fence->base.hash.key);
88 list_del_init(&fence->ring);
94 DRM_WAKEUP(&fm->fence_queue);
98 EXPORT_SYMBOL(drm_fence_handler);
100 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
102 drm_fence_manager_t *fm = &dev->fm;
105 write_lock_irqsave(&fm->lock, flags);
107 write_unlock_irqrestore(&fm->lock, flags);
110 void drm_fence_usage_deref_locked(drm_device_t * dev,
111 drm_fence_object_t * fence)
113 drm_fence_manager_t *fm = &dev->fm;
115 if (atomic_dec_and_test(&fence->usage)) {
116 drm_fence_unring(dev, &fence->ring);
117 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
118 fence->base.hash.key);
119 atomic_dec(&fm->count);
120 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
124 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
125 drm_fence_object_t * fence)
127 drm_fence_manager_t *fm = &dev->fm;
129 if (atomic_dec_and_test(&fence->usage)) {
130 mutex_lock(&dev->struct_mutex);
131 if (atomic_read(&fence->usage) == 0) {
132 drm_fence_unring(dev, &fence->ring);
133 atomic_dec(&fm->count);
134 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
136 mutex_unlock(&dev->struct_mutex);
140 static void drm_fence_object_destroy(drm_file_t * priv,
141 drm_user_object_t * base)
143 drm_device_t *dev = priv->head->dev;
144 drm_fence_object_t *fence =
145 drm_user_object_entry(base, drm_fence_object_t, base);
147 drm_fence_usage_deref_locked(dev, fence);
150 static int fence_signaled(drm_device_t * dev,
151 drm_fence_object_t * fence,
152 uint32_t mask, int poke_flush)
156 drm_fence_manager_t *fm = &dev->fm;
157 drm_fence_driver_t *driver = dev->driver->fence_driver;
160 driver->poke_flush(dev, fence->class);
161 read_lock_irqsave(&fm->lock, flags);
163 (fence->type & mask & fence->signaled) == (fence->type & mask);
164 read_unlock_irqrestore(&fm->lock, flags);
169 static void drm_fence_flush_exe(drm_fence_manager_t * fm,
170 drm_fence_driver_t * driver, uint32_t sequence)
174 if (!fm->pending_exe_flush) {
175 struct list_head *list;
178 * Last_exe_flush is invalid. Find oldest sequence.
182 if (list->next == &fm->ring) {
185 drm_fence_object_t *fence =
186 list_entry(list->next, drm_fence_object_t, ring);
187 fm->last_exe_flush = (fence->sequence - 1) &
188 driver->sequence_mask;
190 diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
191 if (diff >= driver->wrap_diff)
193 fm->exe_flush_sequence = sequence;
194 fm->pending_exe_flush = 1;
197 (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
198 if (diff < driver->wrap_diff) {
199 fm->exe_flush_sequence = sequence;
204 int drm_fence_object_signaled(drm_fence_object_t * fence,
207 return ((fence->signaled & type) == type);
210 int drm_fence_object_flush(drm_device_t * dev,
211 drm_fence_object_t * fence,
214 drm_fence_manager_t *fm = &dev->fm;
215 drm_fence_driver_t *driver = dev->driver->fence_driver;
218 if (type & ~fence->type) {
219 DRM_ERROR("Flush trying to extend fence type, "
220 "0x%x, 0x%x\n", type, fence->type);
224 write_lock_irqsave(&fm->lock, flags);
225 fence->flush_mask |= type;
226 if (fence->submitted_flush == fence->signaled) {
227 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
228 !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
229 drm_fence_flush_exe(fm, driver, fence->sequence);
230 fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
232 fm->pending_flush |= (fence->flush_mask &
233 ~fence->submitted_flush);
234 fence->submitted_flush = fence->flush_mask;
237 write_unlock_irqrestore(&fm->lock, flags);
238 driver->poke_flush(dev, fence->class);
243 * Make sure old fence objects are signaled before their fence sequences are
244 * wrapped around and reused.
247 void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
249 drm_fence_manager_t *fm = &dev->fm;
250 drm_fence_driver_t *driver = dev->driver->fence_driver;
251 uint32_t old_sequence;
253 drm_fence_object_t *fence;
256 mutex_lock(&dev->struct_mutex);
257 read_lock_irqsave(&fm->lock, flags);
258 if (fm->ring.next == &fm->ring) {
259 read_unlock_irqrestore(&fm->lock, flags);
260 mutex_unlock(&dev->struct_mutex);
263 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
264 fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
265 atomic_inc(&fence->usage);
266 mutex_unlock(&dev->struct_mutex);
267 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
268 read_unlock_irqrestore(&fm->lock, flags);
269 if (diff < driver->wrap_diff) {
270 drm_fence_object_flush(dev, fence, fence->type);
272 drm_fence_usage_deref_unlocked(dev, fence);
275 EXPORT_SYMBOL(drm_fence_flush_old);
277 static int drm_fence_lazy_wait(drm_device_t *dev,
278 drm_fence_object_t *fence,
279 int ignore_signals, uint32_t mask)
281 drm_fence_manager_t *fm = &dev->fm;
282 unsigned long _end = jiffies + 3*DRM_HZ;
286 DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
287 fence_signaled(dev, fence, mask, 1));
288 if (time_after_eq(jiffies, _end))
290 } while (ret == -EINTR && ignore_signals);
291 if (time_after_eq(jiffies, _end) && (ret != 0))
295 DRM_ERROR("Fence timeout. "
296 "GPU lockup or fence driver was "
299 return ((ret == -EINTR) ? -EAGAIN : ret);
304 int drm_fence_object_wait(drm_device_t * dev,
305 drm_fence_object_t * fence,
306 int lazy, int ignore_signals, uint32_t mask)
308 drm_fence_driver_t *driver = dev->driver->fence_driver;
313 if (mask & ~fence->type) {
314 DRM_ERROR("Wait trying to extend fence type"
315 " 0x%08x 0x%08x\n", mask, fence->type);
319 if (fence_signaled(dev, fence, mask, 0))
322 _end = jiffies + 3 * DRM_HZ;
324 drm_fence_object_flush(dev, fence, mask);
326 if (lazy && driver->lazy_capable) {
328 ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
334 if (driver->has_irq(dev, fence->class,
335 DRM_FENCE_TYPE_EXE)) {
336 ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
342 if (driver->has_irq(dev, fence->class,
343 mask & ~DRM_FENCE_TYPE_EXE)) {
344 ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
350 if (fence_signaled(dev, fence, mask, 0))
353 DRM_ERROR("Busy wait\n");
355 * Avoid kernel-space busy-waits.
363 signaled = fence_signaled(dev, fence, mask, 1);
364 } while (!signaled && !time_after_eq(jiffies, _end));
372 int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
373 uint32_t fence_flags, uint32_t type)
375 drm_fence_manager_t *fm = &dev->fm;
376 drm_fence_driver_t *driver = dev->driver->fence_driver;
379 uint32_t native_type;
382 drm_fence_unring(dev, &fence->ring);
383 ret = driver->emit(dev, fence->class, fence_flags, &sequence, &native_type);
387 write_lock_irqsave(&fm->lock, flags);
389 fence->flush_mask = 0x00;
390 fence->submitted_flush = 0x00;
391 fence->signaled = 0x00;
392 fence->sequence = sequence;
393 fence->native_type = native_type;
394 list_add_tail(&fence->ring, &fm->ring);
395 write_unlock_irqrestore(&fm->lock, flags);
399 static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
400 uint32_t fence_flags,
401 drm_fence_object_t * fence)
405 drm_fence_manager_t *fm = &dev->fm;
407 mutex_lock(&dev->struct_mutex);
408 atomic_set(&fence->usage, 1);
409 mutex_unlock(&dev->struct_mutex);
411 write_lock_irqsave(&fm->lock, flags);
412 INIT_LIST_HEAD(&fence->ring);
415 fence->flush_mask = 0;
416 fence->submitted_flush = 0;
419 write_unlock_irqrestore(&fm->lock, flags);
420 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
421 ret = drm_fence_object_emit(dev, fence, fence_flags, type);
426 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
429 drm_device_t *dev = priv->head->dev;
432 mutex_lock(&dev->struct_mutex);
433 ret = drm_add_user_object(priv, &fence->base, shareable);
434 mutex_unlock(&dev->struct_mutex);
437 fence->base.type = drm_fence_type;
438 fence->base.remove = &drm_fence_object_destroy;
439 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
443 EXPORT_SYMBOL(drm_fence_add_user_object);
445 int drm_fence_object_create(drm_device_t * dev, uint32_t type,
446 unsigned flags, drm_fence_object_t ** c_fence)
448 drm_fence_object_t *fence;
450 drm_fence_manager_t *fm = &dev->fm;
452 fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
455 ret = drm_fence_object_init(dev, type, flags, fence);
457 drm_fence_usage_deref_unlocked(dev, fence);
461 atomic_inc(&fm->count);
466 EXPORT_SYMBOL(drm_fence_object_create);
468 void drm_fence_manager_init(drm_device_t * dev)
470 drm_fence_manager_t *fm = &dev->fm;
471 drm_fence_driver_t *fed = dev->driver->fence_driver;
474 fm->lock = RW_LOCK_UNLOCKED;
475 write_lock(&fm->lock);
476 INIT_LIST_HEAD(&fm->ring);
477 fm->pending_flush = 0;
478 DRM_INIT_WAITQUEUE(&fm->fence_queue);
482 atomic_set(&fm->count, 0);
483 for (i = 0; i < fed->no_types; ++i) {
484 fm->fence_types[i] = &fm->ring;
487 write_unlock(&fm->lock);
490 void drm_fence_manager_takedown(drm_device_t * dev)
494 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
496 drm_device_t *dev = priv->head->dev;
497 drm_user_object_t *uo;
498 drm_fence_object_t *fence;
500 mutex_lock(&dev->struct_mutex);
501 uo = drm_lookup_user_object(priv, handle);
502 if (!uo || (uo->type != drm_fence_type)) {
503 mutex_unlock(&dev->struct_mutex);
506 fence = drm_user_object_entry(uo, drm_fence_object_t, base);
507 atomic_inc(&fence->usage);
508 mutex_unlock(&dev->struct_mutex);
512 int drm_fence_ioctl(DRM_IOCTL_ARGS)
516 drm_fence_manager_t *fm = &dev->fm;
518 drm_fence_object_t *fence;
519 drm_user_object_t *uo;
523 if (!fm->initialized) {
524 DRM_ERROR("The DRM driver does not support fencing.\n");
528 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
530 case drm_fence_create:
531 if (arg.flags & DRM_FENCE_FLAG_EMIT)
532 LOCK_TEST_WITH_RETURN(dev, filp);
533 ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
536 ret = drm_fence_add_user_object(priv, fence,
538 DRM_FENCE_FLAG_SHAREABLE);
540 drm_fence_usage_deref_unlocked(dev, fence);
545 * usage > 0. No need to lock dev->struct_mutex;
548 atomic_inc(&fence->usage);
549 arg.handle = fence->base.hash.key;
551 case drm_fence_destroy:
552 mutex_lock(&dev->struct_mutex);
553 uo = drm_lookup_user_object(priv, arg.handle);
554 if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
555 mutex_unlock(&dev->struct_mutex);
558 ret = drm_remove_user_object(priv, uo);
559 mutex_unlock(&dev->struct_mutex);
561 case drm_fence_reference:
563 drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
566 fence = drm_lookup_fence_object(priv, arg.handle);
568 case drm_fence_unreference:
569 ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
571 case drm_fence_signaled:
572 fence = drm_lookup_fence_object(priv, arg.handle);
576 case drm_fence_flush:
577 fence = drm_lookup_fence_object(priv, arg.handle);
580 ret = drm_fence_object_flush(dev, fence, arg.type);
583 fence = drm_lookup_fence_object(priv, arg.handle);
587 drm_fence_object_wait(dev, fence,
588 arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
592 LOCK_TEST_WITH_RETURN(dev, filp);
593 fence = drm_lookup_fence_object(priv, arg.handle);
596 ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
598 case drm_fence_buffers:
599 if (!dev->bm.initialized) {
600 DRM_ERROR("Buffer object manager is not initialized\n");
603 LOCK_TEST_WITH_RETURN(dev, filp);
604 ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
608 ret = drm_fence_add_user_object(priv, fence,
610 DRM_FENCE_FLAG_SHAREABLE);
613 atomic_inc(&fence->usage);
614 arg.handle = fence->base.hash.key;
619 read_lock_irqsave(&fm->lock, flags);
620 arg.class = fence->class;
621 arg.type = fence->type;
622 arg.signaled = fence->signaled;
623 read_unlock_irqrestore(&fm->lock, flags);
624 drm_fence_usage_deref_unlocked(dev, fence);
626 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));