1 /**************************************************************************
3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
27 **************************************************************************/
29 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
/*
 * Repoint the fence manager's per-type list pointer at @list for every
 * fence type selected in the @type bitmask (bit i selects type i).
 * NOTE(review): the parameter-list tail, loop-variable declaration and
 * closing braces are elided in this listing; caller presumably holds
 * fm->lock — TODO confirm against the full source.
 */
34 static void drm_fm_update_pointers(drm_fence_manager_t * fm,
35 struct list_head *list, int no_types,
39 for (i = 0; i < no_types; ++i) {
40 if (type & (1 << i)) {
41 fm->fence_types[i] = list;
47 * Typically called by the IRQ handler.
/*
 * Process a hardware-reported @sequence for the fence types in @type:
 * mark affected fences signaled, schedule any still-needed flushes, drop
 * fully-signaled fences from the ring, and wake waiters.
 * NOTE(review): many interior lines (brace closures, `continue`/`break`
 * statements, the `largest`/`index` bookkeeping) are elided in this
 * listing; comments below describe only what the visible lines show.
 */
50 void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
58 drm_fence_manager_t *fm = &dev->fm;
59 drm_fence_driver_t *driver = dev->driver->fence_driver;
60 struct list_head *list;
61 struct list_head *fence_list;
62 drm_fence_object_t *fence;
/*
 * Pass 1: for each fence type in @type, look at the oldest fence on that
 * type's list and compute its distance behind @sequence, masked to the
 * driver's sequence width so wraparound compares correctly.
 */
65 for (i = 0; i < driver->no_types; ++i) {
66 if (!(type & (1 << i)))
69 list = fm->fence_types[i];
70 fence_list = list->next;
/* Empty list for this type: nothing pending. */
72 if (fence_list == &fm->ring)
75 fence = list_entry(fence_list, drm_fence_object_t, ring);
77 diff = (sequence - fence->sequence) & driver->sequence_mask;
/* diff < wrap_diff means the fence sequence has genuinely passed. */
79 if (diff < driver->wrap_diff) {
/* Track the furthest-behind starting point (index set in elided code). */
80 if (diff >= largest) {
92 * Start with the fence object with the lowest sequence number, affected by
93 * the type mask of this call. Update signaled fields,
94 * Check if we need to wake sleeping processes
97 list = fm->fence_types[index]->next;
/* Reached the list head: everything up to here is handled. */
99 if (list == &fm->ring) {
100 drm_fm_update_pointers(fm, list->prev,
101 driver->no_types, type);
104 fence = list_entry(list, drm_fence_object_t, ring);
105 diff = (sequence - fence->sequence) & driver->sequence_mask;
/* This fence is newer than @sequence: stop the walk here. */
106 if (diff >= driver->wrap_diff) {
107 drm_fm_update_pointers(fm, fence->ring.prev,
108 driver->no_types, type);
/* Signal the overlap between the reported types and this fence's types. */
111 relevant = type & fence->type;
112 if ((fence->signaled | relevant) != fence->signaled) {
113 fence->signaled |= relevant;
115 DRM_ERROR("Fence 0x%08lx signaled 0x%08x\n",
116 fence->base.hash.key, fence->signaled);
/* A signaled type counts as an already-submitted flush. */
118 fence->submitted_flush |= relevant;
/* Flushes requested but neither signaled nor submitted become pending. */
122 relevant = fence->flush_mask &
123 ~(fence->signaled | fence->submitted_flush);
125 fm->pending_flush |= relevant;
126 fence->submitted_flush = fence->flush_mask;
132 * Remove a completely signaled fence from the
133 * fence manager ring.
/* All of the fence's types are now signaled. */
136 if (!(fence->type & ~fence->signaled)) {
138 DRM_ERROR("Fence completely signaled 0x%08lx\n",
139 fence->base.hash.key);
141 fence_list = &fence->ring;
/* Fix up any per-type pointer that referenced the removed node. */
142 for (i = 0; i < driver->no_types; ++i) {
143 if (fm->fence_types[i] == fence_list)
144 fm->fence_types[i] = fence_list->prev;
146 list_del_init(fence_list);
152 * Wake sleeping processes.
156 DRM_WAKEUP(&fm->fence_queue);
/*
 * Detach @ring (a fence's ring node) from the fence manager's tracking:
 * under fm->lock, any per-type pointer that references @ring is moved to
 * the previous node.  The list_del itself is elided from this listing.
 */
162 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
164 drm_fence_manager_t *fm = &dev->fm;
165 drm_fence_driver_t *driver = dev->driver->fence_driver;
169 write_lock_irqsave(&fm->lock, flags);
170 for (i = 0; i < driver->no_types; ++i) {
171 if (fm->fence_types[i] == ring)
172 fm->fence_types[i] = ring->prev;
175 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Drop one reference on @fence; on the last reference, unhook it from the
 * manager ring and return it to the fence-object slab cache.
 * NOTE(review): "_locked" presumably means dev->struct_mutex is held by
 * the caller — TODO confirm; the lock is not visible in this listing.
 */
178 void drm_fence_usage_deref_locked(drm_device_t * dev,
179 drm_fence_object_t * fence)
181 if (atomic_dec_and_test(&fence->usage)) {
182 drm_fence_unring(dev, &fence->ring);
184 DRM_ERROR("Destroyed a fence object 0x%08lx\n",
185 fence->base.hash.key);
187 kmem_cache_free(drm_cache.fence_object, fence);
/*
 * Drop one reference on @fence without the struct mutex held.  When the
 * count hits zero, struct_mutex is taken and the count re-checked before
 * freeing, guarding against a concurrent re-reference under the mutex.
 */
191 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
192 drm_fence_object_t * fence)
194 if (atomic_dec_and_test(&fence->usage)) {
195 mutex_lock(&dev->struct_mutex);
/* Re-check: someone may have taken a new reference before we locked. */
196 if (atomic_read(&fence->usage) == 0) {
197 drm_fence_unring(dev, &fence->ring);
198 kmem_cache_free(drm_cache.fence_object, fence);
200 mutex_unlock(&dev->struct_mutex);
/*
 * User-object removal callback: recover the containing fence object from
 * its embedded drm_user_object_t and drop the reference it held.
 */
204 static void drm_fence_object_destroy(drm_file_t * priv,
205 drm_user_object_t * base)
207 drm_device_t *dev = priv->head->dev;
208 drm_fence_object_t *fence =
209 drm_user_object_entry(base, drm_fence_object_t, base);
211 drm_fence_usage_deref_locked(dev, fence);
/*
 * Return whether all of @fence's types selected by @mask are signaled,
 * read under fm->lock.  @poke_flush evidently gates the driver poke_flush
 * call (the conditional itself is elided from this listing).
 */
214 static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
215 uint32_t mask, int poke_flush)
219 drm_fence_manager_t *fm = &dev->fm;
220 drm_fence_driver_t *driver = dev->driver->fence_driver;
223 driver->poke_flush(dev);
224 read_lock_irqsave(&fm->lock, flags);
/* True iff every masked type bit of the fence is already signaled. */
226 (fence->type & mask & fence->signaled) == (fence->type & mask);
227 read_unlock_irqrestore(&fm->lock, flags);
/*
 * Record a pending EXE-type flush up to @sequence.  If no EXE flush is
 * pending, last_exe_flush is (re)derived from the oldest EXE fence; an
 * already-pending flush target is only advanced, never moved backwards.
 * Sequence comparisons are masked and checked against wrap_diff to stay
 * correct across sequence-counter wraparound.
 * NOTE(review): several early-return lines are elided in this listing.
 */
232 static void drm_fence_flush_exe(drm_fence_manager_t * fm,
233 drm_fence_driver_t * driver, uint32_t sequence)
237 if (!fm->pending_exe_flush) {
238 struct list_head *list;
241 * Last_exe_flush is invalid. Find oldest sequence.
244 list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
/* Empty ring: nothing to base last_exe_flush on. */
245 if (list->next == &fm->ring) {
248 drm_fence_object_t *fence =
249 list_entry(list->next, drm_fence_object_t, ring);
/* One before the oldest fence, so that fence is still "unflushed". */
250 fm->last_exe_flush = (fence->sequence - 1) &
251 driver->sequence_mask;
253 diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
/* Requested sequence is behind last_exe_flush: nothing new to flush. */
254 if (diff >= driver->wrap_diff)
256 fm->exe_flush_sequence = sequence;
257 fm->pending_exe_flush = 1;
260 (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
/* Only advance the pending flush target, never retreat it. */
261 if (diff < driver->wrap_diff) {
262 fm->exe_flush_sequence = sequence;
/*
 * Lockless query: nonzero iff every type bit in @type is already set in
 * fence->signaled.
 */
267 int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
269 return ((fence->signaled & type) == type);
273 * Make sure old fence objects are signaled before their fence sequences are
274 * wrapped around and reused.
/*
 * Request a flush of @fence for the types in @type.  @type must be a
 * subset of the fence's own type mask.  Under fm->lock the request is
 * merged into flush_mask; if nothing has been submitted beyond what is
 * already signaled, an EXE flush is scheduled first (if applicable) and
 * the remainder is added to the manager's pending_flush.  Finally the
 * driver is poked to process outstanding flushes.
 */
277 int drm_fence_object_flush(drm_device_t * dev,
278 drm_fence_object_t * fence, uint32_t type)
280 drm_fence_manager_t *fm = &dev->fm;
281 drm_fence_driver_t *driver = dev->driver->fence_driver;
/* Reject flush requests for types this fence was never created with. */
284 if (type & ~fence->type) {
285 DRM_ERROR("Flush trying to extend fence type\n");
289 write_lock_irqsave(&fm->lock, flags);
290 fence->flush_mask |= type;
/* Only submit new flushes if nothing beyond signaled is in flight. */
291 if (fence->submitted_flush == fence->signaled) {
292 if ((fence->type & DRM_FENCE_EXE) &&
293 !(fence->submitted_flush & DRM_FENCE_EXE)) {
294 drm_fence_flush_exe(fm, driver, fence->sequence);
295 fence->submitted_flush |= DRM_FENCE_EXE;
297 fm->pending_flush |= (fence->flush_mask &
298 ~fence->submitted_flush);
299 fence->submitted_flush = fence->flush_mask;
302 write_unlock_irqrestore(&fm->lock, flags);
303 driver->poke_flush(dev);
/*
 * Flush the oldest fence on the ring if it is older than
 * (@sequence - driver->flush_diff), so old fences get signaled before the
 * sequence counter wraps and their numbers are reused.  A reference is
 * taken on the fence before the struct mutex is dropped.
 * NOTE(review): struct_mutex is released (line 326) while the irqsave
 * read lock on fm->lock is still held until line 328 — verify this lock
 * ordering is intentional in the full source.
 */
307 void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
309 drm_fence_manager_t *fm = &dev->fm;
310 drm_fence_driver_t *driver = dev->driver->fence_driver;
311 uint32_t old_sequence;
313 drm_fence_object_t *fence;
316 mutex_lock(&dev->struct_mutex);
317 read_lock_irqsave(&fm->lock, flags);
/* Empty ring: nothing to flush. */
318 if (fm->ring.next == &fm->ring) {
319 read_unlock_irqrestore(&fm->lock, flags);
320 mutex_unlock(&dev->struct_mutex);
323 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
324 fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
/* Pin the fence so it survives after the mutex is dropped. */
325 atomic_inc(&fence->usage);
326 mutex_unlock(&dev->struct_mutex);
327 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
328 read_unlock_irqrestore(&fm->lock, flags);
/* Oldest fence is at or behind the cutoff: flush all its types. */
329 if (diff < driver->wrap_diff) {
330 drm_fence_object_flush(dev, fence, fence->type);
332 drm_fence_usage_deref_unlocked(dev, fence);
335 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * Wait for @fence to signal the types in @mask, with a 3*DRM_HZ timeout.
 * @lazy selects a sleeping wait on fm->fence_queue when the driver is
 * lazy-capable; otherwise the visible fallback polls fence_signaled()
 * until signaled or timed out.  @ignore_signals keeps retrying the wait
 * across -EINTR.  @mask must be a subset of the fence's type mask.
 * NOTE(review): several branches and return statements are elided in
 * this listing.
 */
337 int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
338 int lazy, int ignore_signals, uint32_t mask)
340 drm_fence_manager_t *fm = &dev->fm;
341 drm_fence_driver_t *driver = dev->driver->fence_driver;
345 if (mask & ~fence->type) {
346 DRM_ERROR("Wait trying to extend fence type\n");
/* Fast path: already signaled, no flush poke needed. */
350 if (fence_signaled(dev, fence, mask, 0))
353 _end = jiffies + 3 * DRM_HZ;
355 drm_fence_object_flush(dev, fence, mask);
356 if (lazy && driver->lazy_capable) {
/* Sleeping wait, re-armed on interruption if signals are ignored. */
358 DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
359 fence_signaled(dev, fence, mask, 1));
360 if (time_after_eq(jiffies, _end))
362 } while (ret == -EINTR && ignore_signals);
364 if (time_after_eq(jiffies, _end) && (ret != 0))
/* Busy-wait path: poll with flush poke until signaled or timeout. */
371 signaled = fence_signaled(dev, fence, mask, 1);
372 } while (!signaled && !time_after_eq(jiffies, _end));
/*
 * (Re)emit @fence to hardware: unhook it from the manager, ask the driver
 * for a fresh sequence number, then reset its flush/signal state under
 * fm->lock and append it to the tail of the manager ring (the ring is
 * kept in sequence order, oldest first).
 */
379 int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
382 drm_fence_manager_t *fm = &dev->fm;
383 drm_fence_driver_t *driver = dev->driver->fence_driver;
/* Detach first in case the fence was previously emitted. */
388 drm_fence_unring(dev, &fence->ring);
389 ret = driver->emit(dev, &sequence);
393 write_lock_irqsave(&fm->lock, flags);
395 fence->flush_mask = 0x00;
396 fence->submitted_flush = 0x00;
397 fence->signaled = 0x00;
398 fence->sequence = sequence;
399 list_add_tail(&fence->ring, &fm->ring);
400 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Initialize a caller-allocated fence object of the given @type: usage
 * count starts at 1, the ring node and flush state are cleared under
 * fm->lock, and the fence is emitted via drm_fence_object_emit.
 * NOTE(review): use of the @emit flag and intermediate lines are elided
 * in this listing — presumably emission is conditional on @emit.
 */
404 int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
405 drm_fence_object_t * fence)
409 drm_fence_manager_t *fm = &dev->fm;
411 mutex_lock(&dev->struct_mutex);
412 atomic_set(&fence->usage, 1);
413 mutex_unlock(&dev->struct_mutex);
415 write_lock_irqsave(&fm->lock, flags);
416 INIT_LIST_HEAD(&fence->ring);
419 fence->flush_mask = 0;
420 fence->submitted_flush = 0;
423 write_unlock_irqrestore(&fm->lock, flags);
425 ret = drm_fence_object_emit(dev, fence, type);
430 EXPORT_SYMBOL(drm_fence_object_init);
/*
 * Publish @fence as a user-visible object for @priv: register it under
 * dev->struct_mutex, then stamp its type and removal callback so that
 * user-object teardown drops the fence reference.
 */
432 int drm_fence_add_user_object(drm_file_t *priv, drm_fence_object_t *fence,
435 drm_device_t *dev = priv->head->dev;
438 mutex_lock(&dev->struct_mutex);
439 ret = drm_add_user_object(priv, &fence->base, shareable);
440 mutex_unlock(&dev->struct_mutex);
443 fence->base.type = drm_fence_type;
444 fence->base.remove = &drm_fence_object_destroy;
446 DRM_ERROR("Fence 0x%08lx created\n", fence->base.hash.key);
450 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * Allocate a fence object from the slab cache and initialize it; on init
 * failure the reference is dropped (which frees the object).  On success
 * the fence is presumably returned through @c_fence — the assignment is
 * elided from this listing.
 */
452 int drm_fence_object_create(drm_device_t *dev, uint32_t type,
453 int emit, drm_fence_object_t **c_fence)
455 drm_fence_object_t *fence;
458 fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
461 ret = drm_fence_object_init(dev, type, emit, fence);
463 drm_fence_usage_deref_unlocked(dev, fence);
469 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * One-time setup of the per-device fence manager: lock, empty ring,
 * cleared pending-flush state, wait queue, and every per-type list
 * pointer aimed at the ring head (i.e. "no fences of this type yet").
 */
472 void drm_fence_manager_init(drm_device_t * dev)
474 drm_fence_manager_t *fm = &dev->fm;
475 drm_fence_driver_t *fed = dev->driver->fence_driver;
478 fm->lock = RW_LOCK_UNLOCKED;
479 INIT_LIST_HEAD(&fm->ring);
480 fm->pending_flush = 0;
481 DRM_INIT_WAITQUEUE(&fm->fence_queue);
485 for (i = 0; i < fed->no_types; ++i) {
486 fm->fence_types[i] = &fm->ring;
/* Fence manager teardown; the body is elided from this listing. */
491 void drm_fence_manager_takedown(drm_device_t * dev)
/*
 * Resolve a user @handle to its fence object under dev->struct_mutex.
 * Rejects handles that don't name a fence-type user object; on success a
 * usage reference is taken before returning (caller must deref).
 * NOTE(review): the failure-path return value is elided in this listing.
 */
495 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
497 drm_device_t *dev = priv->head->dev;
498 drm_user_object_t *uo;
499 drm_fence_object_t *fence;
501 mutex_lock(&dev->struct_mutex);
502 uo = drm_lookup_user_object(priv, handle);
503 if (!uo || (uo->type != drm_fence_type)) {
504 mutex_unlock(&dev->struct_mutex);
507 fence = drm_user_object_entry(uo, drm_fence_object_t, base);
508 atomic_inc(&fence->usage);
509 mutex_unlock(&dev->struct_mutex);
/*
 * Fence ioctl dispatcher: copies the user argument in, switches on the
 * requested operation, and copies the (possibly updated) argument back.
 * NOTE(review): this function continues past the end of this listing and
 * many interior lines (switch header, break/error returns) are elided;
 * comments describe only the visible lines.
 */
513 int drm_fence_ioctl(DRM_IOCTL_ARGS)
517 drm_fence_manager_t *fm = &dev->fm;
519 drm_fence_object_t *fence;
520 drm_user_object_t *uo;
/* Fencing is optional per-driver; bail out if never initialized. */
524 if (!fm->initialized) {
525 DRM_ERROR("The DRM driver does not support fencing.\n");
529 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
/* Create a fence, optionally emitting it (emit requires the HW lock). */
531 case drm_fence_create:
532 if (arg.flags & DRM_FENCE_FLAG_EMIT)
533 LOCK_TEST_WITH_RETURN(dev, filp);
534 ret = drm_fence_object_create(dev, arg.type,
535 arg.flags & DRM_FENCE_FLAG_EMIT,
539 ret = drm_fence_add_user_object(priv, fence,
541 DRM_FENCE_FLAG_SHAREABLE);
/* Registration failed: drop the creation reference. */
543 drm_fence_usage_deref_unlocked(dev, fence);
548 * usage > 0. No need to lock dev->struct_mutex;
/* Hand an extra reference plus the handle back to userspace. */
551 atomic_inc(&fence->usage);
552 arg.handle = fence->base.hash.key;
/* Destroy: only the owning file may remove its user object. */
554 case drm_fence_destroy:
555 mutex_lock(&dev->struct_mutex);
556 uo = drm_lookup_user_object(priv, arg.handle);
557 if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
558 mutex_unlock(&dev->struct_mutex);
561 ret = drm_remove_user_object(priv, uo);
562 mutex_unlock(&dev->struct_mutex);
/* Reference / unreference a (possibly shared) fence handle. */
564 case drm_fence_reference:
566 drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
569 fence = drm_lookup_fence_object(priv, arg.handle);
571 case drm_fence_unreference:
572 ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
/* Query signaled state (status copied back at the common tail below). */
574 case drm_fence_signaled:
575 fence = drm_lookup_fence_object(priv, arg.handle);
579 case drm_fence_flush:
580 fence = drm_lookup_fence_object(priv, arg.handle);
583 ret = drm_fence_object_flush(dev, fence, arg.type);
/* Wait on a fence (case label elided in this listing). */
586 fence = drm_lookup_fence_object(priv, arg.handle);
590 drm_fence_object_wait(dev, fence,
591 arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
/* Emit requires the HW lock (case label elided in this listing). */
596 LOCK_TEST_WITH_RETURN(dev, filp);
597 fence = drm_lookup_fence_object(priv, arg.handle);
600 ret = drm_fence_object_emit(dev, fence, arg.type);
/* Fence all buffer-manager buffers; needs the buffer manager up. */
602 case drm_fence_buffers:
603 if (!dev->bm.initialized) {
604 DRM_ERROR("Buffer object manager is not initialized\n");
607 LOCK_TEST_WITH_RETURN(dev, filp);
608 ret = drm_fence_buffer_objects(priv, NULL, NULL, &fence);
611 ret = drm_fence_add_user_object(priv, fence,
613 DRM_FENCE_FLAG_SHAREABLE);
616 atomic_inc(&fence->usage);
617 arg.handle = fence->base.hash.key;
/* Common tail: snapshot fence status under fm->lock and drop our ref. */
622 read_lock_irqsave(&fm->lock, flags);
623 arg.class = fence->class;
624 arg.type = fence->type;
625 arg.signaled = fence->signaled;
626 read_unlock_irqrestore(&fm->lock, flags);
627 drm_fence_usage_deref_unlocked(dev, fence);
629 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));