1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Typically called by the IRQ handler.
/*
 * Advance fence state for fence class @class when the hardware reports that
 * @sequence has been reached, with signal/flush bits in @type.
 * Typically called by the IRQ handler (per the comment above): signals every
 * fence in the class ring whose sequence has been passed (wrap-safe compare
 * via driver->sequence_mask / driver->wrap_diff), accumulates still-needed
 * flushes into fc->pending_flush, unlinks fully-signaled fences, and wakes
 * waiters on fc->fence_queue.
 *
 * NOTE(review): this chunk elides several lines of the original (local
 * declarations such as diff/ge_last_exe/found/relevant, and some braces);
 * comments below annotate only the visible logic.
 */
37 void drm_fence_handler(drm_device_t * dev, uint32_t class,
38 uint32_t sequence, uint32_t type)
43 drm_fence_manager_t *fm = &dev->fm;
44 drm_fence_class_manager_t *fc = &fm->class[class];
45 drm_fence_driver_t *driver = dev->driver->fence_driver;
46 struct list_head *head;
47 drm_fence_object_t *fence, *next;
49 int is_exe = (type & DRM_FENCE_TYPE_EXE);
/* Wrap-safe compare: clear a pending EXE flush request once the reported
 * sequence has caught up with the requested flush sequence. */
52 diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
54 if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
55 fc->pending_exe_flush = 0;
57 diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
58 ge_last_exe = diff < driver->wrap_diff;
/* The bits the hardware just reported are no longer pending flushes. */
61 fc->pending_flush &= ~type;
63 if (is_exe && ge_last_exe) {
64 fc->last_exe_flush = sequence;
67 if (list_empty(&fc->ring))
/* Locate the first fence NOT yet passed by @sequence; everything behind
 * that point in the ring can be signaled. */
70 list_for_each_entry(fence, &fc->ring, ring) {
71 diff = (sequence - fence->sequence) & driver->sequence_mask;
72 if (diff > driver->wrap_diff) {
78 head = (found) ? &fence->ring : &fc->ring;
/* Walk backwards from that point, signaling the passed fences. */
80 list_for_each_entry_safe_reverse(fence, next, head, ring) {
81 if (&fence->ring == &fc->ring)
/* Fold in fence-native signal bits declared at emit time. */
84 type |= fence->native_type;
85 relevant = type & fence->type;
87 if ((fence->signaled | relevant) != fence->signaled) {
88 fence->signaled |= relevant;
89 DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
90 fence->base.hash.key, fence->signaled);
91 fence->submitted_flush |= relevant;
/* Flush bits requested on this fence but neither signaled nor already
 * submitted are handed to the class manager as pending flushes. */
95 relevant = fence->flush_mask &
96 ~(fence->signaled | fence->submitted_flush);
99 fc->pending_flush |= relevant;
100 fence->submitted_flush = fence->flush_mask;
/* All of fence->type signaled: drop it from the ring. */
103 if (!(fence->type & ~fence->signaled)) {
104 DRM_DEBUG("Fence completely signaled 0x%08lx\n",
105 fence->base.hash.key);
106 list_del_init(&fence->ring);
/* Wake sleepers in drm_fence_lazy_wait() / drm_fence_object_wait(). */
111 DRM_WAKEUP(&fc->fence_queue);
115 EXPORT_SYMBOL(drm_fence_handler);
/*
 * Unlink a fence's ring entry under the fence-manager write lock, so the
 * IRQ-side drm_fence_handler() cannot race with the removal.
 * NOTE(review): the statement between lock and unlock (presumably the
 * list_del_init() of @ring) is elided in this chunk — confirm in full file.
 */
117 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
119 drm_fence_manager_t *fm = &dev->fm;
122 write_lock_irqsave(&fm->lock, flags);
124 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Drop one reference to *fence; caller must hold dev->struct_mutex
 * (asserted below).  On the final reference the fence is removed from its
 * class ring, the manager's object count is decremented, and the object
 * storage is freed.  The BUG_ON guards against freeing a fence still
 * linked on its user-object list.
 */
127 void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
129 struct drm_fence_object *tmp_fence = *fence;
130 struct drm_device *dev = tmp_fence->dev;
131 drm_fence_manager_t *fm = &dev->fm;
133 DRM_ASSERT_LOCKED(&dev->struct_mutex);
135 if (atomic_dec_and_test(&tmp_fence->usage)) {
136 drm_fence_unring(dev, &tmp_fence->ring);
137 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
138 tmp_fence->base.hash.key);
139 atomic_dec(&fm->count);
140 BUG_ON(!list_empty(&tmp_fence->base.list));
141 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
/*
 * Drop one reference to *fence without dev->struct_mutex held.
 * If the count hits zero, struct_mutex is taken and the usage count is
 * re-checked under the lock: this closes the race where another thread
 * re-referenced the fence (via drm_fence_reference_locked) between the
 * atomic_dec_and_test and the mutex_lock.  Only if it is still zero is
 * the object unlinked and freed.
 */
145 void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence)
147 struct drm_fence_object *tmp_fence = *fence;
148 struct drm_device *dev = tmp_fence->dev;
149 drm_fence_manager_t *fm = &dev->fm;
152 if (atomic_dec_and_test(&tmp_fence->usage)) {
153 mutex_lock(&dev->struct_mutex);
/* Re-check under the mutex: someone may have taken a new reference. */
154 if (atomic_read(&tmp_fence->usage) == 0) {
155 drm_fence_unring(dev, &tmp_fence->ring);
156 atomic_dec(&fm->count);
157 BUG_ON(!list_empty(&tmp_fence->base.list));
158 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
160 mutex_unlock(&dev->struct_mutex);
/*
 * Take a reference on @src with dev->struct_mutex held (asserted) and
 * return the referenced fence.
 * NOTE(review): the `return src;` line is elided in this chunk.
 */
164 struct drm_fence_object
165 *drm_fence_reference_locked(struct drm_fence_object *src)
167 DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
169 atomic_inc(&src->usage);
/*
 * Take a reference on @src, storing it in *dst, with the usage increment
 * performed under dev->struct_mutex for unlocked callers.
 * NOTE(review): the `*dst = src;` assignment is elided in this chunk.
 */
173 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
174 struct drm_fence_object *src)
176 mutex_lock(&src->dev->struct_mutex);
178 atomic_inc(&src->usage);
179 mutex_unlock(&src->dev->struct_mutex);
/*
 * User-object "remove" callback (installed in drm_fence_add_user_object):
 * recovers the fence from its embedded base and drops the reference the
 * user object held.  Runs with dev->struct_mutex held (deref_locked).
 */
183 static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base)
185 drm_fence_object_t *fence =
186 drm_user_object_entry(base, drm_fence_object_t, base);
188 drm_fence_usage_deref_locked(&fence);
/*
 * Test whether all bits of @mask (restricted to fence->type) are signaled.
 * The driver's poke_flush hook is invoked first to prod flush/IRQ state;
 * NOTE(review): the conditional gating poke_flush on @poke_flush is elided
 * here — confirm against the full file.  The signaled bits are then read
 * under the fence-manager read lock.
 */
191 int drm_fence_object_signaled(drm_fence_object_t * fence,
192 uint32_t mask, int poke_flush)
196 struct drm_device *dev = fence->dev;
197 drm_fence_manager_t *fm = &dev->fm;
198 drm_fence_driver_t *driver = dev->driver->fence_driver;
201 driver->poke_flush(dev, fence->class);
202 read_lock_irqsave(&fm->lock, flags);
/* True iff every requested-and-relevant type bit is already signaled. */
204 (fence->type & mask & fence->signaled) == (fence->type & mask);
205 read_unlock_irqrestore(&fm->lock, flags);
/*
 * Record a request for an EXE flush at @sequence on class manager @fc.
 * If no flush is pending, start one; otherwise the wrap-safe comparison
 * below adjusts the recorded flush sequence toward @sequence.
 * Caller is expected to hold the fence-manager write lock (all callers
 * visible in this chunk do).
 */
210 static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
211 drm_fence_driver_t * driver, uint32_t sequence)
215 if (!fc->pending_exe_flush) {
216 fc->exe_flush_sequence = sequence;
217 fc->pending_exe_flush = 1;
220 (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
221 if (diff < driver->wrap_diff) {
222 fc->exe_flush_sequence = sequence;
/*
 * Request a flush for the @type bits of @fence.  Rejects attempts to flush
 * bits the fence was never emitted with (type & ~fence->type).  Under the
 * write lock, the request is merged into fence->flush_mask; EXE flushes go
 * through drm_fence_flush_exe(), other bits are handed to the class
 * manager's pending_flush.  Finally the driver is poked to act on the
 * newly pending flushes.
 */
227 int drm_fence_object_flush(drm_fence_object_t * fence,
230 struct drm_device *dev = fence->dev;
231 drm_fence_manager_t *fm = &dev->fm;
232 drm_fence_class_manager_t *fc = &fm->class[fence->class];
233 drm_fence_driver_t *driver = dev->driver->fence_driver;
236 if (type & ~fence->type) {
237 DRM_ERROR("Flush trying to extend fence type, "
238 "0x%x, 0x%x\n", type, fence->type);
242 write_lock_irqsave(&fm->lock, flags);
243 fence->flush_mask |= type;
/* Only submit new flushes when nothing submitted is still outstanding. */
244 if (fence->submitted_flush == fence->signaled) {
245 if ((fence->type & DRM_FENCE_TYPE_EXE) &&
246 !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
247 drm_fence_flush_exe(fc, driver, fence->sequence);
248 fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
250 fc->pending_flush |= (fence->flush_mask &
251 ~fence->submitted_flush);
252 fence->submitted_flush = fence->flush_mask;
255 write_unlock_irqrestore(&fm->lock, flags);
256 driver->poke_flush(dev, fence->class);
/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 *
 * Computes the "old" sequence (driver->flush_diff behind @sequence) and, if
 * the class has advanced past it without a pending EXE flush, schedules one
 * half a flush_diff back.  Then, if the oldest fence on the ring is older
 * than old_sequence, a full flush of that fence's type bits is requested.
 *
 * NOTE(review): as visible here, read_unlock_irqrestore (L163 / orig 296)
 * happens AFTER mutex_unlock and after dereferencing fence->sequence — the
 * lock ordering around lines 286-296 looks suspicious; compare with the
 * upstream file before relying on it.
 */
265 void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
267 drm_fence_manager_t *fm = &dev->fm;
268 drm_fence_class_manager_t *fc = &fm->class[class];
269 drm_fence_driver_t *driver = dev->driver->fence_driver;
270 uint32_t old_sequence;
272 drm_fence_object_t *fence;
275 write_lock_irqsave(&fm->lock, flags);
276 old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
277 diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;
279 if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
280 fc->pending_exe_flush = 1;
281 fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
283 write_unlock_irqrestore(&fm->lock, flags);
285 mutex_lock(&dev->struct_mutex);
286 read_lock_irqsave(&fm->lock, flags);
/* Nothing on the ring: nothing to flush. */
288 if (list_empty(&fc->ring)) {
289 read_unlock_irqrestore(&fm->lock, flags);
290 mutex_unlock(&dev->struct_mutex);
/* Reference the oldest fence on the ring (head->next). */
293 fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring));
294 mutex_unlock(&dev->struct_mutex);
295 diff = (old_sequence - fence->sequence) & driver->sequence_mask;
296 read_unlock_irqrestore(&fm->lock, flags);
297 if (diff < driver->wrap_diff) {
298 drm_fence_object_flush(fence, fence->type);
300 drm_fence_usage_deref_unlocked(&fence);
303 EXPORT_SYMBOL(drm_fence_flush_old);
/*
 * Sleep on the class wait queue until @mask is signaled, up to 3*DRM_HZ.
 * If @ignore_signals is set, -EINTR wakeups are retried until the deadline.
 * Returns 0 on signal; on timeout logs a GPU-lockup / fence-driver error.
 * NOTE(review): several lines (loop braces, return statements, the rest of
 * the error message) are elided in this chunk.
 */
305 static int drm_fence_lazy_wait(drm_fence_object_t *fence,
309 struct drm_device *dev = fence->dev;
310 drm_fence_manager_t *fm = &dev->fm;
311 drm_fence_class_manager_t *fc = &fm->class[fence->class];
313 unsigned long _end = jiffies + 3*DRM_HZ;
317 DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
318 (signaled = drm_fence_object_signaled(fence, mask, 1)));
321 if (time_after_eq(jiffies, _end))
323 } while (ret == -EINTR && ignore_signals);
324 if (drm_fence_object_signaled(fence, mask, 0))
326 if (time_after_eq(jiffies, _end))
330 DRM_ERROR("Fence timeout. "
331 "GPU lockup or fence driver was "
/* Map an interrupted wait to -EAGAIN so callers can restart. */
334 return ((ret == -EINTR) ? -EAGAIN : ret);
/*
 * Wait until the @mask bits of @fence are signaled.
 * @lazy selects IRQ-driven sleeping (when the driver is lazy_capable or
 * has_irq for the relevant bits) via drm_fence_lazy_wait(); otherwise the
 * function falls through to a polling loop bounded by 3*DRM_HZ.
 * Rejects masks extending beyond fence->type, and fast-paths fences that
 * are already signaled.
 */
339 int drm_fence_object_wait(drm_fence_object_t * fence,
340 int lazy, int ignore_signals, uint32_t mask)
342 struct drm_device *dev = fence->dev;
343 drm_fence_driver_t *driver = dev->driver->fence_driver;
348 if (mask & ~fence->type) {
349 DRM_ERROR("Wait trying to extend fence type"
350 " 0x%08x 0x%08x\n", mask, fence->type);
/* Fast path: already signaled, no flush poke needed. */
354 if (drm_fence_object_signaled(fence, mask, 0))
357 _end = jiffies + 3 * DRM_HZ;
/* Ensure the flushes needed for @mask are in flight before sleeping. */
359 drm_fence_object_flush(fence, mask);
361 if (lazy && driver->lazy_capable) {
363 ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
/* Not fully lazy-capable: lazy-wait only on bits the driver can IRQ on. */
369 if (driver->has_irq(dev, fence->class,
370 DRM_FENCE_TYPE_EXE)) {
371 ret = drm_fence_lazy_wait(fence, ignore_signals,
377 if (driver->has_irq(dev, fence->class,
378 mask & ~DRM_FENCE_TYPE_EXE)) {
379 ret = drm_fence_lazy_wait(fence, ignore_signals,
385 if (drm_fence_object_signaled(fence, mask, 0))
389 * Avoid kernel-space busy-waits.
/* Polling fallback: poke the flush hook each iteration until signaled
 * or the 3*DRM_HZ deadline passes. */
397 signaled = drm_fence_object_signaled(fence, mask, 1);
398 } while (!signaled && !time_after_eq(jiffies, _end));
/*
 * (Re-)emit @fence to the hardware: unlink any previous ring entry, ask
 * the driver to emit a new sequence/native_type, then reset the fence's
 * flush/signal state under the write lock and append it to the tail of
 * its class ring.  When the ring was empty, last_exe_flush is primed to
 * sequence - 1 so the new fence is "ahead" of the class state.
 */
406 int drm_fence_object_emit(drm_fence_object_t * fence,
407 uint32_t fence_flags, uint32_t class, uint32_t type)
409 struct drm_device *dev = fence->dev;
410 drm_fence_manager_t *fm = &dev->fm;
411 drm_fence_driver_t *driver = dev->driver->fence_driver;
412 drm_fence_class_manager_t *fc = &fm->class[fence->class];
415 uint32_t native_type;
418 drm_fence_unring(dev, &fence->ring);
419 ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
423 write_lock_irqsave(&fm->lock, flags);
424 fence->class = class;
426 fence->flush_mask = 0x00;
427 fence->submitted_flush = 0x00;
428 fence->signaled = 0x00;
429 fence->sequence = sequence;
430 fence->native_type = native_type;
431 if (list_empty(&fc->ring))
432 fc->last_exe_flush = sequence - 1;
433 list_add_tail(&fence->ring, &fc->ring);
434 write_unlock_irqrestore(&fm->lock, flags);
/*
 * Initialize a freshly allocated fence object: usage count 1, empty ring
 * entry, empty base.list (so the BUG_ON in the deref paths is safe for
 * kernel-only fences), class and cleared flush state.  If
 * DRM_FENCE_FLAG_EMIT is requested, the fence is emitted immediately.
 * NOTE(review): initialization of fence->type/signaled between orig lines
 * 462-466 is elided in this chunk.
 */
438 static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
440 uint32_t fence_flags,
441 drm_fence_object_t * fence)
445 drm_fence_manager_t *fm = &dev->fm;
447 mutex_lock(&dev->struct_mutex);
448 atomic_set(&fence->usage, 1);
449 mutex_unlock(&dev->struct_mutex);
451 write_lock_irqsave(&fm->lock, flags);
452 INIT_LIST_HEAD(&fence->ring);
455 * Avoid hitting BUG() for kernel-only fence objects.
458 INIT_LIST_HEAD(&fence->base.list);
459 fence->class = class;
461 fence->flush_mask = 0;
462 fence->submitted_flush = 0;
466 write_unlock_irqrestore(&fm->lock, flags);
467 if (fence_flags & DRM_FENCE_FLAG_EMIT) {
468 ret = drm_fence_object_emit(fence, fence_flags,
/*
 * Expose @fence to userspace as a user object (optionally @shareable).
 * On success the user object takes its own reference (atomic_inc) and the
 * destroy callback is wired to drm_fence_object_destroy so that reference
 * is dropped when the handle goes away.
 */
474 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
477 drm_device_t *dev = priv->head->dev;
480 mutex_lock(&dev->struct_mutex);
481 ret = drm_add_user_object(priv, &fence->base, shareable);
484 atomic_inc(&fence->usage);
485 fence->base.type = drm_fence_type;
486 fence->base.remove = &drm_fence_object_destroy;
487 DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
489 mutex_unlock(&dev->struct_mutex);
492 EXPORT_SYMBOL(drm_fence_add_user_object);
/*
 * Allocate and initialize a fence object, returning it through *c_fence.
 * On init failure the (usage==1) reference is dropped, which frees the
 * object; on success the manager's object count is bumped.
 */
494 int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
495 unsigned flags, drm_fence_object_t ** c_fence)
497 drm_fence_object_t *fence;
499 drm_fence_manager_t *fm = &dev->fm;
501 fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
504 ret = drm_fence_object_init(dev, class, type, flags, fence);
506 drm_fence_usage_deref_unlocked(&fence);
510 atomic_inc(&fm->count);
515 EXPORT_SYMBOL(drm_fence_object_create);
/*
 * One-time per-device setup of the fence manager: init the rwlock, copy
 * the driver's class count (bounded by _DRM_FENCE_CLASSES), and give each
 * class an empty ring, cleared pending_flush and a wait queue.
 */
517 void drm_fence_manager_init(drm_device_t * dev)
519 drm_fence_manager_t *fm = &dev->fm;
520 drm_fence_class_manager_t *class;
521 drm_fence_driver_t *fed = dev->driver->fence_driver;
524 rwlock_init(&fm->lock);
525 write_lock(&fm->lock);
531 fm->num_classes = fed->num_classes;
532 BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
534 for (i=0; i<fm->num_classes; ++i) {
535 class = &fm->class[i];
537 INIT_LIST_HEAD(&class->ring);
538 class->pending_flush = 0;
539 DRM_INIT_WAITQUEUE(&class->fence_queue);
542 atomic_set(&fm->count, 0);
544 write_unlock(&fm->lock);
/*
 * Teardown counterpart to drm_fence_manager_init().
 * NOTE(review): the body is not visible in this chunk.
 */
547 void drm_fence_manager_takedown(drm_device_t * dev)
/*
 * Resolve a userspace @handle to a referenced fence object, or fail if the
 * handle does not name a drm_fence_type user object.  The returned fence
 * carries a reference taken under dev->struct_mutex; callers drop it with
 * drm_fence_usage_deref_unlocked().
 * NOTE(review): the NULL-return on the failure path and the final
 * `return fence;` are elided in this chunk.
 */
551 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
553 drm_device_t *dev = priv->head->dev;
554 drm_user_object_t *uo;
555 drm_fence_object_t *fence;
557 mutex_lock(&dev->struct_mutex);
558 uo = drm_lookup_user_object(priv, handle);
559 if (!uo || (uo->type != drm_fence_type)) {
560 mutex_unlock(&dev->struct_mutex);
563 fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base));
564 mutex_unlock(&dev->struct_mutex);
/*
 * ioctl: create a fence object from userspace arguments (class, type,
 * flags).  Requires the fence manager to be initialized; DRM lock is
 * required only when DRM_FENCE_FLAG_EMIT is set.  On success a user-object
 * handle is created (shareable if requested) and the fence's class, type
 * and signaled state are copied back to userspace.
 */
568 int drm_fence_create_ioctl(DRM_IOCTL_ARGS)
572 drm_fence_manager_t *fm = &dev->fm;
574 drm_fence_object_t *fence;
578 if (!fm->initialized) {
579 DRM_ERROR("The DRM driver does not support fencing.\n");
583 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
584 if (arg.flags & DRM_FENCE_FLAG_EMIT)
585 LOCK_TEST_WITH_RETURN(dev, filp);
586 ret = drm_fence_object_create(dev, arg.class,
587 arg.type, arg.flags, &fence);
590 ret = drm_fence_add_user_object(priv, fence,
592 DRM_FENCE_FLAG_SHAREABLE);
/* On failure, drop the creation reference so the fence is destroyed. */
594 drm_fence_usage_deref_unlocked(&fence);
599 * usage > 0. No need to lock dev->struct_mutex;
602 atomic_inc(&fence->usage);
603 arg.handle = fence->base.hash.key;
605 read_lock_irqsave(&fm->lock, flags);
606 arg.class = fence->class;
607 arg.type = fence->type;
608 arg.signaled = fence->signaled;
609 read_unlock_irqrestore(&fm->lock, flags);
610 drm_fence_usage_deref_unlocked(&fence);
612 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
/*
 * ioctl: destroy a fence user object by handle.  Only the owning file
 * (uo->owner == priv) may destroy it, and the handle must name a
 * drm_fence_type object; removal happens under dev->struct_mutex.
 */
616 int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS)
620 drm_fence_manager_t *fm = &dev->fm;
622 drm_user_object_t *uo;
625 if (!fm->initialized) {
626 DRM_ERROR("The DRM driver does not support fencing.\n");
630 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
632 mutex_lock(&dev->struct_mutex);
633 uo = drm_lookup_user_object(priv, arg.handle);
634 if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
635 mutex_unlock(&dev->struct_mutex);
638 ret = drm_remove_user_object(priv, uo);
639 mutex_unlock(&dev->struct_mutex);
/*
 * ioctl: take a userspace reference on a (shared) fence handle via
 * drm_user_object_ref, then report the fence's current class, type and
 * signaled state back to the caller.
 */
644 int drm_fence_reference_ioctl(DRM_IOCTL_ARGS)
648 drm_fence_manager_t *fm = &dev->fm;
650 drm_fence_object_t *fence;
651 drm_user_object_t *uo;
655 if (!fm->initialized) {
656 DRM_ERROR("The DRM driver does not support fencing.\n");
660 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
661 ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
664 fence = drm_lookup_fence_object(priv, arg.handle);
666 read_lock_irqsave(&fm->lock, flags);
667 arg.class = fence->class;
668 arg.type = fence->type;
669 arg.signaled = fence->signaled;
670 read_unlock_irqrestore(&fm->lock, flags);
671 drm_fence_usage_deref_unlocked(&fence);
673 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
/*
 * ioctl: drop a userspace reference previously taken with
 * drm_fence_reference_ioctl; delegates to drm_user_object_unref.
 */
678 int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS)
682 drm_fence_manager_t *fm = &dev->fm;
686 if (!fm->initialized) {
687 DRM_ERROR("The DRM driver does not support fencing.\n");
691 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
692 return drm_user_object_unref(priv, arg.handle, drm_fence_type);
/*
 * ioctl: query the current class/type/signaled state of a fence handle.
 * Pure read-out under the fence-manager read lock; no flush is poked.
 */
695 int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS)
699 drm_fence_manager_t *fm = &dev->fm;
701 drm_fence_object_t *fence;
705 if (!fm->initialized) {
706 DRM_ERROR("The DRM driver does not support fencing.\n");
710 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
712 fence = drm_lookup_fence_object(priv, arg.handle);
716 read_lock_irqsave(&fm->lock, flags);
717 arg.class = fence->class;
718 arg.type = fence->type;
719 arg.signaled = fence->signaled;
720 read_unlock_irqrestore(&fm->lock, flags);
721 drm_fence_usage_deref_unlocked(&fence);
723 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
/*
 * ioctl: request a flush of arg.type bits on the fence named by arg.handle
 * (drm_fence_object_flush), then report the updated fence state.
 */
727 int drm_fence_flush_ioctl(DRM_IOCTL_ARGS)
731 drm_fence_manager_t *fm = &dev->fm;
733 drm_fence_object_t *fence;
737 if (!fm->initialized) {
738 DRM_ERROR("The DRM driver does not support fencing.\n");
742 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
744 fence = drm_lookup_fence_object(priv, arg.handle);
747 ret = drm_fence_object_flush(fence, arg.type);
749 read_lock_irqsave(&fm->lock, flags);
750 arg.class = fence->class;
751 arg.type = fence->type;
752 arg.signaled = fence->signaled;
753 read_unlock_irqrestore(&fm->lock, flags);
754 drm_fence_usage_deref_unlocked(&fence);
756 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
/*
 * ioctl: block until the fence named by arg.handle signals, honoring
 * DRM_FENCE_FLAG_WAIT_LAZY from arg.flags (drm_fence_object_wait), then
 * report the fence's final state to userspace.
 */
761 int drm_fence_wait_ioctl(DRM_IOCTL_ARGS)
765 drm_fence_manager_t *fm = &dev->fm;
767 drm_fence_object_t *fence;
771 if (!fm->initialized) {
772 DRM_ERROR("The DRM driver does not support fencing.\n");
776 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
778 fence = drm_lookup_fence_object(priv, arg.handle);
781 ret = drm_fence_object_wait(fence,
782 arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
785 read_lock_irqsave(&fm->lock, flags);
786 arg.class = fence->class;
787 arg.type = fence->type;
788 arg.signaled = fence->signaled;
789 read_unlock_irqrestore(&fm->lock, flags);
790 drm_fence_usage_deref_unlocked(&fence);
792 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
/*
 * ioctl: (re-)emit the fence named by arg.handle with the given flags,
 * class and type (drm_fence_object_emit).  Requires the DRM lock, since
 * emission touches the hardware command stream.
 */
797 int drm_fence_emit_ioctl(DRM_IOCTL_ARGS)
801 drm_fence_manager_t *fm = &dev->fm;
803 drm_fence_object_t *fence;
807 if (!fm->initialized) {
808 DRM_ERROR("The DRM driver does not support fencing.\n");
812 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
814 LOCK_TEST_WITH_RETURN(dev, filp);
815 fence = drm_lookup_fence_object(priv, arg.handle);
818 ret = drm_fence_object_emit(fence, arg.flags, arg.class,
821 read_lock_irqsave(&fm->lock, flags);
822 arg.class = fence->class;
823 arg.type = fence->type;
824 arg.signaled = fence->signaled;
825 read_unlock_irqrestore(&fm->lock, flags);
826 drm_fence_usage_deref_unlocked(&fence);
828 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
/*
 * ioctl: fence the buffer objects on this file's list
 * (drm_fence_buffer_objects), exposing the resulting fence to userspace
 * as a (possibly shareable) handle.  Requires both the fence manager and
 * the buffer-object manager to be initialized, plus the DRM lock.
 */
832 int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS)
836 drm_fence_manager_t *fm = &dev->fm;
838 drm_fence_object_t *fence;
842 if (!fm->initialized) {
843 DRM_ERROR("The DRM driver does not support fencing.\n");
847 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
849 if (!dev->bm.initialized) {
850 DRM_ERROR("Buffer object manager is not initialized\n");
853 LOCK_TEST_WITH_RETURN(dev, filp);
854 ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
858 ret = drm_fence_add_user_object(priv, fence,
860 DRM_FENCE_FLAG_SHAREABLE);
/* Extra reference held while copying state out; dropped below. */
863 atomic_inc(&fence->usage);
864 arg.handle = fence->base.hash.key;
866 read_lock_irqsave(&fm->lock, flags);
867 arg.class = fence->class;
868 arg.type = fence->type;
869 arg.signaled = fence->signaled;
870 read_unlock_irqrestore(&fm->lock, flags);
871 drm_fence_usage_deref_unlocked(&fence);
873 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));