Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101
[platform/upstream/libdrm.git] / linux-core / drm_fence.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Typically called by the IRQ handler.
35  */
36
/*
 * Typically called by the IRQ handler, with the fence manager write lock
 * held, when the hardware reports that sequence number "sequence" of fence
 * class "class" has passed with the flush types in "type" completed.
 * Updates the per-class flush state, marks the newly signaled types on all
 * fences at or before "sequence" on the ring, schedules any flushes that
 * are still outstanding, unlinks completely signaled fences, and wakes
 * waiters sleeping on the class fence queue.
 */
void drm_fence_handler(drm_device_t * dev, uint32_t class,
                       uint32_t sequence, uint32_t type)
{
        int wake = 0;
        uint32_t diff;
        uint32_t relevant;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_class_manager_t *fc = &fm->class[class];
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        struct list_head *head;
        drm_fence_object_t *fence, *next;
        int found = 0;
        int is_exe = (type & DRM_FENCE_TYPE_EXE);
        int ge_last_exe;

        /*
         * Sequence numbers wrap around; "diff < wrap_diff" means "sequence"
         * is at or after the reference point, modulo driver->sequence_mask.
         */
        diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;

        /* An EXE report at or past the requested flush point satisfies it. */
        if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
                fc->pending_exe_flush = 0;

        diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
        ge_last_exe = diff < driver->wrap_diff;

        if (ge_last_exe)
                fc->pending_flush &= ~type;

        if (is_exe && ge_last_exe) {
                fc->last_exe_flush = sequence;
        }

        if (list_empty(&fc->ring))
                return;

        /* Locate the first (oldest-to-newest walk) fence NOT yet covered. */
        list_for_each_entry(fence, &fc->ring, ring) {
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff > driver->wrap_diff) {
                        found = 1;
                        break;
                }
        }

        /*
         * Walk backwards from just before the first uncovered fence (or
         * from the ring tail when every fence is covered) towards older
         * fences, so only fences with sequence <= "sequence" are touched.
         */
        head = (found) ? &fence->ring : &fc->ring;

        list_for_each_entry_safe_reverse(fence, next, head, ring) {
                if (&fence->ring == &fc->ring)
                        break;

                /* Types that signal implicitly with this fence's emit. */
                type |= fence->native_type;
                relevant = type & fence->type;

                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
                        DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
                                  fence->base.hash.key, fence->signaled);
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }

                /* Flush types requested but neither signaled nor submitted. */
                relevant = fence->flush_mask &
                    ~(fence->signaled | fence->submitted_flush);

                if (relevant) {
                        fc->pending_flush |= relevant;
                        fence->submitted_flush = fence->flush_mask;
                }

                /* Every requested type signaled: retire from the ring. */
                if (!(fence->type & ~fence->signaled)) {
                        DRM_DEBUG("Fence completely signaled 0x%08lx\n",
                                  fence->base.hash.key);
                        list_del_init(&fence->ring);
                }
        }

        if (wake) {
                DRM_WAKEUP(&fc->fence_queue);
        }
}

EXPORT_SYMBOL(drm_fence_handler);
116
117 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
118 {
119         drm_fence_manager_t *fm = &dev->fm;
120         unsigned long flags;
121
122         write_lock_irqsave(&fm->lock, flags);
123         list_del_init(ring);
124         write_unlock_irqrestore(&fm->lock, flags);
125 }
126
/*
 * Drop one reference to *fence with dev->struct_mutex held, clearing the
 * caller's pointer. When the last reference goes away the fence is taken
 * off its ring, the manager's object count is decremented, and the object
 * is freed.
 */
void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
{
        struct drm_fence_object *tmp_fence = *fence;
        struct drm_device *dev = tmp_fence->dev;
        drm_fence_manager_t *fm = &dev->fm;

        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        *fence = NULL;
        if (atomic_dec_and_test(&tmp_fence->usage)) {
                drm_fence_unring(dev, &tmp_fence->ring);
                DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
                          tmp_fence->base.hash.key);
                atomic_dec(&fm->count);
                /* A live user object would still hold a reference. */
                BUG_ON(!list_empty(&tmp_fence->base.list));
                drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
        }
}
144
/*
 * Drop one reference to *fence without holding dev->struct_mutex, clearing
 * the caller's pointer. If the count hits zero, the mutex is taken and the
 * count is re-checked before freeing, since another thread may have
 * re-referenced the object (e.g. through its user object) in the window
 * between the decrement and the lock acquisition.
 */
void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence)
{
        struct drm_fence_object *tmp_fence = *fence;
        struct drm_device *dev = tmp_fence->dev;
        drm_fence_manager_t *fm = &dev->fm;

        *fence = NULL;
        if (atomic_dec_and_test(&tmp_fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                /* Re-check: a concurrent reference may have revived it. */
                if (atomic_read(&tmp_fence->usage) == 0) {
                        drm_fence_unring(dev, &tmp_fence->ring);
                        atomic_dec(&fm->count);
                        BUG_ON(!list_empty(&tmp_fence->base.list));
                        drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}
163
164 struct drm_fence_object
165 *drm_fence_reference_locked(struct drm_fence_object *src)
166 {
167         DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
168
169         atomic_inc(&src->usage);
170         return src;
171 }
172
173 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
174                                   struct drm_fence_object *src)
175 {
176         mutex_lock(&src->dev->struct_mutex);
177         *dst = src;
178         atomic_inc(&src->usage);
179         mutex_unlock(&src->dev->struct_mutex);
180 }
181
182
183 static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base)
184 {
185         drm_fence_object_t *fence =
186             drm_user_object_entry(base, drm_fence_object_t, base);
187
188         drm_fence_usage_deref_locked(&fence);
189 }
190
191 int drm_fence_object_signaled(drm_fence_object_t * fence,
192                               uint32_t mask, int poke_flush)
193 {
194         unsigned long flags;
195         int signaled;
196         struct drm_device *dev = fence->dev;
197         drm_fence_manager_t *fm = &dev->fm;
198         drm_fence_driver_t *driver = dev->driver->fence_driver;
199
200         if (poke_flush)
201                 driver->poke_flush(dev, fence->class);
202         read_lock_irqsave(&fm->lock, flags);
203         signaled =
204             (fence->type & mask & fence->signaled) == (fence->type & mask);
205         read_unlock_irqrestore(&fm->lock, flags);
206
207         return signaled;
208 }
209
210 static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
211                                 drm_fence_driver_t * driver, uint32_t sequence)
212 {
213         uint32_t diff;
214
215         if (!fc->pending_exe_flush) {
216                 fc->exe_flush_sequence = sequence;
217                 fc->pending_exe_flush = 1;
218         } else {
219                 diff =
220                     (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
221                 if (diff < driver->wrap_diff) {
222                         fc->exe_flush_sequence = sequence;
223                 }
224         }
225 }
226
/*
 * Request that the flush types in "type" eventually signal on "fence".
 * "type" must be a subset of the fence's type mask (-EINVAL otherwise).
 * Updates the per-fence and per-class flush bookkeeping under the fence
 * manager write lock, then pokes the driver to kick the flush machinery.
 */
int drm_fence_object_flush(drm_fence_object_t * fence,
                           uint32_t type)
{
        struct drm_device *dev = fence->dev;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_class_manager_t *fc = &fm->class[fence->class];
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;

        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type, "
                          "0x%x, 0x%x\n", type, fence->type);
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, flags);
        fence->flush_mask |= type;
        /*
         * Only submit new flushes once everything previously submitted
         * has signaled; otherwise the IRQ handler will pick the new bits
         * up from flush_mask when the earlier flushes complete.
         */
        if (fence->submitted_flush == fence->signaled) {
                if ((fence->type & DRM_FENCE_TYPE_EXE) &&
                    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
                        /* Schedule the EXE flush first; the rest follows. */
                        drm_fence_flush_exe(fc, driver, fence->sequence);
                        fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
                } else {
                        fc->pending_flush |= (fence->flush_mask &
                                              ~fence->submitted_flush);
                        fence->submitted_flush = fence->flush_mask;
                }
        }
        write_unlock_irqrestore(&fm->lock, flags);
        driver->poke_flush(dev, fence->class);
        return 0;
}
259
260 /*
261  * Make sure old fence objects are signaled before their fence sequences are
262  * wrapped around and reused.
263  */
264
/*
 * Called with "sequence" being the newest emitted sequence for "class".
 * Schedules an EXE flush so that fences older than driver->flush_diff get
 * signaled before their sequence numbers wrap and are reused, and
 * explicitly flushes the oldest fence on the ring when it is that old.
 */
void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_class_manager_t *fc = &fm->class[class];
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        uint32_t old_sequence;
        unsigned long flags;
        drm_fence_object_t *fence;
        uint32_t diff;

        write_lock_irqsave(&fm->lock, flags);
        /* Oldest sequence still considered recent enough to leave alone. */
        old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
        diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;

        if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
                fc->pending_exe_flush = 1;
                fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
        }
        write_unlock_irqrestore(&fm->lock, flags);

        mutex_lock(&dev->struct_mutex);
        read_lock_irqsave(&fm->lock, flags);

        if (list_empty(&fc->ring)) {
                read_unlock_irqrestore(&fm->lock, flags);
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        /* The oldest fence sits at the head of the class ring. */
        fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring));
        mutex_unlock(&dev->struct_mutex);
        /*
         * NOTE(review): struct_mutex is released while the fm->lock read
         * lock is still held, and fence->sequence is read before the
         * read_unlock below — confirm this ordering is intentional.
         */
        diff = (old_sequence - fence->sequence) & driver->sequence_mask;
        read_unlock_irqrestore(&fm->lock, flags);
        if (diff < driver->wrap_diff) {
                drm_fence_object_flush(fence, fence->type);
        }
        drm_fence_usage_deref_unlocked(&fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);
304
/*
 * Sleep on the class fence queue until the types in "mask" signal on
 * "fence", or until roughly 3 seconds have elapsed. Interrupted waits are
 * retried when ignore_signals is set; otherwise -EINTR is mapped to
 * -EAGAIN for the caller. Returns 0 when signaled, -EBUSY on timeout.
 */
static int drm_fence_lazy_wait(drm_fence_object_t *fence,
                               int ignore_signals,
                               uint32_t mask)
{
        struct drm_device *dev = fence->dev;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_class_manager_t *fc = &fm->class[fence->class];
        int signaled;
        unsigned long _end = jiffies + 3*DRM_HZ;
        int ret = 0;

        do {
                DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
                            (signaled = drm_fence_object_signaled(fence, mask, 1)));
                if (signaled)
                        return 0;
                if (time_after_eq(jiffies, _end))
                        break;
        } while (ret == -EINTR && ignore_signals);
        /* Final check without poking the flush machinery again. */
        if (drm_fence_object_signaled(fence, mask, 0))
                return 0;
        if (time_after_eq(jiffies, _end))
                ret = -EBUSY;
        if (ret) {
                if (ret == -EBUSY) {
                        DRM_ERROR("Fence timeout. "
                                  "GPU lockup or fence driver was "
                                  "taken down.\n");
                }
                return ((ret == -EINTR) ? -EAGAIN : ret);
        }
        return 0;
}
338
/*
 * Wait for the types in "mask" to signal on "fence". "mask" must be a
 * subset of the fence's type (-EINVAL otherwise). With "lazy" set on a
 * lazy-capable driver, the wait sleeps on the fence queue; otherwise it
 * uses IRQ-driven waits where the driver reports one, and finally falls
 * back to a scheduling poll loop. Unless ignore_signals is set, a pending
 * signal aborts the wait with -EAGAIN. Returns -EBUSY when the fence has
 * not signaled within the timeout.
 */
int drm_fence_object_wait(drm_fence_object_t * fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        struct drm_device *dev = fence->dev;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        int ret = 0;
        unsigned long _end;
        int signaled;

        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type"
                          " 0x%08x 0x%08x\n", mask, fence->type);
                return -EINVAL;
        }

        if (drm_fence_object_signaled(fence, mask, 0))
                return 0;

        _end = jiffies + 3 * DRM_HZ;

        /* Make sure the requested types are on their way to signaling. */
        drm_fence_object_flush(fence, mask);

        if (lazy && driver->lazy_capable) {

                ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
                if (ret)
                        return ret;

        } else {

                /* Wait for the EXE part first if an IRQ covers it. */
                if (driver->has_irq(dev, fence->class,
                                    DRM_FENCE_TYPE_EXE)) {
                        ret = drm_fence_lazy_wait(fence, ignore_signals,
                                                  DRM_FENCE_TYPE_EXE);
                        if (ret)
                                return ret;
                }

                /* Then the remaining types, again only if IRQ-driven. */
                if (driver->has_irq(dev, fence->class,
                                    mask & ~DRM_FENCE_TYPE_EXE)) {
                        ret = drm_fence_lazy_wait(fence, ignore_signals,
                                                  mask);
                        if (ret)
                                return ret;
                }
        }
        if (drm_fence_object_signaled(fence, mask, 0))
                return 0;

        /*
         * Avoid kernel-space busy-waits.
         */
#if 1
        if (!ignore_signals)
                return -EAGAIN;
#endif
        /* Last resort: poll, yielding the CPU between checks. */
        do {
                schedule();
                signaled = drm_fence_object_signaled(fence, mask, 1);
        } while (!signaled && !time_after_eq(jiffies, _end));

        if (!signaled)
                return -EBUSY;

        return 0;
}
405
406 int drm_fence_object_emit(drm_fence_object_t * fence,
407                           uint32_t fence_flags, uint32_t class, uint32_t type)
408 {
409         struct drm_device *dev = fence->dev;
410         drm_fence_manager_t *fm = &dev->fm;
411         drm_fence_driver_t *driver = dev->driver->fence_driver;
412         drm_fence_class_manager_t *fc = &fm->class[fence->class];
413         unsigned long flags;
414         uint32_t sequence;
415         uint32_t native_type;
416         int ret;
417
418         drm_fence_unring(dev, &fence->ring);
419         ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
420         if (ret)
421                 return ret;
422
423         write_lock_irqsave(&fm->lock, flags);
424         fence->class = class;
425         fence->type = type;
426         fence->flush_mask = 0x00;
427         fence->submitted_flush = 0x00;
428         fence->signaled = 0x00;
429         fence->sequence = sequence;
430         fence->native_type = native_type;
431         if (list_empty(&fc->ring))
432                 fc->last_exe_flush = sequence - 1;
433         list_add_tail(&fence->ring, &fc->ring);
434         write_unlock_irqrestore(&fm->lock, flags);
435         return 0;
436 }
437
/*
 * Initialize an already-allocated fence object: usage count of one, empty
 * ring and user-object links, and the given class/type with all signaling
 * state cleared. With DRM_FENCE_FLAG_EMIT set, the fence is immediately
 * emitted on the hardware as well.
 */
static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
                                 uint32_t type,
                                 uint32_t fence_flags,
                                 drm_fence_object_t * fence)
{
        int ret = 0;
        unsigned long flags;
        drm_fence_manager_t *fm = &dev->fm;

        mutex_lock(&dev->struct_mutex);
        atomic_set(&fence->usage, 1);
        mutex_unlock(&dev->struct_mutex);

        write_lock_irqsave(&fm->lock, flags);
        INIT_LIST_HEAD(&fence->ring);

        /* 
         *  Avoid hitting BUG() for kernel-only fence objects.
         */

        INIT_LIST_HEAD(&fence->base.list);
        fence->class = class;
        fence->type = type;
        fence->flush_mask = 0;
        fence->submitted_flush = 0;
        fence->signaled = 0;
        fence->sequence = 0;
        fence->dev = dev;
        write_unlock_irqrestore(&fm->lock, flags);
        if (fence_flags & DRM_FENCE_FLAG_EMIT) {
                ret = drm_fence_object_emit(fence, fence_flags,
                                            fence->class, type);
        }
        return ret;
}
473
474 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
475                               int shareable)
476 {
477         drm_device_t *dev = priv->head->dev;
478         int ret;
479
480         mutex_lock(&dev->struct_mutex);
481         ret = drm_add_user_object(priv, &fence->base, shareable);
482         if (ret)
483                 goto out;
484         atomic_inc(&fence->usage);
485         fence->base.type = drm_fence_type;
486         fence->base.remove = &drm_fence_object_destroy;
487         DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
488 out:
489         mutex_unlock(&dev->struct_mutex);
490         return ret;
491 }
492 EXPORT_SYMBOL(drm_fence_add_user_object);
493
494 int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
495                             unsigned flags, drm_fence_object_t ** c_fence)
496 {
497         drm_fence_object_t *fence;
498         int ret;
499         drm_fence_manager_t *fm = &dev->fm;
500
501         fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
502         if (!fence)
503                 return -ENOMEM;
504         ret = drm_fence_object_init(dev, class, type, flags, fence);
505         if (ret) {
506                 drm_fence_usage_deref_unlocked(&fence);
507                 return ret;
508         }
509         *c_fence = fence;
510         atomic_inc(&fm->count);
511
512         return 0;
513 }
514
515 EXPORT_SYMBOL(drm_fence_object_create);
516
517 void drm_fence_manager_init(drm_device_t * dev)
518 {
519         drm_fence_manager_t *fm = &dev->fm;
520         drm_fence_class_manager_t *class;
521         drm_fence_driver_t *fed = dev->driver->fence_driver;
522         int i;
523
524         rwlock_init(&fm->lock);
525         write_lock(&fm->lock);
526         fm->initialized = 0;
527         if (!fed)
528             goto out_unlock;
529
530         fm->initialized = 1;
531         fm->num_classes = fed->num_classes;
532         BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
533
534         for (i=0; i<fm->num_classes; ++i) {
535             class = &fm->class[i];
536
537             INIT_LIST_HEAD(&class->ring);
538             class->pending_flush = 0;
539             DRM_INIT_WAITQUEUE(&class->fence_queue);
540         }
541
542         atomic_set(&fm->count, 0);
543  out_unlock:
544         write_unlock(&fm->lock);
545 }
546
/*
 * Fence manager teardown. Intentionally a no-op: the manager state is
 * embedded in the device structure and fence objects are freed through
 * their reference counts.
 */
void drm_fence_manager_takedown(drm_device_t * dev)
{
}
550
551 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
552 {
553         drm_device_t *dev = priv->head->dev;
554         drm_user_object_t *uo;
555         drm_fence_object_t *fence;
556
557         mutex_lock(&dev->struct_mutex);
558         uo = drm_lookup_user_object(priv, handle);
559         if (!uo || (uo->type != drm_fence_type)) {
560                 mutex_unlock(&dev->struct_mutex);
561                 return NULL;
562         }
563         fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base));
564         mutex_unlock(&dev->struct_mutex);
565         return fence;
566 }
567
/*
 * Multiplexed fence ioctl. Copies a drm_fence_arg_t from user space,
 * dispatches on arg.op, and — except for the early-return destroy and
 * unreference paths — copies the fence's class/type/signaled state back
 * to user space. Every case that reaches the common exit path leaves
 * "fence" holding one reference, which is dropped after the snapshot.
 */
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        int ret;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_arg_t arg;
        drm_fence_object_t *fence;
        drm_user_object_t *uo;
        unsigned long flags;
        ret = 0;

        if (!fm->initialized) {
                DRM_ERROR("The DRM driver does not support fencing.\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        switch (arg.op) {
        case drm_fence_create:
                /* Emitting touches the hardware: needs the HW lock. */
                if (arg.flags & DRM_FENCE_FLAG_EMIT)
                        LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_object_create(dev, arg.class,
                                              arg.type, arg.flags, &fence);
                if (ret)
                        return ret;
                /* Adds a second reference, owned by the user handle. */
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret) {
                        drm_fence_usage_deref_unlocked(&fence);
                        return ret;
                }
                arg.handle = fence->base.hash.key;

                break;
        case drm_fence_destroy:
                mutex_lock(&dev->struct_mutex);
                uo = drm_lookup_user_object(priv, arg.handle);
                /* Only the owner may destroy its fence handle. */
                if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
                        mutex_unlock(&dev->struct_mutex);
                        return -EINVAL;
                }
                ret = drm_remove_user_object(priv, uo);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        case drm_fence_reference:
                ret =
                    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
                if (ret)
                        return ret;
                /*
                 * NOTE(review): if this lookup fails, "fence" is NULL and
                 * the common exit path below dereferences it — presumably
                 * the ref just taken guarantees success; confirm.
                 */
                fence = drm_lookup_fence_object(priv, arg.handle);
                break;
        case drm_fence_unreference:
                ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
                return ret;
        case drm_fence_signaled:
                /* State snapshot alone; done by the common exit path. */
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                break;
        case drm_fence_flush:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_flush(fence, arg.type);
                break;
        case drm_fence_wait:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret =
                    drm_fence_object_wait(fence,
                                          arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
                                          0, arg.type);
                break;
        case drm_fence_emit:
                LOCK_TEST_WITH_RETURN(dev, filp);
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_emit(fence, arg.flags, arg.class,
                                            arg.type);
                break;
        case drm_fence_buffers:
                if (!dev->bm.initialized) {
                        DRM_ERROR("Buffer object manager is not initialized\n");
                        return -EINVAL;
                }
                LOCK_TEST_WITH_RETURN(dev, filp);
                /* Fence all pending buffer objects in one go. */
                ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
                                               NULL, &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret)
                        return ret;
                arg.handle = fence->base.hash.key;
                break;
        default:
                return -EINVAL;
        }
        /* Snapshot the fence state for user space under the read lock. */
        read_lock_irqsave(&fm->lock, flags);
        arg.class = fence->class;
        arg.type = fence->type;
        arg.signaled = fence->signaled;
        read_unlock_irqrestore(&fm->lock, flags);
        drm_fence_usage_deref_unlocked(&fence);

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return ret;
}