drm: detypedef ttm/bo/fence code
[platform/upstream/libdrm.git] / linux-core / drm_fence.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
/*
 * Typically called by the IRQ handler when the hardware reports that
 * sequence number "sequence" on fence class "class" has passed, with the
 * flush types in "type" completed.  Marks every fence at or before
 * "sequence" as signaled for the relevant types, removes fully signaled
 * fences from the class ring and wakes up waiters.
 *
 * NOTE(review): this function updates fc state without taking fm->lock
 * itself, so the caller presumably holds fm->lock for writing — confirm
 * against the drivers that call it.
 */

void drm_fence_handler(struct drm_device * dev, uint32_t class,
                       uint32_t sequence, uint32_t type)
{
        int wake = 0;
        uint32_t diff;
        uint32_t relevant;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->class[class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        struct list_head *head;
        struct drm_fence_object *fence, *next;
        int found = 0;
        int is_exe = (type & DRM_FENCE_TYPE_EXE);
        int ge_last_exe;

        /*
         * Wrap-safe ordering test: a masked difference below wrap_diff
         * means "sequence" is at or after the reference sequence.
         */
        diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;

        /* An EXE report at or past the requested flush point satisfies it. */
        if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
                fc->pending_exe_flush = 0;

        diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
        ge_last_exe = diff < driver->wrap_diff;

        /* The reported types are no longer pending for this class. */
        if (ge_last_exe)
                fc->pending_flush &= ~type;

        if (is_exe && ge_last_exe) {
                fc->last_exe_flush = sequence;
        }

        if (list_empty(&fc->ring))
                return;

        /*
         * Find the first fence on the ring whose sequence is strictly
         * newer than "sequence"; every fence behind it has passed.
         */
        list_for_each_entry(fence, &fc->ring, ring) {
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff > driver->wrap_diff) {
                        found = 1;
                        break;
                }
        }

        /* Start point for the reverse walk over the passed fences. */
        head = (found) ? &fence->ring : &fc->ring;

        list_for_each_entry_safe_reverse(fence, next, head, ring) {
                if (&fence->ring == &fc->ring)
                        break;

                /* Types the hardware signals implicitly for this fence. */
                type |= fence->native_type;
                relevant = type & fence->type;

                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
                        DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
                                  fence->base.hash.key, fence->signaled);
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }

                /*
                 * Flush types that were requested on this fence but are
                 * neither signaled nor submitted yet become pending on
                 * the class, to be picked up by the driver's flush.
                 */
                relevant = fence->flush_mask &
                    ~(fence->signaled | fence->submitted_flush);

                if (relevant) {
                        fc->pending_flush |= relevant;
                        fence->submitted_flush = fence->flush_mask;
                }

                /* Fully signaled fences leave the ring. */
                if (!(fence->type & ~fence->signaled)) {
                        DRM_DEBUG("Fence completely signaled 0x%08lx\n",
                                  fence->base.hash.key);
                        list_del_init(&fence->ring);
                }
        }

        if (wake) {
                DRM_WAKEUP(&fc->fence_queue);
        }
}

EXPORT_SYMBOL(drm_fence_handler);
116
117 static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
118 {
119         struct drm_fence_manager *fm = &dev->fm;
120         unsigned long flags;
121
122         write_lock_irqsave(&fm->lock, flags);
123         list_del_init(ring);
124         write_unlock_irqrestore(&fm->lock, flags);
125 }
126
127 void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
128 {
129         struct drm_fence_object *tmp_fence = *fence;
130         struct drm_device *dev = tmp_fence->dev;
131         struct drm_fence_manager *fm = &dev->fm;
132
133         DRM_ASSERT_LOCKED(&dev->struct_mutex);
134         *fence = NULL;
135         if (atomic_dec_and_test(&tmp_fence->usage)) {
136                 drm_fence_unring(dev, &tmp_fence->ring);
137                 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
138                           tmp_fence->base.hash.key);
139                 atomic_dec(&fm->count);
140                 BUG_ON(!list_empty(&tmp_fence->base.list));
141                 drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
142         }
143 }
144
/*
 * Drop one reference on a fence without dev->struct_mutex held, clearing
 * the caller's pointer.
 *
 * When the count drops to zero the struct mutex is taken and the count is
 * re-read before destruction; presumably this guards against a concurrent
 * reference being taken under the mutex (e.g. drm_fence_reference_locked())
 * between the decrement and the lock acquisition.
 * NOTE(review): verify this resurrection scenario against the callers.
 */
void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
{
        struct drm_fence_object *tmp_fence = *fence;
        struct drm_device *dev = tmp_fence->dev;
        struct drm_fence_manager *fm = &dev->fm;

        *fence = NULL;
        if (atomic_dec_and_test(&tmp_fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                /* Re-check: the object may have been revived under the mutex. */
                if (atomic_read(&tmp_fence->usage) == 0) {
                        drm_fence_unring(dev, &tmp_fence->ring);
                        atomic_dec(&fm->count);
                        BUG_ON(!list_empty(&tmp_fence->base.list));
                        drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}
163
164 struct drm_fence_object
165 *drm_fence_reference_locked(struct drm_fence_object *src)
166 {
167         DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
168
169         atomic_inc(&src->usage);
170         return src;
171 }
172
173 void drm_fence_reference_unlocked(struct drm_fence_object **dst,
174                                   struct drm_fence_object *src)
175 {
176         mutex_lock(&src->dev->struct_mutex);
177         *dst = src;
178         atomic_inc(&src->usage);
179         mutex_unlock(&src->dev->struct_mutex);
180 }
181
182
183 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
184 {
185         struct drm_fence_object *fence =
186             drm_user_object_entry(base, struct drm_fence_object, base);
187
188         drm_fence_usage_deref_locked(&fence);
189 }
190
191 int drm_fence_object_signaled(struct drm_fence_object * fence,
192                               uint32_t mask, int poke_flush)
193 {
194         unsigned long flags;
195         int signaled;
196         struct drm_device *dev = fence->dev;
197         struct drm_fence_manager *fm = &dev->fm;
198         struct drm_fence_driver *driver = dev->driver->fence_driver;
199
200         if (poke_flush)
201                 driver->poke_flush(dev, fence->class);
202         read_lock_irqsave(&fm->lock, flags);
203         signaled =
204             (fence->type & mask & fence->signaled) == (fence->type & mask);
205         read_unlock_irqrestore(&fm->lock, flags);
206
207         return signaled;
208 }
209
210 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
211                                 struct drm_fence_driver * driver, uint32_t sequence)
212 {
213         uint32_t diff;
214
215         if (!fc->pending_exe_flush) {
216                 fc->exe_flush_sequence = sequence;
217                 fc->pending_exe_flush = 1;
218         } else {
219                 diff =
220                     (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
221                 if (diff < driver->wrap_diff) {
222                         fc->exe_flush_sequence = sequence;
223                 }
224         }
225 }
226
/*
 * Request that the fence types in "type" be flushed for this fence.
 * "type" must be a subset of the types the fence was created with;
 * otherwise -EINVAL is returned.  Returns 0 on success.
 */
int drm_fence_object_flush(struct drm_fence_object * fence,
                           uint32_t type)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->class[fence->class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        unsigned long flags;

        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type, "
                          "0x%x, 0x%x\n", type, fence->type);
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, flags);
        fence->flush_mask |= type;
        /*
         * Only submit new flushes once everything previously submitted
         * has signaled.
         */
        if (fence->submitted_flush == fence->signaled) {
                if ((fence->type & DRM_FENCE_TYPE_EXE) &&
                    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
                        /* EXE goes through the per-class exe-flush path first. */
                        drm_fence_flush_exe(fc, driver, fence->sequence);
                        fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
                } else {
                        /* Everything requested but not yet submitted becomes
                         * pending on the class. */
                        fc->pending_flush |= (fence->flush_mask &
                                              ~fence->submitted_flush);
                        fence->submitted_flush = fence->flush_mask;
                }
        }
        write_unlock_irqrestore(&fm->lock, flags);
        /* Let the driver act on the newly pending flushes. */
        driver->poke_flush(dev, fence->class);
        return 0;
}
259
/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence)
{
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->class[class];
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        uint32_t old_sequence;
        unsigned long flags;
        struct drm_fence_object *fence;
        uint32_t diff;

        write_lock_irqsave(&fm->lock, flags);
        /* The oldest sequence we still need to care about. */
        old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
        diff = (old_sequence - fc->last_exe_flush) & driver->sequence_mask;

        /* Schedule an exe flush halfway back if none is pending. */
        if ((diff < driver->wrap_diff) && !fc->pending_exe_flush) {
                fc->pending_exe_flush = 1;
                fc->exe_flush_sequence = sequence - (driver->flush_diff / 2);
        }
        write_unlock_irqrestore(&fm->lock, flags);

        mutex_lock(&dev->struct_mutex);
        read_lock_irqsave(&fm->lock, flags);

        if (list_empty(&fc->ring)) {
                read_unlock_irqrestore(&fm->lock, flags);
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        /* Pin the oldest fence on the ring (fences are added at the tail). */
        fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
        mutex_unlock(&dev->struct_mutex);
        diff = (old_sequence - fence->sequence) & driver->sequence_mask;
        /*
         * NOTE(review): the read lock is released only after fence->sequence
         * is read and after the mutex is dropped — the interleaving looks
         * intentional to keep the snapshot consistent; verify lock ordering.
         */
        read_unlock_irqrestore(&fm->lock, flags);
        /* If the oldest fence is in danger of wrapping, flush all its types. */
        if (diff < driver->wrap_diff) {
                drm_fence_object_flush(fence, fence->type);
        }
        drm_fence_usage_deref_unlocked(&fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);
304
/*
 * Sleep on the fence class wait queue until the fence signals all types
 * in "mask", a roughly three-second deadline passes, or the wait is
 * interrupted by a signal (retried when "ignore_signals" is set).
 *
 * Returns 0 when signaled, -EBUSY on timeout (reported as a likely GPU
 * lockup), and -EAGAIN when interrupted by a signal.
 */
static int drm_fence_lazy_wait(struct drm_fence_object *fence,
                               int ignore_signals,
                               uint32_t mask)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_class_manager *fc = &fm->class[fence->class];
        int signaled;
        unsigned long _end = jiffies + 3*DRM_HZ;
        int ret = 0;

        do {
                DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
                            (signaled = drm_fence_object_signaled(fence, mask, 1)));
                if (signaled)
                        return 0;
                if (time_after_eq(jiffies, _end))
                        break;
        } while (ret == -EINTR && ignore_signals);
        /* Last chance: the fence may have signaled right at the deadline. */
        if (drm_fence_object_signaled(fence, mask, 0))
                return 0;
        if (time_after_eq(jiffies, _end))
                ret = -EBUSY;
        if (ret) {
                if (ret == -EBUSY) {
                        DRM_ERROR("Fence timeout. "
                                  "GPU lockup or fence driver was "
                                  "taken down.\n");
                }
                /* Interrupted waits are reported as -EAGAIN to the caller. */
                return ((ret == -EINTR) ? -EAGAIN : ret);
        }
        return 0;
}
338
/*
 * Wait for a fence to signal the types in "mask".
 *
 * @lazy:           Prefer IRQ-driven sleeping waits when the driver is
 *                  capable of them.
 * @ignore_signals: Keep waiting across pending signals.
 * @mask:           Types to wait for; must be a subset of fence->type.
 *
 * Returns 0 when signaled, -EINVAL for a bad mask, -EAGAIN when
 * interrupted (or when a busy-wait would be needed while signals are
 * honored), and -EBUSY on timeout (roughly three seconds).
 */
int drm_fence_object_wait(struct drm_fence_object * fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        int ret = 0;
        unsigned long _end;
        int signaled;

        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type"
                          " 0x%08x 0x%08x\n", mask, fence->type);
                return -EINVAL;
        }

        if (drm_fence_object_signaled(fence, mask, 0))
                return 0;

        _end = jiffies + 3 * DRM_HZ;

        /* Make sure the relevant flushes are under way before waiting. */
        drm_fence_object_flush(fence, mask);

        if (lazy && driver->lazy_capable) {

                ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
                if (ret)
                        return ret;

        } else {

                /* No general lazy capability: still sleep for the parts
                 * the driver can deliver IRQs for, EXE first. */
                if (driver->has_irq(dev, fence->class,
                                    DRM_FENCE_TYPE_EXE)) {
                        ret = drm_fence_lazy_wait(fence, ignore_signals,
                                                  DRM_FENCE_TYPE_EXE);
                        if (ret)
                                return ret;
                }

                if (driver->has_irq(dev, fence->class,
                                    mask & ~DRM_FENCE_TYPE_EXE)) {
                        ret = drm_fence_lazy_wait(fence, ignore_signals,
                                                  mask);
                        if (ret)
                                return ret;
                }
        }
        if (drm_fence_object_signaled(fence, mask, 0))
                return 0;

        /*
         * Avoid kernel-space busy-waits.
         */
#if 1
        if (!ignore_signals)
                return -EAGAIN;
#endif
        /* Busy-wait fallback, bounded by the deadline computed above. */
        do {
                schedule();
                signaled = drm_fence_object_signaled(fence, mask, 1);
        } while (!signaled && !time_after_eq(jiffies, _end));

        if (!signaled)
                return -EBUSY;

        return 0;
}
405
/*
 * (Re)emit a fence to the command stream.  Takes the fence off its ring,
 * asks the driver to emit a new hardware sequence number, resets the
 * software flush/signal state and adds the fence at the tail of its
 * class ring.  Returns 0 on success or the driver's emit error code.
 */
int drm_fence_object_emit(struct drm_fence_object * fence,
                          uint32_t fence_flags, uint32_t class, uint32_t type)
{
        struct drm_device *dev = fence->dev;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_driver *driver = dev->driver->fence_driver;
        struct drm_fence_class_manager *fc = &fm->class[fence->class];
        unsigned long flags;
        uint32_t sequence;
        uint32_t native_type;
        int ret;

        /* Detach first in case the fence is being re-emitted. */
        drm_fence_unring(dev, &fence->ring);
        ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
        if (ret)
                return ret;

        write_lock_irqsave(&fm->lock, flags);
        fence->class = class;
        fence->type = type;
        fence->flush_mask = 0x00;
        fence->submitted_flush = 0x00;
        fence->signaled = 0x00;
        fence->sequence = sequence;
        fence->native_type = native_type;
        /* Empty ring: re-anchor the wrap-around reference sequence. */
        if (list_empty(&fc->ring))
                fc->last_exe_flush = sequence - 1;
        list_add_tail(&fence->ring, &fc->ring);
        write_unlock_irqrestore(&fm->lock, flags);
        return 0;
}
437
/*
 * One-time initialization of a newly allocated fence object: set the
 * usage count to one, initialize list heads and software state, and
 * optionally emit the fence right away when DRM_FENCE_FLAG_EMIT is set.
 * Returns 0 or the error from drm_fence_object_emit().
 */
static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
                                 uint32_t type,
                                 uint32_t fence_flags,
                                 struct drm_fence_object * fence)
{
        int ret = 0;
        unsigned long flags;
        struct drm_fence_manager *fm = &dev->fm;

        mutex_lock(&dev->struct_mutex);
        atomic_set(&fence->usage, 1);
        mutex_unlock(&dev->struct_mutex);

        write_lock_irqsave(&fm->lock, flags);
        INIT_LIST_HEAD(&fence->ring);

        /*
         * Avoid hitting BUG() for kernel-only fence objects:
         * give base.list a valid empty state even when the fence is
         * never registered as a user object.
         */

        INIT_LIST_HEAD(&fence->base.list);
        fence->class = class;
        fence->type = type;
        fence->flush_mask = 0;
        fence->submitted_flush = 0;
        fence->signaled = 0;
        fence->sequence = 0;
        fence->dev = dev;
        write_unlock_irqrestore(&fm->lock, flags);
        if (fence_flags & DRM_FENCE_FLAG_EMIT) {
                ret = drm_fence_object_emit(fence, fence_flags,
                                            fence->class, type);
        }
        return ret;
}
473
474 int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
475                               int shareable)
476 {
477         struct drm_device *dev = priv->head->dev;
478         int ret;
479
480         mutex_lock(&dev->struct_mutex);
481         ret = drm_add_user_object(priv, &fence->base, shareable);
482         if (ret)
483                 goto out;
484         atomic_inc(&fence->usage);
485         fence->base.type = drm_fence_type;
486         fence->base.remove = &drm_fence_object_destroy;
487         DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
488 out:
489         mutex_unlock(&dev->struct_mutex);
490         return ret;
491 }
492 EXPORT_SYMBOL(drm_fence_add_user_object);
493
494 int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
495                             unsigned flags, struct drm_fence_object ** c_fence)
496 {
497         struct drm_fence_object *fence;
498         int ret;
499         struct drm_fence_manager *fm = &dev->fm;
500
501         fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
502         if (!fence)
503                 return -ENOMEM;
504         ret = drm_fence_object_init(dev, class, type, flags, fence);
505         if (ret) {
506                 drm_fence_usage_deref_unlocked(&fence);
507                 return ret;
508         }
509         *c_fence = fence;
510         atomic_inc(&fm->count);
511
512         return 0;
513 }
514
515 EXPORT_SYMBOL(drm_fence_object_create);
516
517 void drm_fence_manager_init(struct drm_device * dev)
518 {
519         struct drm_fence_manager *fm = &dev->fm;
520         struct drm_fence_class_manager *class;
521         struct drm_fence_driver *fed = dev->driver->fence_driver;
522         int i;
523
524         rwlock_init(&fm->lock);
525         write_lock(&fm->lock);
526         fm->initialized = 0;
527         if (!fed)
528             goto out_unlock;
529
530         fm->initialized = 1;
531         fm->num_classes = fed->num_classes;
532         BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
533
534         for (i=0; i<fm->num_classes; ++i) {
535             class = &fm->class[i];
536
537             INIT_LIST_HEAD(&class->ring);
538             class->pending_flush = 0;
539             DRM_INIT_WAITQUEUE(&class->fence_queue);
540         }
541
542         atomic_set(&fm->count, 0);
543  out_unlock:
544         write_unlock(&fm->lock);
545 }
546
/*
 * Intentionally a no-op: nothing set up by drm_fence_manager_init()
 * requires explicit teardown here.  Kept as a hook for symmetry with
 * the init path.
 */
void drm_fence_manager_takedown(struct drm_device * dev)
{
}
550
551 struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
552 {
553         struct drm_device *dev = priv->head->dev;
554         struct drm_user_object *uo;
555         struct drm_fence_object *fence;
556
557         mutex_lock(&dev->struct_mutex);
558         uo = drm_lookup_user_object(priv, handle);
559         if (!uo || (uo->type != drm_fence_type)) {
560                 mutex_unlock(&dev->struct_mutex);
561                 return NULL;
562         }
563         fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
564         mutex_unlock(&dev->struct_mutex);
565         return fence;
566 }
567
/*
 * Ioctl: create a fence object, register it as a user object, and return
 * its handle plus a snapshot of its class/type/signaled state.
 */
int drm_fence_create_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        int ret;
        struct drm_fence_manager *fm = &dev->fm;
        struct drm_fence_arg arg;
        struct drm_fence_object *fence;
        unsigned long flags;
        ret = 0;

        if (!fm->initialized) {
                DRM_ERROR("The DRM driver does not support fencing.\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        /* Emitting touches the hardware ring; require the HW lock. */
        if (arg.flags & DRM_FENCE_FLAG_EMIT)
                LOCK_TEST_WITH_RETURN(dev, filp);
        ret = drm_fence_object_create(dev, arg.class,
                                      arg.type, arg.flags, &fence);
        if (ret)
                return ret;
        ret = drm_fence_add_user_object(priv, fence,
                                        arg.flags &
                                        DRM_FENCE_FLAG_SHAREABLE);
        if (ret) {
                /* Registration failed: drop the creation reference. */
                drm_fence_usage_deref_unlocked(&fence);
                return ret;
        }
        
        /*
         * usage > 0. No need to lock dev->struct_mutex;
         * the extra reference below pins the fence while its state is
         * copied back to user space.
         */
        
        atomic_inc(&fence->usage);
        arg.handle = fence->base.hash.key;

        read_lock_irqsave(&fm->lock, flags);
        arg.class = fence->class;
        arg.type = fence->type;
        arg.signaled = fence->signaled;
        read_unlock_irqrestore(&fm->lock, flags);
        drm_fence_usage_deref_unlocked(&fence);

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return ret;
}
615
616 int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS)
617 {
618         DRM_DEVICE;
619         int ret;
620         struct drm_fence_manager *fm = &dev->fm;
621         struct drm_fence_arg arg;
622         struct drm_user_object *uo;
623         ret = 0;
624
625         if (!fm->initialized) {
626                 DRM_ERROR("The DRM driver does not support fencing.\n");
627                 return -EINVAL;
628         }
629
630         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
631
632         mutex_lock(&dev->struct_mutex);
633         uo = drm_lookup_user_object(priv, arg.handle);
634         if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
635                 mutex_unlock(&dev->struct_mutex);
636                 return -EINVAL;
637         }
638         ret = drm_remove_user_object(priv, uo);
639         mutex_unlock(&dev->struct_mutex);
640         return ret;
641 }
642
643
644 int drm_fence_reference_ioctl(DRM_IOCTL_ARGS)
645 {
646         DRM_DEVICE;
647         int ret;
648         struct drm_fence_manager *fm = &dev->fm;
649         struct drm_fence_arg arg;
650         struct drm_fence_object *fence;
651         struct drm_user_object *uo;
652         unsigned long flags;
653         ret = 0;
654
655         if (!fm->initialized) {
656                 DRM_ERROR("The DRM driver does not support fencing.\n");
657                 return -EINVAL;
658         }
659
660         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
661         ret = drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
662         if (ret)
663                 return ret;
664         fence = drm_lookup_fence_object(priv, arg.handle);
665
666         read_lock_irqsave(&fm->lock, flags);
667         arg.class = fence->class;
668         arg.type = fence->type;
669         arg.signaled = fence->signaled;
670         read_unlock_irqrestore(&fm->lock, flags);
671         drm_fence_usage_deref_unlocked(&fence);
672
673         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
674         return ret;
675 }
676
677
678 int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS)
679 {
680         DRM_DEVICE;
681         int ret;
682         struct drm_fence_manager *fm = &dev->fm;
683         struct drm_fence_arg arg;
684         ret = 0;
685
686         if (!fm->initialized) {
687                 DRM_ERROR("The DRM driver does not support fencing.\n");
688                 return -EINVAL;
689         }
690
691         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
692         return drm_user_object_unref(priv, arg.handle, drm_fence_type);
693 }
694
695 int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS)
696 {
697         DRM_DEVICE;
698         int ret;
699         struct drm_fence_manager *fm = &dev->fm;
700         struct drm_fence_arg arg;
701         struct drm_fence_object *fence;
702         unsigned long flags;
703         ret = 0;
704
705         if (!fm->initialized) {
706                 DRM_ERROR("The DRM driver does not support fencing.\n");
707                 return -EINVAL;
708         }
709
710         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
711
712         fence = drm_lookup_fence_object(priv, arg.handle);
713         if (!fence)
714                 return -EINVAL;
715
716         read_lock_irqsave(&fm->lock, flags);
717         arg.class = fence->class;
718         arg.type = fence->type;
719         arg.signaled = fence->signaled;
720         read_unlock_irqrestore(&fm->lock, flags);
721         drm_fence_usage_deref_unlocked(&fence);
722
723         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
724         return ret;
725 }
726
727 int drm_fence_flush_ioctl(DRM_IOCTL_ARGS)
728 {
729         DRM_DEVICE;
730         int ret;
731         struct drm_fence_manager *fm = &dev->fm;
732         struct drm_fence_arg arg;
733         struct drm_fence_object *fence;
734         unsigned long flags;
735         ret = 0;
736
737         if (!fm->initialized) {
738                 DRM_ERROR("The DRM driver does not support fencing.\n");
739                 return -EINVAL;
740         }
741
742         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
743
744         fence = drm_lookup_fence_object(priv, arg.handle);
745         if (!fence)
746                 return -EINVAL;
747         ret = drm_fence_object_flush(fence, arg.type);
748
749         read_lock_irqsave(&fm->lock, flags);
750         arg.class = fence->class;
751         arg.type = fence->type;
752         arg.signaled = fence->signaled;
753         read_unlock_irqrestore(&fm->lock, flags);
754         drm_fence_usage_deref_unlocked(&fence);
755
756         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
757         return ret;
758 }
759
760
761 int drm_fence_wait_ioctl(DRM_IOCTL_ARGS)
762 {
763         DRM_DEVICE;
764         int ret;
765         struct drm_fence_manager *fm = &dev->fm;
766         struct drm_fence_arg arg;
767         struct drm_fence_object *fence;
768         unsigned long flags;
769         ret = 0;
770
771         if (!fm->initialized) {
772                 DRM_ERROR("The DRM driver does not support fencing.\n");
773                 return -EINVAL;
774         }
775
776         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
777
778         fence = drm_lookup_fence_object(priv, arg.handle);
779         if (!fence)
780                 return -EINVAL;
781         ret = drm_fence_object_wait(fence,
782                                     arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
783                                     0, arg.type);
784
785         read_lock_irqsave(&fm->lock, flags);
786         arg.class = fence->class;
787         arg.type = fence->type;
788         arg.signaled = fence->signaled;
789         read_unlock_irqrestore(&fm->lock, flags);
790         drm_fence_usage_deref_unlocked(&fence);
791
792         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
793         return ret;
794 }
795
796
797 int drm_fence_emit_ioctl(DRM_IOCTL_ARGS)
798 {
799         DRM_DEVICE;
800         int ret;
801         struct drm_fence_manager *fm = &dev->fm;
802         struct drm_fence_arg arg;
803         struct drm_fence_object *fence;
804         unsigned long flags;
805         ret = 0;
806
807         if (!fm->initialized) {
808                 DRM_ERROR("The DRM driver does not support fencing.\n");
809                 return -EINVAL;
810         }
811
812         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
813
814         LOCK_TEST_WITH_RETURN(dev, filp);
815         fence = drm_lookup_fence_object(priv, arg.handle);
816         if (!fence)
817                 return -EINVAL;
818         ret = drm_fence_object_emit(fence, arg.flags, arg.class,
819                                     arg.type);
820
821         read_lock_irqsave(&fm->lock, flags);
822         arg.class = fence->class;
823         arg.type = fence->type;
824         arg.signaled = fence->signaled;
825         read_unlock_irqrestore(&fm->lock, flags);
826         drm_fence_usage_deref_unlocked(&fence);
827
828         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
829         return ret;
830 }
831
832 int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS)
833 {
834         DRM_DEVICE;
835         int ret;
836         struct drm_fence_manager *fm = &dev->fm;
837         struct drm_fence_arg arg;
838         struct drm_fence_object *fence;
839         unsigned long flags;
840         ret = 0;
841
842         if (!fm->initialized) {
843                 DRM_ERROR("The DRM driver does not support fencing.\n");
844                 return -EINVAL;
845         }
846
847         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
848
849         if (!dev->bm.initialized) {
850                 DRM_ERROR("Buffer object manager is not initialized\n");
851                 return -EINVAL;
852         }
853         LOCK_TEST_WITH_RETURN(dev, filp);
854         ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
855                                        NULL, &fence);
856         if (ret)
857                 return ret;
858         ret = drm_fence_add_user_object(priv, fence,
859                                         arg.flags &
860                                         DRM_FENCE_FLAG_SHAREABLE);
861         if (ret)
862                 return ret;
863         atomic_inc(&fence->usage);
864         arg.handle = fence->base.hash.key;
865
866         read_lock_irqsave(&fm->lock, flags);
867         arg.class = fence->class;
868         arg.type = fence->type;
869         arg.signaled = fence->signaled;
870         read_unlock_irqrestore(&fm->lock, flags);
871         drm_fence_usage_deref_unlocked(&fence);
872
873         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
874         return ret;
875 }