More verbose error reporting in some cases.
[platform/upstream/libdrm.git] / linux-core / drm_fence.c
1 /**************************************************************************
2  * 
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
19  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
20  * USE OR OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * The above copyright notice and this permission notice (including the
23  * next paragraph) shall be included in all copies or substantial portions
24  * of the Software.
25  * 
26  * 
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30  */
31
32 #include "drmP.h"
33
34 /*
35  * Typically called by the IRQ handler.
36  */
37
/*
 * Typically called from the IRQ handler, with fm->lock write-held by the
 * caller.  Marks every fence on the ring whose sequence is at or before
 * @sequence as signaled for the relevant @type bits, removes fully
 * signaled fences from the ring, and wakes waiters if anything changed.
 */
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
        int wake = 0;
        uint32_t diff;
        uint32_t relevant;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        struct list_head *list, *prev;
        drm_fence_object_t *fence;

        /*
         * Find the first fence newer than @sequence, accounting for
         * sequence-number wraparound via sequence_mask / wrap_diff.
         * If the loop runs to completion, the iterator leaves @fence as
         * the head-embedded pseudo-entry (fence->ring == &fm->ring), so
         * the backward walk below still starts at the last real element.
         */
        list_for_each_entry(fence, &fm->ring, ring) {
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff > driver->wrap_diff)
                        break;
        }

        list = fence->ring.prev;
        prev = list->prev;

        /* Walk backwards over all fences at or before @sequence. */
        for (; list != &fm->ring; list = prev, prev = list->prev) {
                fence = list_entry(list, drm_fence_object_t, ring);

                /* Earlier fences inherit the native types signaled by
                 * the newer ones we already passed. */
                type |= fence->native_type;
                relevant = type & fence->type;

                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
                        DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
                                  fence->base.hash.key, fence->signaled);
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }

                /* Types the user requested flushed but which are neither
                 * signaled nor already submitted for flushing. */
                relevant = fence->flush_mask &
                    ~(fence->signaled | fence->submitted_flush);

                if (relevant) {
                        fm->pending_flush |= relevant;
                        fence->submitted_flush = fence->flush_mask;
                }

                /* A fence with all of its type bits signaled is done and
                 * leaves the ring. */
                if (!(fence->type & ~fence->signaled)) {
                        DRM_DEBUG("Fence completely signaled 0x%08lx\n",
                                  fence->base.hash.key);
                        list_del_init(&fence->ring);
                }

        }
                
        if (wake) {
                DRM_WAKEUP(&fm->fence_queue);
        }
}

EXPORT_SYMBOL(drm_fence_handler);
93
94 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
95 {
96         drm_fence_manager_t *fm = &dev->fm;
97         unsigned long flags;
98
99         write_lock_irqsave(&fm->lock, flags);
100         list_del_init(ring);
101         write_unlock_irqrestore(&fm->lock, flags);
102 }
103
104 void drm_fence_usage_deref_locked(drm_device_t * dev,
105                                   drm_fence_object_t * fence)
106 {
107         if (atomic_dec_and_test(&fence->usage)) {
108                 drm_fence_unring(dev, &fence->ring);
109                 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
110                           fence->base.hash.key);
111                 kmem_cache_free(drm_cache.fence_object, fence);
112         }
113 }
114
/*
 * Drop one reference on @fence without dev->struct_mutex held.  If ours
 * looks like the last reference, take the mutex and re-check before
 * freeing: another thread may have re-referenced the object while we
 * were acquiring the mutex.
 */
void drm_fence_usage_deref_unlocked(drm_device_t * dev,
                                    drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                /* Re-check under the mutex; only free if the count is
                 * still zero. */
                if (atomic_read(&fence->usage) == 0) {
                        drm_fence_unring(dev, &fence->ring);
                        kmem_cache_free(drm_cache.fence_object, fence);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}
127
128 static void drm_fence_object_destroy(drm_file_t * priv,
129                                      drm_user_object_t * base)
130 {
131         drm_device_t *dev = priv->head->dev;
132         drm_fence_object_t *fence =
133             drm_user_object_entry(base, drm_fence_object_t, base);
134
135         drm_fence_usage_deref_locked(dev, fence);
136 }
137
138 static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
139                           uint32_t mask, int poke_flush)
140 {
141         unsigned long flags;
142         int signaled;
143         drm_fence_manager_t *fm = &dev->fm;
144         drm_fence_driver_t *driver = dev->driver->fence_driver;
145
146         if (poke_flush)
147                 driver->poke_flush(dev);
148         read_lock_irqsave(&fm->lock, flags);
149         signaled =
150             (fence->type & mask & fence->signaled) == (fence->type & mask);
151         read_unlock_irqrestore(&fm->lock, flags);
152
153         return signaled;
154 }
155
156 static void drm_fence_flush_exe(drm_fence_manager_t * fm,
157                                 drm_fence_driver_t * driver, uint32_t sequence)
158 {
159         uint32_t diff;
160
161         if (!fm->pending_exe_flush) {
162                 struct list_head *list;
163
164                 /*
165                  * Last_exe_flush is invalid. Find oldest sequence.
166                  */
167
168 /*              list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
169                 list = &fm->ring;
170                 if (list->next == &fm->ring) {
171                         return;
172                 } else {
173                         drm_fence_object_t *fence =
174                             list_entry(list->next, drm_fence_object_t, ring);
175                         fm->last_exe_flush = (fence->sequence - 1) &
176                             driver->sequence_mask;
177                 }
178                 diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
179                 if (diff >= driver->wrap_diff)
180                         return;
181                 fm->exe_flush_sequence = sequence;
182                 fm->pending_exe_flush = 1;
183         } else {
184                 diff =
185                     (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
186                 if (diff < driver->wrap_diff) {
187                         fm->exe_flush_sequence = sequence;
188                 }
189         }
190 }
191
192 int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
193 {
194         return ((fence->signaled & type) == type);
195 }
196
197 /*
198  * Make sure old fence objects are signaled before their fence sequences are
199  * wrapped around and reused.
200  */
201
/*
 * Request that the @type bits of @fence be flushed to completion.
 * Returns -EINVAL if @type contains bits the fence was not created
 * with; 0 otherwise.
 */
int drm_fence_object_flush(drm_device_t * dev,
                           drm_fence_object_t * fence, uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;

        /* A flush may only cover types the fence actually carries. */
        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type, "
                          "0x%x, 0x%x\n", type, fence->type);
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, flags);
        fence->flush_mask |= type;
        /* Only submit new flush work when nothing beyond what has
         * already signaled is currently in flight. */
        if (fence->submitted_flush == fence->signaled) {
                if ((fence->type & DRM_FENCE_TYPE_EXE) &&
                    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
                        /* EXE goes through the dedicated exe-flush
                         * bookkeeping keyed on the fence sequence. */
                        drm_fence_flush_exe(fm, driver, fence->sequence);
                        fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
                } else {
                        /* Everything else is batched into the manager's
                         * pending_flush mask. */
                        fm->pending_flush |= (fence->flush_mask &
                                              ~fence->submitted_flush);
                        fence->submitted_flush = fence->flush_mask;
                }
        }
        write_unlock_irqrestore(&fm->lock, flags);
        /* Kick the driver so the newly recorded flush makes progress. */
        driver->poke_flush(dev);
        return 0;
}
232
233 void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
234 {
235         drm_fence_manager_t *fm = &dev->fm;
236         drm_fence_driver_t *driver = dev->driver->fence_driver;
237         uint32_t old_sequence;
238         unsigned long flags;
239         drm_fence_object_t *fence;
240         uint32_t diff;
241
242         mutex_lock(&dev->struct_mutex);
243         read_lock_irqsave(&fm->lock, flags);
244         if (fm->ring.next == &fm->ring) {
245                 read_unlock_irqrestore(&fm->lock, flags);
246                 mutex_unlock(&dev->struct_mutex);
247                 return;
248         }
249         old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
250         fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
251         atomic_inc(&fence->usage);
252         mutex_unlock(&dev->struct_mutex);
253         diff = (old_sequence - fence->sequence) & driver->sequence_mask;
254         read_unlock_irqrestore(&fm->lock, flags);
255         if (diff < driver->wrap_diff) {
256                 drm_fence_object_flush(dev, fence, fence->type);
257         }
258         drm_fence_usage_deref_unlocked(dev, fence);
259 }
260
261 EXPORT_SYMBOL(drm_fence_flush_old);
262
/*
 * Wait for the @mask type bits of @fence to signal, with an overall
 * timeout of 3*DRM_HZ jiffies.  @lazy selects IRQ-driven waiting when
 * the driver supports it; @ignore_signals keeps waiting across
 * interrupted sleeps instead of bailing out with -EAGAIN.
 * Returns 0 on success, -EINVAL for a bad mask, -EAGAIN when
 * interrupted, or -EBUSY on timeout.
 */
int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        int ret = 0;
        unsigned long _end;
        int signaled;

        /* Can only wait on types the fence was created with. */
        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type"
                          " 0x%08x 0x%08x\n", mask, fence->type);
                return -EINVAL;
        }

        /* Fast path: already signaled; no need to poke the flush. */
        if (fence_signaled(dev, fence, mask, 0))
                return 0;

        _end = jiffies + 3 * DRM_HZ;

        /* Submit flushes so the requested types make progress. */
        drm_fence_object_flush(dev, fence, mask);

        if (lazy && driver->lazy_capable) {

                /* IRQ-driven wait on the full requested mask. */
                do {
                        DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
                                    fence_signaled(dev, fence, mask, 1));
                        if (time_after_eq(jiffies, _end))
                                break;
                } while (ret == -EINTR && ignore_signals);
                if (time_after_eq(jiffies, _end) && (ret != 0))
                        ret = -EBUSY;
                if (ret)
                        return ((ret == -EINTR) ? -EAGAIN : ret);

        } else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
                   driver->lazy_capable) {

                /*
                 * We use IRQ wait for EXE fence if available to gain 
                 * CPU in some cases.
                 */

                do {
                        DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
                                    fence_signaled(dev, fence, DRM_FENCE_TYPE_EXE,
                                                   1));
                        if (time_after_eq(jiffies, _end))
                                break;
                } while (ret == -EINTR && ignore_signals);
                if (time_after_eq(jiffies, _end) && (ret != 0))
                        ret = -EBUSY;
                if (ret)
                        return ((ret == -EINTR) ? -EAGAIN : ret);
        }

        if (fence_signaled(dev, fence, mask, 0))
                return 0;

        /*
         * Avoid kernel-space busy-waits.
         */
#if 1
        if (!ignore_signals)
                return -EAGAIN;
#endif
        /* Last resort: poll with schedule() until signaled or timeout. */
        do { 
                schedule();
                signaled = fence_signaled(dev, fence, mask, 1);
        } while (!signaled && !time_after_eq(jiffies, _end));

        if (!signaled)
                return -EBUSY;

        return 0;
}
339
340 int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
341                           uint32_t fence_flags, uint32_t type)
342 {
343         drm_fence_manager_t *fm = &dev->fm;
344         drm_fence_driver_t *driver = dev->driver->fence_driver;
345         unsigned long flags;
346         uint32_t sequence;
347         uint32_t native_type;
348         int ret;
349
350         drm_fence_unring(dev, &fence->ring);
351         ret = driver->emit(dev, fence_flags, &sequence, &native_type);
352         if (ret)
353                 return ret;
354
355         write_lock_irqsave(&fm->lock, flags);
356         fence->type = type;
357         fence->flush_mask = 0x00;
358         fence->submitted_flush = 0x00;
359         fence->signaled = 0x00;
360         fence->sequence = sequence;
361         fence->native_type = native_type;
362         list_add_tail(&fence->ring, &fm->ring);
363         write_unlock_irqrestore(&fm->lock, flags);
364         return 0;
365 }
366
367 static int drm_fence_object_init(drm_device_t * dev, uint32_t type, 
368                                  uint32_t fence_flags,
369                                  drm_fence_object_t * fence)
370 {
371         int ret = 0;
372         unsigned long flags;
373         drm_fence_manager_t *fm = &dev->fm;
374
375         mutex_lock(&dev->struct_mutex);
376         atomic_set(&fence->usage, 1);
377         mutex_unlock(&dev->struct_mutex);
378
379         write_lock_irqsave(&fm->lock, flags);
380         INIT_LIST_HEAD(&fence->ring);
381         fence->class = 0;
382         fence->type = type;
383         fence->flush_mask = 0;
384         fence->submitted_flush = 0;
385         fence->signaled = 0;
386         fence->sequence = 0;
387         write_unlock_irqrestore(&fm->lock, flags);
388         if (fence_flags & DRM_FENCE_FLAG_EMIT) {
389                 ret = drm_fence_object_emit(dev, fence, fence_flags, type);
390         }
391         return ret;
392 }
393
394
395 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
396                               int shareable)
397 {
398         drm_device_t *dev = priv->head->dev;
399         int ret;
400
401         mutex_lock(&dev->struct_mutex);
402         ret = drm_add_user_object(priv, &fence->base, shareable);
403         mutex_unlock(&dev->struct_mutex);
404         if (ret)
405                 return ret;
406         fence->base.type = drm_fence_type;
407         fence->base.remove = &drm_fence_object_destroy;
408         DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
409         return 0;
410 }
411
412 EXPORT_SYMBOL(drm_fence_add_user_object);
413
414 int drm_fence_object_create(drm_device_t * dev, uint32_t type,
415                             unsigned flags, drm_fence_object_t ** c_fence)
416 {
417         drm_fence_object_t *fence;
418         int ret;
419
420         fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
421         if (!fence)
422                 return -ENOMEM;
423         ret = drm_fence_object_init(dev, type, flags, fence);
424         if (ret) {
425                 drm_fence_usage_deref_unlocked(dev, fence);
426                 return ret;
427         }
428         *c_fence = fence;
429         return 0;
430 }
431
432 EXPORT_SYMBOL(drm_fence_object_create);
433
434 void drm_fence_manager_init(drm_device_t * dev)
435 {
436         drm_fence_manager_t *fm = &dev->fm;
437         drm_fence_driver_t *fed = dev->driver->fence_driver;
438         int i;
439
440         fm->lock = RW_LOCK_UNLOCKED;
441         INIT_LIST_HEAD(&fm->ring);
442         fm->pending_flush = 0;
443         DRM_INIT_WAITQUEUE(&fm->fence_queue);
444         fm->initialized = 0;
445         if (fed) {
446                 fm->initialized = 1;
447                 for (i = 0; i < fed->no_types; ++i) {
448                         fm->fence_types[i] = &fm->ring;
449                 }
450         }
451 }
452
/*
 * Fence manager teardown.  Currently a no-op: nothing is released here.
 */
void drm_fence_manager_takedown(drm_device_t * dev)
{
}
456
457 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
458 {
459         drm_device_t *dev = priv->head->dev;
460         drm_user_object_t *uo;
461         drm_fence_object_t *fence;
462
463         mutex_lock(&dev->struct_mutex);
464         uo = drm_lookup_user_object(priv, handle);
465         if (!uo || (uo->type != drm_fence_type)) {
466                 mutex_unlock(&dev->struct_mutex);
467                 return NULL;
468         }
469         fence = drm_user_object_entry(uo, drm_fence_object_t, base);
470         atomic_inc(&fence->usage);
471         mutex_unlock(&dev->struct_mutex);
472         return fence;
473 }
474
/*
 * Fence ioctl entry point.  Dispatches on arg.op; the ops that fall
 * through the switch leave a referenced fence in @fence, whose current
 * status is copied back to user space before the reference is dropped.
 */
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        int ret;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_arg_t arg;
        drm_fence_object_t *fence;
        drm_user_object_t *uo;
        unsigned long flags;
        ret = 0;

        if (!fm->initialized) {
                DRM_ERROR("The DRM driver does not support fencing.\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        switch (arg.op) {
        case drm_fence_create:
                /* Emitting requires the caller to hold the HW lock. */
                if (arg.flags & DRM_FENCE_FLAG_EMIT)
                        LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_object_create(dev, arg.type,
                                              arg.flags,
                                              &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        return ret;
                }

                /*
                 * usage > 0. No need to lock dev->struct_mutex;
                 */

                /* Extra reference balances the deref at the common exit. */
                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        case drm_fence_destroy:
                mutex_lock(&dev->struct_mutex);
                uo = drm_lookup_user_object(priv, arg.handle);
                /* Only the owning file may destroy the user object. */
                if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
                        mutex_unlock(&dev->struct_mutex);
                        return -EINVAL;
                }
                ret = drm_remove_user_object(priv, uo);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        case drm_fence_reference:
                ret =
                    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
                if (ret)
                        return ret;
                /* NOTE(review): if this lookup fails, @fence is NULL and
                 * the common exit below dereferences it — confirm the
                 * lookup cannot fail right after a successful ref. */
                fence = drm_lookup_fence_object(priv, arg.handle);
                break;
        case drm_fence_unreference:
                ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
                return ret;
        case drm_fence_signaled:
                /* Status-only query; handled by the common exit. */
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                break;
        case drm_fence_flush:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_flush(dev, fence, arg.type);
                break;
        case drm_fence_wait:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret =
                    drm_fence_object_wait(dev, fence,
                                          arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
                                          0, arg.type);
                break;
        case drm_fence_emit:
                LOCK_TEST_WITH_RETURN(dev, filp);
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
                break;
        case drm_fence_buffers:
                if (!dev->bm.initialized) {
                        DRM_ERROR("Buffer object manager is not initialized\n");
                        return -EINVAL;
                }
                LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_buffer_objects(priv, NULL, arg.flags, 
                                               NULL, &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret)
                        return ret;
                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        default:
                return -EINVAL;
        }
        /* Common exit: snapshot fence status under the manager lock,
         * drop our reference, and copy the result to user space. */
        read_lock_irqsave(&fm->lock, flags);
        arg.class = fence->class;
        arg.type = fence->type;
        arg.signaled = fence->signaled;
        read_unlock_irqrestore(&fm->lock, flags);
        drm_fence_usage_deref_unlocked(dev, fence);

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return ret;
}