Fix proc formatting broken by last commit.
[platform/upstream/libdrm.git] / linux-core / drm_fence.c
1 /**************************************************************************
2  * 
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
19  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
20  * USE OR OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * The above copyright notice and this permission notice (including the
23  * next paragraph) shall be included in all copies or substantial portions
24  * of the Software.
25  * 
26  * 
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30  */
31
32 #include "drmP.h"
33
34
/*
 * Typically called by the IRQ handler when the hardware reports that fence
 * "sequence" has completed, with "type" the mask of fence-type bits signaled.
 * NOTE(review): no locking is taken here; callers presumably hold the
 * fence manager's write lock (fm->lock) -- confirm at every call site.
 */

void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
	int wake = 0;		/* set when any fence gains new signaled bits */
	uint32_t diff;
	uint32_t relevant;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	struct list_head *list, *prev;
	drm_fence_object_t *fence;
	int found = 0;

	if (list_empty(&fm->ring))
		return;

	/*
	 * Find the first fence on the ring whose sequence is *newer* than
	 * "sequence" (wrap-safe comparison via the driver's sequence mask).
	 */
	list_for_each_entry(fence, &fm->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff > driver->wrap_diff) {
			found = 1;
			break;
		}
	}

	/*
	 * Walk backwards from the newest passed fence towards the ring head
	 * (i.e. from newer to older), accumulating native type bits so that
	 * older fences also see them.
	 */
	list = (found) ? fence->ring.prev : fm->ring.prev;
	prev = list->prev;

	for (; list != &fm->ring; list = prev, prev = list->prev) {
		fence = list_entry(list, drm_fence_object_t, ring);

		type |= fence->native_type;
		relevant = type & fence->type;

		/* Record any newly signaled type bits and remember to wake waiters. */
		if ((fence->signaled | relevant) != fence->signaled) {
			fence->signaled |= relevant;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled);
			fence->submitted_flush |= relevant;
			wake = 1;
		}

		/* Flush types requested but neither signaled nor yet submitted. */
		relevant = fence->flush_mask &
		    ~(fence->signaled | fence->submitted_flush);

		if (relevant) {
			fm->pending_flush |= relevant;
			fence->submitted_flush = fence->flush_mask;
		}

		/* A fully signaled fence leaves the ring. */
		if (!(fence->type & ~fence->signaled)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
		}

	}

	if (wake) {
		DRM_WAKEUP(&fm->fence_queue);
	}
}
98
99 EXPORT_SYMBOL(drm_fence_handler);
100
101 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
102 {
103         drm_fence_manager_t *fm = &dev->fm;
104         unsigned long flags;
105
106         write_lock_irqsave(&fm->lock, flags);
107         list_del_init(ring);
108         write_unlock_irqrestore(&fm->lock, flags);
109 }
110
111 void drm_fence_usage_deref_locked(drm_device_t * dev,
112                                   drm_fence_object_t * fence)
113 {
114         if (atomic_dec_and_test(&fence->usage)) {
115                 drm_fence_unring(dev, &fence->ring);
116                 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
117                           fence->base.hash.key);
118                 kmem_cache_free(drm_cache.fence_object, fence);
119         }
120 }
121
122 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
123                                     drm_fence_object_t * fence)
124 {
125         if (atomic_dec_and_test(&fence->usage)) {
126                 mutex_lock(&dev->struct_mutex);
127                 if (atomic_read(&fence->usage) == 0) {
128                         drm_fence_unring(dev, &fence->ring);
129                         kmem_cache_free(drm_cache.fence_object, fence);
130                 }
131                 mutex_unlock(&dev->struct_mutex);
132         }
133 }
134
135 static void drm_fence_object_destroy(drm_file_t * priv,
136                                      drm_user_object_t * base)
137 {
138         drm_device_t *dev = priv->head->dev;
139         drm_fence_object_t *fence =
140             drm_user_object_entry(base, drm_fence_object_t, base);
141
142         drm_fence_usage_deref_locked(dev, fence);
143 }
144
145 static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
146                           uint32_t mask, int poke_flush)
147 {
148         unsigned long flags;
149         int signaled;
150         drm_fence_manager_t *fm = &dev->fm;
151         drm_fence_driver_t *driver = dev->driver->fence_driver;
152
153         if (poke_flush)
154                 driver->poke_flush(dev);
155         read_lock_irqsave(&fm->lock, flags);
156         signaled =
157             (fence->type & mask & fence->signaled) == (fence->type & mask);
158         read_unlock_irqrestore(&fm->lock, flags);
159
160         return signaled;
161 }
162
/*
 * Record that an EXE flush up to "sequence" is needed.  If no exe flush
 * is currently pending, (re)establish last_exe_flush from the oldest
 * fence on the ring and start a new pending flush; otherwise just extend
 * the pending flush sequence forward (all comparisons wrap-safe).
 * NOTE(review): assumes the caller holds fm->lock -- confirm at call sites.
 */
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
				drm_fence_driver_t * driver, uint32_t sequence)
{
	uint32_t diff;

	if (!fm->pending_exe_flush) {
		struct list_head *list;

		/*
		 * Last_exe_flush is invalid. Find oldest sequence.
		 */

/*		list = fm->fence_types[_DRM_FENCE_TYPE_EXE];*/
		list = &fm->ring;
		if (list->next == &fm->ring) {
			/* Ring is empty: nothing to flush. */
			return;
		} else {
			drm_fence_object_t *fence =
			    list_entry(list->next, drm_fence_object_t, ring);
			fm->last_exe_flush = (fence->sequence - 1) &
			    driver->sequence_mask;
		}
		/* Already covered if "sequence" is not ahead of last_exe_flush. */
		diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
		if (diff >= driver->wrap_diff)
			return;
		fm->exe_flush_sequence = sequence;
		fm->pending_exe_flush = 1;
	} else {
		/* A flush is pending: extend it if "sequence" is newer. */
		diff =
		    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
		if (diff < driver->wrap_diff) {
			fm->exe_flush_sequence = sequence;
		}
	}
}
198
199 int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
200 {
201         return ((fence->signaled & type) == type);
202 }
203
204 /*
205  * Make sure old fence objects are signaled before their fence sequences are
206  * wrapped around and reused.
207  */
208
/*
 * Request a flush of the given fence "type" bits.  Only types the fence
 * was created with may be flushed.  EXE flushes are routed through
 * drm_fence_flush_exe(); other types are accumulated in fm->pending_flush
 * for the driver's poke_flush() hook to act on.
 *
 * Returns 0 on success, -EINVAL if "type" extends fence->type.
 */
int drm_fence_object_flush(drm_device_t * dev,
			   drm_fence_object_t * fence, uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;

	if (type & ~fence->type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->type);
		return -EINVAL;
	}

	write_lock_irqsave(&fm->lock, flags);
	fence->flush_mask |= type;
	/* Only submit when nothing beyond the signaled bits is in flight. */
	if (fence->submitted_flush == fence->signaled) {
		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
			/* Submit the exe flush first; other bits follow later. */
			drm_fence_flush_exe(fm, driver, fence->sequence);
			fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
		} else {
			fm->pending_flush |= (fence->flush_mask &
					      ~fence->submitted_flush);
			fence->submitted_flush = fence->flush_mask;
		}
	}
	write_unlock_irqrestore(&fm->lock, flags);
	driver->poke_flush(dev);
	return 0;
}
239
/*
 * Flush the oldest fence on the ring if it is older than the flush
 * horizon (sequence - driver->flush_diff), so old fences signal before
 * their sequence numbers wrap around and are reused.
 *
 * NOTE(review): the read lock taken here is released only after the
 * struct_mutex has been dropped; this lock ordering looks fragile --
 * confirm it against the locking rules used elsewhere in the driver.
 */
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	uint32_t old_sequence;
	unsigned long flags;
	drm_fence_object_t *fence;
	uint32_t diff;

	mutex_lock(&dev->struct_mutex);
	read_lock_irqsave(&fm->lock, flags);
	if (fm->ring.next == &fm->ring) {
		/* No fences outstanding. */
		read_unlock_irqrestore(&fm->lock, flags);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
	/* The oldest fence sits at the head of the ring. */
	fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
	/* Hold a reference so the fence survives once the mutex is dropped. */
	atomic_inc(&fence->usage);
	mutex_unlock(&dev->struct_mutex);
	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
	read_unlock_irqrestore(&fm->lock, flags);
	if (diff < driver->wrap_diff) {
		/* Fence is older than the horizon: flush all its types. */
		drm_fence_object_flush(dev, fence, fence->type);
	}
	drm_fence_usage_deref_unlocked(dev, fence);
}
267
268 EXPORT_SYMBOL(drm_fence_flush_old);
269
270 int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
271                           int lazy, int ignore_signals, uint32_t mask)
272 {
273         drm_fence_manager_t *fm = &dev->fm;
274         drm_fence_driver_t *driver = dev->driver->fence_driver;
275         int ret = 0;
276         unsigned long _end;
277         int signaled;
278
279         if (mask & ~fence->type) {
280                 DRM_ERROR("Wait trying to extend fence type"
281                           " 0x%08x 0x%08x\n", mask, fence->type);
282                 return -EINVAL;
283         }
284
285         if (fence_signaled(dev, fence, mask, 0))
286                 return 0;
287
288         _end = jiffies + 3 * DRM_HZ;
289
290         drm_fence_object_flush(dev, fence, mask);
291
292         if (lazy && driver->lazy_capable) {
293
294                 do {
295                         DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
296                                     fence_signaled(dev, fence, mask, 1));
297                         if (time_after_eq(jiffies, _end))
298                                 break;
299                 } while (ret == -EINTR && ignore_signals);
300                 if (time_after_eq(jiffies, _end) && (ret != 0))
301                         ret = -EBUSY;
302                 if (ret) {
303                         if (ret == -EBUSY) {
304                                 DRM_ERROR("Fence timout. GPU lockup.\n");
305                         }
306                         return ((ret == -EINTR) ? -EAGAIN : ret);
307                 }
308         } else if ((fence->class == 0) && (mask & DRM_FENCE_TYPE_EXE) &&
309                    driver->lazy_capable) {
310
311                 /*
312                  * We use IRQ wait for EXE fence if available to gain 
313                  * CPU in some cases.
314                  */
315
316                 do {
317                         DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
318                                     fence_signaled(dev, fence, DRM_FENCE_TYPE_EXE,
319                                                    1));
320                         if (time_after_eq(jiffies, _end))
321                                 break;
322                 } while (ret == -EINTR && ignore_signals);
323                 if (time_after_eq(jiffies, _end) && (ret != 0))
324                         ret = -EBUSY;
325                 if (ret)
326                         return ((ret == -EINTR) ? -EAGAIN : ret);
327         }
328
329         if (fence_signaled(dev, fence, mask, 0))
330                 return 0;
331
332         /*
333          * Avoid kernel-space busy-waits.
334          */
335 #if 1
336         if (!ignore_signals)
337                 return -EAGAIN;
338 #endif
339         do { 
340                 schedule();
341                 signaled = fence_signaled(dev, fence, mask, 1);
342         } while (!signaled && !time_after_eq(jiffies, _end));
343
344         if (!signaled)
345                 return -EBUSY;
346
347         return 0;
348 }
349
/*
 * (Re)emit a fence to the hardware: unhook it from the ring, have the
 * driver emit a new sequence number, then reset its flush/signal
 * bookkeeping and link it at the tail of the ring under the manager's
 * write lock.
 *
 * Returns 0 on success or the error code from driver->emit().
 */
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
			  uint32_t fence_flags, uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;
	uint32_t sequence;
	uint32_t native_type;
	int ret;

	drm_fence_unring(dev, &fence->ring);
	ret = driver->emit(dev, fence_flags, &sequence, &native_type);
	if (ret)
		return ret;

	write_lock_irqsave(&fm->lock, flags);
	fence->type = type;
	fence->flush_mask = 0x00;
	fence->submitted_flush = 0x00;
	fence->signaled = 0x00;
	fence->sequence = sequence;
	fence->native_type = native_type;
	/* Tail of the ring == newest fence. */
	list_add_tail(&fence->ring, &fm->ring);
	write_unlock_irqrestore(&fm->lock, flags);
	return 0;
}
376
/*
 * Initialize a freshly allocated fence object.  The usage count starts
 * at 1; when DRM_FENCE_FLAG_EMIT is set the fence is emitted to the
 * hardware immediately.
 *
 * NOTE(review): taking dev->struct_mutex just around atomic_set() looks
 * redundant -- presumably kept for ordering with the deref paths; confirm
 * before changing.
 */
static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
				 uint32_t fence_flags,
				 drm_fence_object_t * fence)
{
	int ret = 0;
	unsigned long flags;
	drm_fence_manager_t *fm = &dev->fm;

	mutex_lock(&dev->struct_mutex);
	atomic_set(&fence->usage, 1);
	mutex_unlock(&dev->struct_mutex);

	write_lock_irqsave(&fm->lock, flags);
	INIT_LIST_HEAD(&fence->ring);
	fence->class = 0;
	fence->type = type;
	fence->flush_mask = 0;
	fence->submitted_flush = 0;
	fence->signaled = 0;
	fence->sequence = 0;
	write_unlock_irqrestore(&fm->lock, flags);
	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
		ret = drm_fence_object_emit(dev, fence, fence_flags, type);
	}
	return ret;
}
403
404
405 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
406                               int shareable)
407 {
408         drm_device_t *dev = priv->head->dev;
409         int ret;
410
411         mutex_lock(&dev->struct_mutex);
412         ret = drm_add_user_object(priv, &fence->base, shareable);
413         mutex_unlock(&dev->struct_mutex);
414         if (ret)
415                 return ret;
416         fence->base.type = drm_fence_type;
417         fence->base.remove = &drm_fence_object_destroy;
418         DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
419         return 0;
420 }
421
422 EXPORT_SYMBOL(drm_fence_add_user_object);
423
424 int drm_fence_object_create(drm_device_t * dev, uint32_t type,
425                             unsigned flags, drm_fence_object_t ** c_fence)
426 {
427         drm_fence_object_t *fence;
428         int ret;
429
430         fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
431         if (!fence)
432                 return -ENOMEM;
433         ret = drm_fence_object_init(dev, type, flags, fence);
434         if (ret) {
435                 drm_fence_usage_deref_unlocked(dev, fence);
436                 return ret;
437         }
438         *c_fence = fence;
439         return 0;
440 }
441
442 EXPORT_SYMBOL(drm_fence_object_create);
443
444 void drm_fence_manager_init(drm_device_t * dev)
445 {
446         drm_fence_manager_t *fm = &dev->fm;
447         drm_fence_driver_t *fed = dev->driver->fence_driver;
448         int i;
449
450         fm->lock = RW_LOCK_UNLOCKED;
451         INIT_LIST_HEAD(&fm->ring);
452         fm->pending_flush = 0;
453         DRM_INIT_WAITQUEUE(&fm->fence_queue);
454         fm->initialized = 0;
455         if (fed) {
456                 fm->initialized = 1;
457                 for (i = 0; i < fed->no_types; ++i) {
458                         fm->fence_types[i] = &fm->ring;
459                 }
460         }
461 }
462
/*
 * Fence manager teardown is currently a no-op: everything the manager
 * uses is embedded in drm_device_t, so there is nothing to free here.
 */
void drm_fence_manager_takedown(drm_device_t * dev)
{
}
466
467 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
468 {
469         drm_device_t *dev = priv->head->dev;
470         drm_user_object_t *uo;
471         drm_fence_object_t *fence;
472
473         mutex_lock(&dev->struct_mutex);
474         uo = drm_lookup_user_object(priv, handle);
475         if (!uo || (uo->type != drm_fence_type)) {
476                 mutex_unlock(&dev->struct_mutex);
477                 return NULL;
478         }
479         fence = drm_user_object_entry(uo, drm_fence_object_t, base);
480         atomic_inc(&fence->usage);
481         mutex_unlock(&dev->struct_mutex);
482         return fence;
483 }
484
/*
 * The fence ioctl: multiplexes the create/destroy/reference/unreference/
 * signaled/flush/wait/emit/buffers operations on fence user objects.
 * Cases that fall out of the switch copy the fence's current
 * class/type/signaled state back to user space and drop the lookup
 * reference.
 */
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	int ret;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_arg_t arg;
	drm_fence_object_t *fence;
	drm_user_object_t *uo;
	unsigned long flags;
	ret = 0;

	if (!fm->initialized) {
		DRM_ERROR("The DRM driver does not support fencing.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
	switch (arg.op) {
	case drm_fence_create:
		/* Emitting at creation time requires the hardware lock. */
		if (arg.flags & DRM_FENCE_FLAG_EMIT)
			LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_object_create(dev, arg.type,
					      arg.flags,
					      &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret) {
			drm_fence_usage_deref_unlocked(dev, fence);
			return ret;
		}

		/*
		 * usage > 0. No need to lock dev->struct_mutex;
		 */

		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	case drm_fence_destroy:
		/* Only the owning file may destroy its fence user object. */
		mutex_lock(&dev->struct_mutex);
		uo = drm_lookup_user_object(priv, arg.handle);
		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}
		ret = drm_remove_user_object(priv, uo);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	case drm_fence_reference:
		ret =
		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
		if (ret)
			return ret;
		/*
		 * NOTE(review): if this lookup returned NULL, the copy-back
		 * below would dereference it; presumably it cannot fail after
		 * a successful drm_user_object_ref() -- confirm.
		 */
		fence = drm_lookup_fence_object(priv, arg.handle);
		break;
	case drm_fence_unreference:
		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
		return ret;
	case drm_fence_signaled:
		/* Status-only query: the copy-back below reports the state. */
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		break;
	case drm_fence_flush:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_flush(dev, fence, arg.type);
		break;
	case drm_fence_wait:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret =
		    drm_fence_object_wait(dev, fence,
					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
					  0, arg.type);
		break;
	case drm_fence_emit:
		LOCK_TEST_WITH_RETURN(dev, filp);
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
		break;
	case drm_fence_buffers:
		/* Create a fence covering pending buffer objects. */
		if (!dev->bm.initialized) {
			DRM_ERROR("Buffer object manager is not initialized\n");
			return -EINVAL;
		}
		LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
					       NULL, &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret)
			return ret;
		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	default:
		return -EINVAL;
	}
	/* Report the fence's current state back to user space. */
	read_lock_irqsave(&fm->lock, flags);
	arg.class = fence->class;
	arg.type = fence->type;
	arg.signaled = fence->signaled;
	read_unlock_irqrestore(&fm->lock, flags);
	drm_fence_usage_deref_unlocked(dev, fence);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return ret;
}