More bugfixes.
[platform/upstream/libdrm.git] / linux-core / drm_fence.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

static void drm_fm_update_pointers(drm_fence_manager_t * fm,
                                   struct list_head *list, int no_types,
                                   uint32_t type)
{
        int i;
        for (i = 0; i < no_types; ++i) {
                if (type & (1 << i)) {
                        fm->fence_types[i] = list;
                }
        }
}
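
/*
 * The per-type pointers updated above are the fence manager's progress
 * markers: fm->fence_types[i] points at the ring node of the most recently
 * signaled fence of type bit i (or at the ring head when nothing of that
 * type has signaled yet).  drm_fence_handler() starts its walk from these
 * pointers and advances them through drm_fm_update_pointers(), so fences
 * that have already been processed for a given type are not rescanned.
 */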

/*
 * Typically called by the IRQ handler.
 */

void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
        int i;
        int wake = 0;
        int largest = 0;
        uint32_t diff;
        uint32_t relevant;
        int index = 0;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        struct list_head *list;
        struct list_head *fence_list;
        drm_fence_object_t *fence;
        int found = 0;

        for (i = 0; i < driver->no_types; ++i) {
                if (!(type & (1 << i)))
                        continue;

                list = fm->fence_types[i];
                fence_list = list->next;

                if (fence_list == &fm->ring)
                        continue;

                fence = list_entry(fence_list, drm_fence_object_t, ring);

                diff = (sequence - fence->sequence) & driver->sequence_mask;

                if (diff < driver->wrap_diff) {
                        if (diff >= largest) {
                                largest = diff;
                                index = i;
                                found = 1;
                        }
                }
        }

        if (!found)
                return;

        /*
         * Start with the fence object that has the lowest sequence number
         * affected by the type mask of this call. Update the signaled fields
         * and check whether we need to wake sleeping processes.
         */

        list = fm->fence_types[index]->next;
        do {
                if (list == &fm->ring) {
                        drm_fm_update_pointers(fm, list->prev,
                                               driver->no_types, type);
                        break;
                }
                fence = list_entry(list, drm_fence_object_t, ring);
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff >= driver->wrap_diff) {
                        drm_fm_update_pointers(fm, fence->ring.prev,
                                               driver->no_types, type);
                        break;
                }
                relevant = type & fence->type;
                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
#ifdef BODEBUG
                        DRM_ERROR("Fence 0x%08lx signaled 0x%08x\n",
                                  fence->base.hash.key, fence->signaled);
#endif
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }

                relevant = fence->flush_mask &
                    ~(fence->signaled | fence->submitted_flush);
                if (relevant) {
                        fm->pending_flush |= relevant;
                        fence->submitted_flush = fence->flush_mask;
                }

                list = list->next;

                /*
                 * Remove a completely signaled fence from the
                 * fence manager ring.
                 */

                if (!(fence->type & ~fence->signaled)) {
#ifdef BODEBUG
                        DRM_ERROR("Fence completely signaled 0x%08lx\n",
                                  fence->base.hash.key);
#endif
                        fence_list = &fence->ring;
                        for (i = 0; i < driver->no_types; ++i) {
                                if (fm->fence_types[i] == fence_list)
                                        fm->fence_types[i] = fence_list->prev;
                        }
                        list_del_init(fence_list);
                }

        } while (1);

        /*
         * Wake sleeping processes.
         */

        if (wake) {
                DRM_WAKEUP(&fm->fence_queue);
        }
}

EXPORT_SYMBOL(drm_fence_handler);
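
/*
 * A rough sketch of how a driver interrupt handler might feed this function.
 * The breadcrumb read below stands in for whatever hardware-specific
 * mechanism the driver uses to fetch the last completed sequence number;
 * driver_read_breadcrumb() is made up for illustration and is not defined
 * in this file:
 *
 *      irqreturn_t driver_irq_handler(DRM_IRQ_ARGS)
 *      {
 *              drm_device_t *dev = (drm_device_t *) arg;
 *              uint32_t sequence = driver_read_breadcrumb(dev);
 *
 *              drm_fence_handler(dev, sequence, DRM_FENCE_EXE);
 *              return IRQ_HANDLED;
 *      }
 */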

static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;
        int i;

        write_lock_irqsave(&fm->lock, flags);
        for (i = 0; i < driver->no_types; ++i) {
                if (fm->fence_types[i] == ring)
                        fm->fence_types[i] = ring->prev;
        }
        list_del_init(ring);
        write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_usage_deref_locked(drm_device_t * dev,
                                  drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                drm_fence_unring(dev, &fence->ring);
#ifdef BODEBUG
                DRM_ERROR("Destroyed a fence object 0x%08lx\n",
                          fence->base.hash.key);
#endif
                kmem_cache_free(drm_cache.fence_object, fence);
        }
}

void drm_fence_usage_deref_unlocked(drm_device_t * dev,
                                    drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&fence->usage) == 0) {
                        drm_fence_unring(dev, &fence->ring);
                        kmem_cache_free(drm_cache.fence_object, fence);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}
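
/*
 * The re-check of fence->usage under dev->struct_mutex above appears to
 * guard against a racing drm_lookup_fence_object(), which takes struct_mutex
 * and bumps the reference count: the object is only unrung and freed if it
 * is still unreferenced once the mutex is held.
 */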

static void drm_fence_object_destroy(drm_file_t * priv,
                                     drm_user_object_t * base)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence =
            drm_user_object_entry(base, drm_fence_object_t, base);

        drm_fence_usage_deref_locked(dev, fence);
}

static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
                          uint32_t mask, int poke_flush)
{
        unsigned long flags;
        int signaled;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;

        if (poke_flush)
                driver->poke_flush(dev);
        read_lock_irqsave(&fm->lock, flags);
        signaled =
            (fence->type & mask & fence->signaled) == (fence->type & mask);
        read_unlock_irqrestore(&fm->lock, flags);

        return signaled;
}
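
/*
 * When poke_flush is nonzero, fence_signaled() first calls the driver's
 * poke_flush() hook, presumably so the driver can act on any pending flushes
 * (and report newly signaled types through drm_fence_handler()) before the
 * signaled mask is sampled under the fence manager lock.
 */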

static void drm_fence_flush_exe(drm_fence_manager_t * fm,
                                drm_fence_driver_t * driver, uint32_t sequence)
{
        uint32_t diff;

        if (!fm->pending_exe_flush) {
                struct list_head *list;

                /*
                 * Last_exe_flush is invalid. Find oldest sequence.
                 */

                list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
                if (list->next == &fm->ring) {
                        return;
                } else {
                        drm_fence_object_t *fence =
                            list_entry(list->next, drm_fence_object_t, ring);
                        fm->last_exe_flush = (fence->sequence - 1) &
                            driver->sequence_mask;
                }
                diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
                if (diff >= driver->wrap_diff)
                        return;
                fm->exe_flush_sequence = sequence;
                fm->pending_exe_flush = 1;
        } else {
                diff =
                    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
                if (diff < driver->wrap_diff) {
                        fm->exe_flush_sequence = sequence;
                }
        }
}
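
/*
 * Sequence numbers throughout this file are compared modulo the driver's
 * sequence_mask, with wrap_diff as the cut-off between "ahead of" and
 * "wrapped past".  A small worked example, assuming a full 32-bit mask and
 * a wrap_diff of 0x80000000 (both values are illustrative):
 *
 *      sequence = 0x00000002, fence->sequence = 0xfffffffe
 *      diff = (0x00000002 - 0xfffffffe) & 0xffffffff = 0x00000004
 *
 * diff < wrap_diff, so the hardware sequence is taken to have passed the
 * fence even though it is numerically smaller.
 */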

int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
{
        return ((fence->signaled & type) == type);
}

/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

int drm_fence_object_flush(drm_device_t * dev,
                           drm_fence_object_t * fence, uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;

        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type\n");
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, flags);
        fence->flush_mask |= type;
        if (fence->submitted_flush == fence->signaled) {
                if ((fence->type & DRM_FENCE_EXE) &&
                    !(fence->submitted_flush & DRM_FENCE_EXE)) {
                        drm_fence_flush_exe(fm, driver, fence->sequence);
                        fence->submitted_flush |= DRM_FENCE_EXE;
                } else {
                        fm->pending_flush |= (fence->flush_mask &
                                              ~fence->submitted_flush);
                        fence->submitted_flush = fence->flush_mask;
                }
        }
        write_unlock_irqrestore(&fm->lock, flags);
        driver->poke_flush(dev);
        return 0;
}
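
/*
 * Rough summary of the flush bookkeeping above: fence->flush_mask holds the
 * types the caller wants flushed, fence->submitted_flush the types for which
 * a flush has already been queued, and fm->pending_flush accumulates work
 * the driver presumably picks up from its poke_flush() hook.  EXE flushes
 * are routed through drm_fence_flush_exe() so that only the newest requested
 * sequence number is tracked.
 */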

void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        uint32_t old_sequence;
        unsigned long flags;
        drm_fence_object_t *fence;
        uint32_t diff;

        mutex_lock(&dev->struct_mutex);
        read_lock_irqsave(&fm->lock, flags);
        if (fm->ring.next == &fm->ring) {
                read_unlock_irqrestore(&fm->lock, flags);
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
        fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
        atomic_inc(&fence->usage);
        mutex_unlock(&dev->struct_mutex);
        diff = (old_sequence - fence->sequence) & driver->sequence_mask;
        read_unlock_irqrestore(&fm->lock, flags);
        if (diff < driver->wrap_diff) {
                drm_fence_object_flush(dev, fence, fence->type);
        }
        drm_fence_usage_deref_unlocked(dev, fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);
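
/*
 * A driver would typically call this from its command submission path once a
 * new sequence number has been emitted, along the lines of (illustrative
 * only, driver_emit() is not defined in this file):
 *
 *      driver_emit(dev, &sequence);
 *      drm_fence_flush_old(dev, sequence);
 *
 * so that fences more than driver->flush_diff sequence numbers behind the
 * newly emitted one are flushed before the counter can wrap past them.
 */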

int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        int ret = 0;
        unsigned long _end;

        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type\n");
                return -EINVAL;
        }

        if (fence_signaled(dev, fence, mask, 0))
                return 0;

        _end = jiffies + 3 * DRM_HZ;

        drm_fence_object_flush(dev, fence, mask);
        if (lazy && driver->lazy_capable) {
                do {
                        DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
                                    fence_signaled(dev, fence, mask, 1));
                        if (time_after_eq(jiffies, _end))
                                break;
                } while (ret == -EINTR && ignore_signals);
                if (time_after_eq(jiffies, _end) && (ret != 0))
                        ret = -EBUSY;
                if (ret)
                        return ((ret == -EINTR) ? -EAGAIN : ret);
        } else {
                int signaled;
                do {
                        signaled = fence_signaled(dev, fence, mask, 1);
                } while (!signaled && !time_after_eq(jiffies, _end));
                if (!signaled)
                        return -EBUSY;
        }
        return 0;
}

int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
                          uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;
        uint32_t sequence;
        int ret;

        drm_fence_unring(dev, &fence->ring);
        ret = driver->emit(dev, &sequence);
        if (ret)
                return ret;

        write_lock_irqsave(&fm->lock, flags);
        fence->type = type;
        fence->flush_mask = 0x00;
        fence->submitted_flush = 0x00;
        fence->signaled = 0x00;
        fence->sequence = sequence;
        list_add_tail(&fence->ring, &fm->ring);
        write_unlock_irqrestore(&fm->lock, flags);
        return 0;
}

int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
                          drm_fence_object_t * fence)
{
        int ret = 0;
        unsigned long flags;
        drm_fence_manager_t *fm = &dev->fm;

        mutex_lock(&dev->struct_mutex);
        atomic_set(&fence->usage, 1);
        mutex_unlock(&dev->struct_mutex);

        write_lock_irqsave(&fm->lock, flags);
        INIT_LIST_HEAD(&fence->ring);
        fence->class = 0;
        fence->type = type;
        fence->flush_mask = 0;
        fence->submitted_flush = 0;
        fence->signaled = 0;
        fence->sequence = 0;
        write_unlock_irqrestore(&fm->lock, flags);
        if (emit) {
                ret = drm_fence_object_emit(dev, fence, type);
        }
        return ret;
}

EXPORT_SYMBOL(drm_fence_object_init);

int drm_fence_add_user_object(drm_file_t *priv, drm_fence_object_t *fence,
                              int shareable)
{
        drm_device_t *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(priv, &fence->base, shareable);
        mutex_unlock(&dev->struct_mutex);
        if (ret)
                return ret;
        fence->base.type = drm_fence_type;
        fence->base.remove = &drm_fence_object_destroy;
#ifdef BODEBUG
        DRM_ERROR("Fence 0x%08lx created\n", fence->base.hash.key);
#endif
        return 0;
}
EXPORT_SYMBOL(drm_fence_add_user_object);

int drm_fence_object_create(drm_device_t *dev, uint32_t type,
                            int emit, drm_fence_object_t **c_fence)
{
        drm_fence_object_t *fence;
        int ret;

        fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        ret = drm_fence_object_init(dev, type, emit, fence);
        if (ret) {
                drm_fence_usage_deref_unlocked(dev, fence);
                return ret;
        }
        *c_fence = fence;
        return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);
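
/*
 * A minimal in-kernel usage sketch, with error handling omitted and
 * DRM_FENCE_EXE used purely as an example type:
 *
 *      drm_fence_object_t *fence;
 *
 *      if (!drm_fence_object_create(dev, DRM_FENCE_EXE, 1, &fence)) {
 *              drm_fence_object_wait(dev, fence, 1, 0, fence->type);
 *              drm_fence_usage_deref_unlocked(dev, fence);
 *      }
 */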


void drm_fence_manager_init(drm_device_t * dev)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *fed = dev->driver->fence_driver;
        int i;

        fm->lock = RW_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&fm->ring);
        fm->pending_flush = 0;
        DRM_INIT_WAITQUEUE(&fm->fence_queue);
        fm->initialized = 0;
        if (fed) {
                fm->initialized = 1;
                for (i = 0; i < fed->no_types; ++i) {
                        fm->fence_types[i] = &fm->ring;
                }
        }
}

void drm_fence_manager_takedown(drm_device_t * dev)
{
}

drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_user_object_t *uo;
        drm_fence_object_t *fence;

        mutex_lock(&dev->struct_mutex);
        uo = drm_lookup_user_object(priv, handle);
        if (!uo || (uo->type != drm_fence_type)) {
                mutex_unlock(&dev->struct_mutex);
                return NULL;
        }
        fence = drm_user_object_entry(uo, drm_fence_object_t, base);
        atomic_inc(&fence->usage);
        mutex_unlock(&dev->struct_mutex);
        return fence;
}

int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        int ret;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_arg_t arg;
        drm_fence_object_t *fence;
        drm_user_object_t *uo;
        unsigned long flags;
        ret = 0;

        if (!fm->initialized) {
                DRM_ERROR("The DRM driver does not support fencing.\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        switch (arg.op) {
        case drm_fence_create:
                if (arg.flags & DRM_FENCE_FLAG_EMIT)
                        LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_object_create(dev, arg.type,
                                              arg.flags & DRM_FENCE_FLAG_EMIT,
                                              &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        return ret;
                }

                /*
                 * usage > 0. No need to lock dev->struct_mutex.
                 */

                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        case drm_fence_destroy:
                mutex_lock(&dev->struct_mutex);
                uo = drm_lookup_user_object(priv, arg.handle);
                if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
                        mutex_unlock(&dev->struct_mutex);
                        return -EINVAL;
                }
                ret = drm_remove_user_object(priv, uo);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        case drm_fence_reference:
                ret =
                    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
                if (ret)
                        return ret;
                fence = drm_lookup_fence_object(priv, arg.handle);
                break;
        case drm_fence_unreference:
                ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
                return ret;
        case drm_fence_signaled:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                break;
        case drm_fence_flush:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_flush(dev, fence, arg.type);
                break;
        case drm_fence_wait:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret =
                    drm_fence_object_wait(dev, fence,
                                          arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
                                          0,
                                          arg.type);
                break;
        case drm_fence_emit:
                LOCK_TEST_WITH_RETURN(dev, filp);
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_emit(dev, fence, arg.type);
                break;
        case drm_fence_buffers:
                if (!dev->bm.initialized) {
                        DRM_ERROR("Buffer object manager is not initialized\n");
                        return -EINVAL;
                }
                LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_buffer_objects(priv, NULL, NULL, &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret)
                        return ret;
                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        default:
                return -EINVAL;
        }
        read_lock_irqsave(&fm->lock, flags);
        arg.class = fence->class;
        arg.type = fence->type;
        arg.signaled = fence->signaled;
        read_unlock_irqrestore(&fm->lock, flags);
        drm_fence_usage_deref_unlocked(dev, fence);

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return ret;
}
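
/*
 * From user space, all fence operations are multiplexed through this single
 * ioctl and drm_fence_arg_t.  A rough sketch of a create-then-wait sequence,
 * assuming the request number is exposed to user space as DRM_IOCTL_FENCE
 * (that name is not defined in this file):
 *
 *      drm_fence_arg_t arg;
 *
 *      memset(&arg, 0, sizeof(arg));
 *      arg.op = drm_fence_create;
 *      arg.type = DRM_FENCE_EXE;
 *      arg.flags = DRM_FENCE_FLAG_EMIT;
 *      ioctl(fd, DRM_IOCTL_FENCE, &arg);
 *
 *      arg.op = drm_fence_wait;
 *      arg.flags = DRM_FENCE_FLAG_WAIT_LAZY;
 *      ioctl(fd, DRM_IOCTL_FENCE, &arg);
 *
 * The first call fills in arg.handle with the new fence handle; the second
 * returns once the requested types have signaled, or fails with -EBUSY when
 * the wait in drm_fence_object_wait() times out.
 */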