linux-core/drm_fence.c  (platform/upstream/libdrm.git, commit eaaf7f4019f887f50e134362cf8914c831de3170)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

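/*
 * Fence manager bookkeeping, as used throughout this file:
 *
 * fm->ring holds the active fence objects in submission order
 * (drm_fence_object_emit() adds new fences at the tail).
 * fm->fence_types[i] is, roughly speaking, a per-type cursor into that
 * ring: it points at the node immediately preceding the oldest fence
 * that drm_fence_handler() has not yet processed for type (1 << i), and
 * at &fm->ring itself when nothing is pending for that type.
 */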
static void drm_fm_update_pointers(drm_fence_manager_t * fm,
                                   struct list_head *list, int no_types,
                                   uint32_t type)
{
        int i;
        for (i = 0; i < no_types; ++i) {
                if (type & (1 << i)) {
                        fm->fence_types[i] = list;
                }
        }
}

/*
 * Typically called by the IRQ handler.
 */

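/*
 * Illustrative sketch (not part of this file) of how a driver's IRQ
 * handler might feed completed sequence numbers into the fence manager.
 * The breadcrumb register and the foo_*/FOO_* names are hypothetical:
 *
 *     irqreturn_t foo_driver_irq_handler(DRM_IRQ_ARGS)
 *     {
 *             drm_device_t *dev = (drm_device_t *) arg;
 *             uint32_t sequence = FOO_READ_BREADCRUMB(dev);
 *
 *             drm_fence_handler(dev, sequence, DRM_FENCE_EXE);
 *             return IRQ_HANDLED;
 *     }
 */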
void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
{
        int i;
        int wake = 0;
        int largest = 0;
        uint32_t diff;
        uint32_t relevant;
        int index = 0;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        struct list_head *list;
        struct list_head *fence_list;
        drm_fence_object_t *fence;
        int found = 0;

        for (i = 0; i < driver->no_types; ++i) {
                if (!(type & (1 << i)))
                        continue;

                list = fm->fence_types[i];
                fence_list = list->next;

                if (fence_list == &fm->ring)
                        continue;

                fence = list_entry(fence_list, drm_fence_object_t, ring);

                diff = (sequence - fence->sequence) & driver->sequence_mask;

                if (diff < driver->wrap_diff) {
                        if (diff >= largest) {
                                largest = diff;
                                index = i;
                                found = 1;
                        }
                }
        }

        if (!found)
                return;

        /*
         * Start with the fence object that has the lowest sequence number
         * and is affected by the type mask of this call. Update the
         * signaled fields and check whether we need to wake sleeping
         * processes.
         */

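        /*
         * Note on the wrap-around arithmetic used above and below: with a
         * full 32-bit sequence_mask, (sequence - fence->sequence) &
         * sequence_mask is the unsigned distance the hardware counter has
         * advanced past the fence. For example, with sequence_mask ==
         * 0xffffffff, sequence == 0x00000002 and fence->sequence ==
         * 0xfffffffe, the difference is 4, so a fence emitted just before
         * the counter wrapped is still seen as completed. A distance of
         * wrap_diff or more is interpreted as "not reached yet".
         */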
        list = fm->fence_types[index]->next;
        do {
                if (list == &fm->ring) {
                        drm_fm_update_pointers(fm, list->prev,
                                               driver->no_types, type);
                        break;
                }
                fence = list_entry(list, drm_fence_object_t, ring);
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff >= driver->wrap_diff) {
                        drm_fm_update_pointers(fm, fence->ring.prev,
                                               driver->no_types, type);
                        break;
                }
                relevant = type & fence->type;
                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
#ifdef BODEBUG
                        DRM_ERROR("Fence 0x%08lx signaled 0x%08x\n",
                                  fence->base.hash.key, fence->signaled);
#endif
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }

                relevant = fence->flush_mask &
                    ~(fence->signaled | fence->submitted_flush);
                if (relevant) {
                        fm->pending_flush |= relevant;
                        fence->submitted_flush = fence->flush_mask;
                }

                list = list->next;

                /*
                 * Remove a completely signaled fence from the
                 * fence manager ring.
                 */

                if (!(fence->type & ~fence->signaled)) {
#ifdef BODEBUG
                        DRM_ERROR("Fence completely signaled 0x%08lx\n",
                                  fence->base.hash.key);
#endif
                        fence_list = &fence->ring;
                        for (i = 0; i < driver->no_types; ++i) {
                                if (fm->fence_types[i] == fence_list)
                                        fm->fence_types[i] = fence_list->prev;
                        }
                        list_del_init(fence_list);
                }

        } while (1);

        /*
         * Wake sleeping processes.
         */

        if (wake) {
                DRM_WAKEUP(&fm->fence_queue);
        }
}

EXPORT_SYMBOL(drm_fence_handler);

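/*
 * Take a fence off the manager ring, retargeting any per-type cursor
 * that currently points at it so no cursor is left referencing a
 * removed list node.
 */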
static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;
        int i;

        write_lock_irqsave(&fm->lock, flags);
        for (i = 0; i < driver->no_types; ++i) {
                if (fm->fence_types[i] == ring)
                        fm->fence_types[i] = ring->prev;
        }
        list_del_init(ring);
        write_unlock_irqrestore(&fm->lock, flags);
}

void drm_fence_usage_deref_locked(drm_device_t * dev,
                                  drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                drm_fence_unring(dev, &fence->ring);
#ifdef BODEBUG
                DRM_ERROR("Destroyed a fence object 0x%08lx\n",
                          fence->base.hash.key);
#endif
                kmem_cache_free(drm_cache.fence_object, fence);
        }
}

void drm_fence_usage_deref_unlocked(drm_device_t * dev,
                                    drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&fence->usage) == 0) {
                        drm_fence_unring(dev, &fence->ring);
                        kmem_cache_free(drm_cache.fence_object, fence);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}

static void drm_fence_object_destroy(drm_file_t * priv,
                                     drm_user_object_t * base)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence =
            drm_user_object_entry(base, drm_fence_object_t, base);

        drm_fence_usage_deref_locked(dev, fence);
}

static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
                          uint32_t mask, int poke_flush)
{
        unsigned long flags;
        int signaled;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;

        if (poke_flush)
                driver->poke_flush(dev);
        read_lock_irqsave(&fm->lock, flags);
        signaled =
            (fence->type & mask & fence->signaled) == (fence->type & mask);
        read_unlock_irqrestore(&fm->lock, flags);

        return signaled;
}

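/*
 * Request an execution ("EXE") flush up to the given sequence number.
 * When no EXE flush is pending, last_exe_flush is re-derived from the
 * oldest fence still waiting on the EXE type; when one is already
 * pending, the target sequence is simply advanced if the new one is
 * more recent.
 */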
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
                                drm_fence_driver_t * driver, uint32_t sequence)
{
        uint32_t diff;

        if (!fm->pending_exe_flush) {
                struct list_head *list;

                /*
                 * Last_exe_flush is invalid. Find oldest sequence.
                 */

                list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
                if (list->next == &fm->ring) {
                        return;
                } else {
                        drm_fence_object_t *fence =
                            list_entry(list->next, drm_fence_object_t, ring);
                        fm->last_exe_flush = (fence->sequence - 1) &
                            driver->sequence_mask;
                }
                diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
                if (diff >= driver->wrap_diff)
                        return;
                fm->exe_flush_sequence = sequence;
                fm->pending_exe_flush = 1;
        } else {
                diff =
                    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
                if (diff < driver->wrap_diff) {
                        fm->exe_flush_sequence = sequence;
                }
        }
}

int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
{
        return ((fence->signaled & type) == type);
}

/*
 * Make sure old fence objects are signaled before their fence sequences are
 * wrapped around and reused.
 */

int drm_fence_object_flush(drm_device_t * dev,
                           drm_fence_object_t * fence, uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;

        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type\n");
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, flags);
        fence->flush_mask |= type;
        if (fence->submitted_flush == fence->signaled) {
                if ((fence->type & DRM_FENCE_EXE) &&
                    !(fence->submitted_flush & DRM_FENCE_EXE)) {
                        drm_fence_flush_exe(fm, driver, fence->sequence);
                        fence->submitted_flush |= DRM_FENCE_EXE;
                } else {
                        fm->pending_flush |= (fence->flush_mask &
                                              ~fence->submitted_flush);
                        fence->submitted_flush = fence->flush_mask;
                }
        }
        write_unlock_irqrestore(&fm->lock, flags);
        driver->poke_flush(dev);
        return 0;
}

void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        uint32_t old_sequence;
        unsigned long flags;
        drm_fence_object_t *fence;
        uint32_t diff;

        mutex_lock(&dev->struct_mutex);
        read_lock_irqsave(&fm->lock, flags);
        if (fm->ring.next == &fm->ring) {
                read_unlock_irqrestore(&fm->lock, flags);
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
        fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
        atomic_inc(&fence->usage);
        mutex_unlock(&dev->struct_mutex);
        diff = (old_sequence - fence->sequence) & driver->sequence_mask;
        read_unlock_irqrestore(&fm->lock, flags);
        if (diff < driver->wrap_diff) {
                drm_fence_object_flush(dev, fence, fence->type);
        }
        drm_fence_usage_deref_unlocked(dev, fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);

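/*
 * Wait for a fence to signal the given type mask. With lazy set and a
 * lazy-capable driver this sleeps on fm->fence_queue (woken from
 * drm_fence_handler()); otherwise it busy-polls fence_signaled().
 * Either way the wait gives up with -EBUSY after roughly 3 * DRM_HZ.
 */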
int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        int ret = 0;
        unsigned long _end;

        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type\n");
                return -EINVAL;
        }

        if (fence_signaled(dev, fence, mask, 0))
                return 0;

        _end = jiffies + 3 * DRM_HZ;

        drm_fence_object_flush(dev, fence, mask);
        if (lazy && driver->lazy_capable) {
                do {
                        DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
                                    fence_signaled(dev, fence, mask, 1));
                        if (time_after_eq(jiffies, _end))
                                break;
                } while (ret == -EINTR && ignore_signals);

                if (time_after_eq(jiffies, _end) && (ret != 0))
                        ret = -EBUSY;
                return ret;

        } else {
                int signaled;
                do {
                        signaled = fence_signaled(dev, fence, mask, 1);
                } while (!signaled && !time_after_eq(jiffies, _end));
                if (!signaled)
                        return -EBUSY;
        }
        return 0;
}

int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
                          uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;
        uint32_t sequence;
        int ret;

        drm_fence_unring(dev, &fence->ring);
        ret = driver->emit(dev, &sequence);
        if (ret)
                return ret;

        write_lock_irqsave(&fm->lock, flags);
        fence->type = type;
        fence->flush_mask = 0x00;
        fence->submitted_flush = 0x00;
        fence->signaled = 0x00;
        fence->sequence = sequence;
        list_add_tail(&fence->ring, &fm->ring);
        write_unlock_irqrestore(&fm->lock, flags);
        return 0;
}

int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
                          drm_fence_object_t * fence)
{
        int ret = 0;
        unsigned long flags;
        drm_fence_manager_t *fm = &dev->fm;

        mutex_lock(&dev->struct_mutex);
        atomic_set(&fence->usage, 1);
        mutex_unlock(&dev->struct_mutex);

        write_lock_irqsave(&fm->lock, flags);
        INIT_LIST_HEAD(&fence->ring);
        fence->class = 0;
        fence->type = type;
        fence->flush_mask = 0;
        fence->submitted_flush = 0;
        fence->signaled = 0;
        fence->sequence = 0;
        write_unlock_irqrestore(&fm->lock, flags);
        if (emit) {
                ret = drm_fence_object_emit(dev, fence, type);
        }
        return ret;
}

EXPORT_SYMBOL(drm_fence_object_init);

int drm_fence_add_user_object(drm_file_t *priv, drm_fence_object_t *fence,
                              int shareable)
{
        drm_device_t *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(priv, &fence->base, shareable);
        mutex_unlock(&dev->struct_mutex);
        if (ret)
                return ret;
        fence->base.type = drm_fence_type;
        fence->base.remove = &drm_fence_object_destroy;
#ifdef BODEBUG
        DRM_ERROR("Fence 0x%08lx created\n", fence->base.hash.key);
#endif
        return 0;
}
EXPORT_SYMBOL(drm_fence_add_user_object);

int drm_fence_object_create(drm_device_t *dev, uint32_t type,
                            int emit, drm_fence_object_t **c_fence)
{
        drm_fence_object_t *fence;
        int ret;

        fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        ret = drm_fence_object_init(dev, type, emit, fence);
        if (ret) {
                drm_fence_usage_deref_unlocked(dev, fence);
                return ret;
        }
        *c_fence = fence;
        return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);
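
/*
 * Illustrative in-kernel usage sketch (not part of this file): a driver
 * that has just submitted a command batch could fence it and wait for
 * completion roughly like this. Error handling is abbreviated and a
 * plain DRM_FENCE_EXE fence type is assumed:
 *
 *     drm_fence_object_t *fence;
 *     int ret;
 *
 *     ret = drm_fence_object_create(dev, DRM_FENCE_EXE, 1, &fence);
 *     if (ret)
 *             return ret;
 *     ret = drm_fence_object_wait(dev, fence, 1, 0, DRM_FENCE_EXE);
 *     drm_fence_usage_deref_unlocked(dev, fence);
 */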

void drm_fence_manager_init(drm_device_t * dev)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *fed = dev->driver->fence_driver;
        int i;

        fm->lock = RW_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&fm->ring);
        fm->pending_flush = 0;
        DRM_INIT_WAITQUEUE(&fm->fence_queue);
        fm->initialized = 0;
        if (fed) {
                fm->initialized = 1;
                for (i = 0; i < fed->no_types; ++i) {
                        fm->fence_types[i] = &fm->ring;
                }
        }
}

void drm_fence_manager_takedown(drm_device_t * dev)
{
}

drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_user_object_t *uo;
        drm_fence_object_t *fence;

        mutex_lock(&dev->struct_mutex);
        uo = drm_lookup_user_object(priv, handle);
        if (!uo || (uo->type != drm_fence_type)) {
                mutex_unlock(&dev->struct_mutex);
                return NULL;
        }
        fence = drm_user_object_entry(uo, drm_fence_object_t, base);
        atomic_inc(&fence->usage);
        mutex_unlock(&dev->struct_mutex);
        return fence;
}

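/*
 * Main fence ioctl. Copies a drm_fence_arg_t from user space, dispatches
 * on arg.op (create, destroy, reference, unreference, signaled, flush,
 * wait, emit, buffers) and, for the ops that leave a fence referenced,
 * copies its class/type/signaled state back to user space.
 */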
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        int ret;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_arg_t arg;
        drm_fence_object_t *fence;
        drm_user_object_t *uo;
        unsigned long flags;
        ret = 0;

        if (!fm->initialized) {
                DRM_ERROR("The DRM driver does not support fencing.\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        switch (arg.op) {
        case drm_fence_create:
                if (arg.flags & DRM_FENCE_FLAG_EMIT)
                        LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_object_create(dev, arg.type,
                                              arg.flags & DRM_FENCE_FLAG_EMIT,
                                              &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        return ret;
                }

                /*
                 * usage > 0. No need to lock dev->struct_mutex.
                 */

                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        case drm_fence_destroy:
                mutex_lock(&dev->struct_mutex);
                uo = drm_lookup_user_object(priv, arg.handle);
                if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
                        mutex_unlock(&dev->struct_mutex);
                        return -EINVAL;
                }
                ret = drm_remove_user_object(priv, uo);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        case drm_fence_reference:
                ret =
                    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
                if (ret)
                        return ret;
                fence = drm_lookup_fence_object(priv, arg.handle);
                break;
        case drm_fence_unreference:
                ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
                return ret;
        case drm_fence_signaled:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                break;
        case drm_fence_flush:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_flush(dev, fence, arg.type);
                break;
        case drm_fence_wait:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret =
                    drm_fence_object_wait(dev, fence,
                                          arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
                                          0, arg.type);
                break;
        case drm_fence_emit:
                LOCK_TEST_WITH_RETURN(dev, filp);
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_emit(dev, fence, arg.type);
                break;
        case drm_fence_buffers:
                if (!dev->bm.initialized) {
                        DRM_ERROR("Buffer object manager is not initialized\n");
                        return -EINVAL;
                }
                LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_buffer_objects(priv, NULL, NULL, &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret)
                        return ret;
                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        default:
                return -EINVAL;
        }
        read_lock_irqsave(&fm->lock, flags);
        arg.class = fence->class;
        arg.type = fence->type;
        arg.signaled = fence->signaled;
        read_unlock_irqrestore(&fm->lock, flags);
        drm_fence_usage_deref_unlocked(dev, fence);

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return ret;
}