Merge branch 'ttm-vram-0-1-branch'
[platform/upstream/libdrm.git] / linux-core / drm_fence.c
1 /**************************************************************************
2  * 
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Typically called by the IRQ handler.
35  */
36
/*
 * Walk the fence ring and mark fences whose sequence numbers have been
 * reached by @sequence as signaled for the reported @type bits.
 * Typically called by the IRQ handler.
 *
 * NOTE(review): the ring is walked without taking fm->lock here, so this
 * appears to rely on the caller holding it — confirm against the drivers
 * that call drm_fence_handler().
 */
void drm_fence_handler(drm_device_t * dev, uint32_t class,
		       uint32_t sequence, uint32_t type)
{
	int wake = 0;
	uint32_t diff;
	uint32_t relevant;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	struct list_head *list, *prev;
	drm_fence_object_t *fence;
	int found = 0;

	if (list_empty(&fm->ring))
		return;

	/*
	 * The ring is ordered oldest to newest (fences are appended with
	 * list_add_tail() in drm_fence_object_emit()). Find the first
	 * fence whose sequence lies ahead of @sequence, i.e. one the
	 * hardware has not yet reached (wrap-safe comparison).
	 */
	list_for_each_entry(fence, &fm->ring, ring) {
		diff = (sequence - fence->sequence) & driver->sequence_mask;
		if (diff > driver->wrap_diff) {
			found = 1;
			break;
		}
	}

	/*
	 * Walk backwards from the newest already-reached fence toward the
	 * head, signaling each fence on the way. Iterate via the saved
	 * 'prev' pointer so the current entry can be unlinked safely.
	 */
	list = (found) ? fence->ring.prev : fm->ring.prev;
	prev = list->prev;

	for (; list != &fm->ring; list = prev, prev = list->prev) {
		fence = list_entry(list, drm_fence_object_t, ring);

		/*
		 * Accumulate native types while walking backwards —
		 * presumably a flush that signaled a newer fence also
		 * covers the native type bits of older fences (confirm
		 * against the fence driver's semantics).
		 */
		type |= fence->native_type;
		relevant = type & fence->type;

		if ((fence->signaled | relevant) != fence->signaled) {
			fence->signaled |= relevant;
			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled);
			fence->submitted_flush |= relevant;
			wake = 1;
		}

		/* Flush bits requested but neither signaled nor submitted. */
		relevant = fence->flush_mask &
		    ~(fence->signaled | fence->submitted_flush);

		if (relevant) {
			fm->pending_flush |= relevant;
			fence->submitted_flush = fence->flush_mask;
		}

		/* A fence with all of its type bits signaled leaves the ring. */
		if (!(fence->type & ~fence->signaled)) {
			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
		}

	}

	/* Wake waiters in drm_fence_lazy_wait() if anything new signaled. */
	if (wake) {
		DRM_WAKEUP(&fm->fence_queue);
	}
}

EXPORT_SYMBOL(drm_fence_handler);
99
100 static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
101 {
102         drm_fence_manager_t *fm = &dev->fm;
103         unsigned long flags;
104
105         write_lock_irqsave(&fm->lock, flags);
106         list_del_init(ring);
107         write_unlock_irqrestore(&fm->lock, flags);
108 }
109
110 void drm_fence_usage_deref_locked(drm_device_t * dev,
111                                   drm_fence_object_t * fence)
112 {
113         drm_fence_manager_t *fm = &dev->fm;
114
115         if (atomic_dec_and_test(&fence->usage)) {
116                 drm_fence_unring(dev, &fence->ring);
117                 DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
118                           fence->base.hash.key);
119                 atomic_dec(&fm->count);
120                 drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
121         }
122 }
123
124 void drm_fence_usage_deref_unlocked(drm_device_t * dev,
125                                     drm_fence_object_t * fence)
126 {
127         drm_fence_manager_t *fm = &dev->fm;
128
129         if (atomic_dec_and_test(&fence->usage)) {
130                 mutex_lock(&dev->struct_mutex);
131                 if (atomic_read(&fence->usage) == 0) {
132                         drm_fence_unring(dev, &fence->ring);
133                         atomic_dec(&fm->count);
134                         drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
135                 }
136                 mutex_unlock(&dev->struct_mutex);
137         }
138 }
139
140 static void drm_fence_object_destroy(drm_file_t * priv,
141                                      drm_user_object_t * base)
142 {
143         drm_device_t *dev = priv->head->dev;
144         drm_fence_object_t *fence =
145             drm_user_object_entry(base, drm_fence_object_t, base);
146
147         drm_fence_usage_deref_locked(dev, fence);
148 }
149
150 static int fence_signaled(drm_device_t * dev,
151                           drm_fence_object_t * fence,
152                           uint32_t mask, int poke_flush)
153 {
154         unsigned long flags;
155         int signaled;
156         drm_fence_manager_t *fm = &dev->fm;
157         drm_fence_driver_t *driver = dev->driver->fence_driver;
158
159         if (poke_flush)
160                 driver->poke_flush(dev, fence->class);
161         read_lock_irqsave(&fm->lock, flags);
162         signaled =
163             (fence->type & mask & fence->signaled) == (fence->type & mask);
164         read_unlock_irqrestore(&fm->lock, flags);
165
166         return signaled;
167 }
168
/*
 * Record that an exe flush is needed up to @sequence.
 *
 * NOTE(review): fm state is modified without locking here; the only
 * caller in this file, drm_fence_object_flush(), holds fm->lock for
 * writing — confirm any other callers do the same.
 */
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
				drm_fence_driver_t * driver, uint32_t sequence)
{
	uint32_t diff;

	if (!fm->pending_exe_flush) {
		struct list_head *list;

		/*
		 * Last_exe_flush is invalid. Find oldest sequence.
		 */

		list = &fm->ring;
		if (list->next == &fm->ring) {
			/* Empty ring: nothing to flush. */
			return;
		} else {
			drm_fence_object_t *fence =
			    list_entry(list->next, drm_fence_object_t, ring);
			/* One before the oldest fence, wrap-safe. */
			fm->last_exe_flush = (fence->sequence - 1) &
			    driver->sequence_mask;
		}
		/* Reject sequences outside the valid wrap window. */
		diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
		if (diff >= driver->wrap_diff)
			return;
		fm->exe_flush_sequence = sequence;
		fm->pending_exe_flush = 1;
	} else {
		/*
		 * A flush is already pending; extend it if @sequence is
		 * newer (within the wrap window) than the recorded one.
		 */
		diff =
		    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
		if (diff < driver->wrap_diff) {
			fm->exe_flush_sequence = sequence;
		}
	}
}
203
204 int drm_fence_object_signaled(drm_fence_object_t * fence,
205                               uint32_t type)
206 {
207         return ((fence->signaled & type) == type);
208 }
209
/*
 * Request that the @type bits of @fence eventually signal, scheduling
 * flushes with the fence driver as needed.
 *
 * Returns 0 on success, or -EINVAL if @type contains bits outside
 * fence->type.
 */
int drm_fence_object_flush(drm_device_t * dev,
			   drm_fence_object_t * fence,
			   uint32_t type)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	unsigned long flags;

	if (type & ~fence->type) {
		DRM_ERROR("Flush trying to extend fence type, "
			  "0x%x, 0x%x\n", type, fence->type);
		return -EINVAL;
	}

	write_lock_irqsave(&fm->lock, flags);
	fence->flush_mask |= type;
	/*
	 * Only submit new flush work when everything previously submitted
	 * has already signaled; otherwise a flush is still in flight.
	 */
	if (fence->submitted_flush == fence->signaled) {
		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
			/* First submit on an exe-capable fence: exe flush. */
			drm_fence_flush_exe(fm, driver, fence->sequence);
			fence->submitted_flush |= DRM_FENCE_TYPE_EXE;
		} else {
			/* Queue the not-yet-submitted bits for the driver. */
			fm->pending_flush |= (fence->flush_mask &
					      ~fence->submitted_flush);
			fence->submitted_flush = fence->flush_mask;
		}
	}
	write_unlock_irqrestore(&fm->lock, flags);
	/* Kick the driver so the pending flushes actually happen. */
	driver->poke_flush(dev, fence->class);
	return 0;
}
241
242 /*
243  * Make sure old fence objects are signaled before their fence sequences are
244  * wrapped around and reused.
245  */
246
/*
 * Take a reference on the oldest fence on the ring and flush it if its
 * sequence number lags @sequence by more than driver->flush_diff.
 *
 * NOTE(review): dev->struct_mutex is dropped while fm->lock is still
 * read-held, and the read lock is only released after computing 'diff'.
 * Unlocking the mutex inside the irqsave read lock is unusual ordering —
 * confirm it is intentional.
 */
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	uint32_t old_sequence;
	unsigned long flags;
	drm_fence_object_t *fence;
	uint32_t diff;

	mutex_lock(&dev->struct_mutex);
	read_lock_irqsave(&fm->lock, flags);
	if (fm->ring.next == &fm->ring) {
		/* Empty ring: nothing old enough to flush. */
		read_unlock_irqrestore(&fm->lock, flags);
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	/* Oldest sequence we still consider "recent enough". */
	old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
	fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
	/* Hold a reference so the fence survives the flush below. */
	atomic_inc(&fence->usage);
	mutex_unlock(&dev->struct_mutex);
	diff = (old_sequence - fence->sequence) & driver->sequence_mask;
	read_unlock_irqrestore(&fm->lock, flags);
	if (diff < driver->wrap_diff) {
		/* Fence is at/behind the horizon: flush all its type bits. */
		drm_fence_object_flush(dev, fence, fence->type);
	}
	drm_fence_usage_deref_unlocked(dev, fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);
276
/*
 * Sleep on the fence queue until the @mask bits of @fence signal, a
 * 3 * DRM_HZ timeout expires, or — unless @ignore_signals — a signal
 * arrives.
 *
 * Returns 0 on success, -EBUSY on timeout (after logging a lockup
 * error), or -EAGAIN if interrupted by a signal.
 */
static int drm_fence_lazy_wait(drm_device_t *dev,
			       drm_fence_object_t *fence,
			       int ignore_signals, uint32_t mask)
{
	drm_fence_manager_t *fm = &dev->fm;
	unsigned long _end = jiffies + 3*DRM_HZ;
	int ret = 0;

	do {
		DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
			    fence_signaled(dev, fence, mask, 1));
		if (time_after_eq(jiffies, _end))
			break;
	} while (ret == -EINTR && ignore_signals);
	/* An unfinished wait past the deadline is treated as a timeout. */
	if (time_after_eq(jiffies, _end) && (ret != 0))
		ret = -EBUSY;
	if (ret) {
		if (ret == -EBUSY) {
			DRM_ERROR("Fence timeout. "
				  "GPU lockup or fence driver was "
				  "taken down.\n");
		}
		/* Map -EINTR to -EAGAIN — presumably so callers retry. */
		return ((ret == -EINTR) ? -EAGAIN : ret);
	}
	return 0;
}
303
/*
 * Wait for the @mask bits of @fence to signal.
 *
 * @lazy:           prefer sleeping waits when the driver supports them.
 * @ignore_signals: keep waiting across signal delivery.
 *
 * Returns 0 when signaled, -EINVAL if @mask extends fence->type,
 * -EAGAIN when interrupted (or when only a busy-wait remains and
 * signals are not ignored), or -EBUSY on timeout.
 */
int drm_fence_object_wait(drm_device_t * dev,
			  drm_fence_object_t * fence,
			  int lazy, int ignore_signals, uint32_t mask)
{
	drm_fence_driver_t *driver = dev->driver->fence_driver;
	int ret = 0;
	unsigned long _end;
	int signaled;

	if (mask & ~fence->type) {
		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->type);
		return -EINVAL;
	}

	/* Fast path: already signaled, no flush poke needed. */
	if (fence_signaled(dev, fence, mask, 0))
		return 0;

	_end = jiffies + 3 * DRM_HZ;

	/* Make sure the requested bits will eventually signal. */
	drm_fence_object_flush(dev, fence, mask);

	if (lazy && driver->lazy_capable) {

		ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
		if (ret)
			return ret;

	} else {

		/* Wait in stages: exe completion first, if irq-driven. */
		if (driver->has_irq(dev, fence->class,
				    DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
						  DRM_FENCE_TYPE_EXE);
			if (ret)
				return ret;
		}

		/* Then the remaining non-exe bits, if irq-driven. */
		if (driver->has_irq(dev, fence->class,
				    mask & ~DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
						  mask);
			if (ret)
				return ret;
		}
	}
	if (fence_signaled(dev, fence, mask, 0))
		return 0;

	DRM_ERROR("Busy wait\n");
	/*
	 * Avoid kernel-space busy-waits.
	 */
#if 1
	if (!ignore_signals)
		return -EAGAIN;
#endif
	/* Last resort: poll with schedule() until signaled or deadline. */
	do {
		schedule();
		signaled = fence_signaled(dev, fence, mask, 1);
	} while (!signaled && !time_after_eq(jiffies, _end));

	if (!signaled)
		return -EBUSY;

	return 0;
}
371
372 int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
373                           uint32_t fence_flags, uint32_t type)
374 {
375         drm_fence_manager_t *fm = &dev->fm;
376         drm_fence_driver_t *driver = dev->driver->fence_driver;
377         unsigned long flags;
378         uint32_t sequence;
379         uint32_t native_type;
380         int ret;
381
382         drm_fence_unring(dev, &fence->ring);
383         ret = driver->emit(dev, fence->class, fence_flags, &sequence, &native_type);
384         if (ret)
385                 return ret;
386
387         write_lock_irqsave(&fm->lock, flags);
388         fence->type = type;
389         fence->flush_mask = 0x00;
390         fence->submitted_flush = 0x00;
391         fence->signaled = 0x00;
392         fence->sequence = sequence;
393         fence->native_type = native_type;
394         list_add_tail(&fence->ring, &fm->ring);
395         write_unlock_irqrestore(&fm->lock, flags);
396         return 0;
397 }
398
399 static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
400                                  uint32_t fence_flags,
401                                  drm_fence_object_t * fence)
402 {
403         int ret = 0;
404         unsigned long flags;
405         drm_fence_manager_t *fm = &dev->fm;
406
407         mutex_lock(&dev->struct_mutex);
408         atomic_set(&fence->usage, 1);
409         mutex_unlock(&dev->struct_mutex);
410
411         write_lock_irqsave(&fm->lock, flags);
412         INIT_LIST_HEAD(&fence->ring);
413         fence->class = 0;
414         fence->type = type;
415         fence->flush_mask = 0;
416         fence->submitted_flush = 0;
417         fence->signaled = 0;
418         fence->sequence = 0;
419         write_unlock_irqrestore(&fm->lock, flags);
420         if (fence_flags & DRM_FENCE_FLAG_EMIT) {
421                 ret = drm_fence_object_emit(dev, fence, fence_flags, type);
422         }
423         return ret;
424 }
425
426 int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
427                               int shareable)
428 {
429         drm_device_t *dev = priv->head->dev;
430         int ret;
431
432         mutex_lock(&dev->struct_mutex);
433         ret = drm_add_user_object(priv, &fence->base, shareable);
434         mutex_unlock(&dev->struct_mutex);
435         if (ret)
436                 return ret;
437         fence->base.type = drm_fence_type;
438         fence->base.remove = &drm_fence_object_destroy;
439         DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
440         return 0;
441 }
442
443 EXPORT_SYMBOL(drm_fence_add_user_object);
444
445 int drm_fence_object_create(drm_device_t * dev, uint32_t type,
446                             unsigned flags, drm_fence_object_t ** c_fence)
447 {
448         drm_fence_object_t *fence;
449         int ret;
450         drm_fence_manager_t *fm = &dev->fm;
451
452         fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
453         if (!fence)
454                 return -ENOMEM;
455         ret = drm_fence_object_init(dev, type, flags, fence);
456         if (ret) {
457                 drm_fence_usage_deref_unlocked(dev, fence);
458                 return ret;
459         }
460         *c_fence = fence;
461         atomic_inc(&fm->count);
462
463         return 0;
464 }
465
466 EXPORT_SYMBOL(drm_fence_object_create);
467
468 void drm_fence_manager_init(drm_device_t * dev)
469 {
470         drm_fence_manager_t *fm = &dev->fm;
471         drm_fence_driver_t *fed = dev->driver->fence_driver;
472         int i;
473
474         fm->lock = RW_LOCK_UNLOCKED;
475         write_lock(&fm->lock);
476         INIT_LIST_HEAD(&fm->ring);
477         fm->pending_flush = 0;
478         DRM_INIT_WAITQUEUE(&fm->fence_queue);
479         fm->initialized = 0;
480         if (fed) {
481                 fm->initialized = 1;
482                 atomic_set(&fm->count, 0);
483                 for (i = 0; i < fed->no_types; ++i) {
484                         fm->fence_types[i] = &fm->ring;
485                 }
486         }
487         write_unlock(&fm->lock);
488 }
489
/*
 * Tear down the fence manager. Intentionally empty: no resources are
 * allocated by drm_fence_manager_init() that need explicit release
 * here.
 */
void drm_fence_manager_takedown(drm_device_t * dev)
{
}
493
494 drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
495 {
496         drm_device_t *dev = priv->head->dev;
497         drm_user_object_t *uo;
498         drm_fence_object_t *fence;
499
500         mutex_lock(&dev->struct_mutex);
501         uo = drm_lookup_user_object(priv, handle);
502         if (!uo || (uo->type != drm_fence_type)) {
503                 mutex_unlock(&dev->struct_mutex);
504                 return NULL;
505         }
506         fence = drm_user_object_entry(uo, drm_fence_object_t, base);
507         atomic_inc(&fence->usage);
508         mutex_unlock(&dev->struct_mutex);
509         return fence;
510 }
511
/*
 * Fence ioctl dispatcher. Most operations fall through to the common
 * tail, which snapshots the fence state into @arg under the read lock,
 * drops the usage reference taken by the case, and copies @arg back to
 * user space. destroy/unreference/errors return early and do NOT copy
 * the argument back.
 */
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	int ret;
	drm_fence_manager_t *fm = &dev->fm;
	drm_fence_arg_t arg;
	drm_fence_object_t *fence;
	drm_user_object_t *uo;
	unsigned long flags;
	ret = 0;

	if (!fm->initialized) {
		DRM_ERROR("The DRM driver does not support fencing.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
	switch (arg.op) {
	case drm_fence_create:
		/* Emitting requires the hardware lock. */
		if (arg.flags & DRM_FENCE_FLAG_EMIT)
			LOCK_TEST_WITH_RETURN(dev, filp);
		ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret) {
			drm_fence_usage_deref_unlocked(dev, fence);
			return ret;
		}

		/*
		 * usage > 0. No need to lock dev->struct_mutex;
		 */

		/* Extra reference for the common tail's deref. */
		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	case drm_fence_destroy:
		/* Only the owning file may destroy the user object. */
		mutex_lock(&dev->struct_mutex);
		uo = drm_lookup_user_object(priv, arg.handle);
		if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
			mutex_unlock(&dev->struct_mutex);
			return -EINVAL;
		}
		ret = drm_remove_user_object(priv, uo);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	case drm_fence_reference:
		ret =
		    drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
		if (ret)
			return ret;
		/*
		 * NOTE(review): the lookup result is not NULL-checked
		 * before the common tail dereferences 'fence' — it
		 * presumably cannot fail right after a successful ref,
		 * but confirm.
		 */
		fence = drm_lookup_fence_object(priv, arg.handle);
		break;
	case drm_fence_unreference:
		ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
		return ret;
	case drm_fence_signaled:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		break;
	case drm_fence_flush:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_flush(dev, fence, arg.type);
		break;
	case drm_fence_wait:
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret =
		    drm_fence_object_wait(dev, fence,
					  arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
					  0, arg.type);
		break;
	case drm_fence_emit:
		LOCK_TEST_WITH_RETURN(dev, filp);
		fence = drm_lookup_fence_object(priv, arg.handle);
		if (!fence)
			return -EINVAL;
		ret = drm_fence_object_emit(dev, fence, arg.flags, arg.type);
		break;
	case drm_fence_buffers:
		if (!dev->bm.initialized) {
			DRM_ERROR("Buffer object manager is not initialized\n");
			return -EINVAL;
		}
		LOCK_TEST_WITH_RETURN(dev, filp);
		/* Fence all unfenced buffers of this client. */
		ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
					       NULL, &fence);
		if (ret)
			return ret;
		ret = drm_fence_add_user_object(priv, fence,
						arg.flags &
						DRM_FENCE_FLAG_SHAREABLE);
		if (ret)
			return ret;
		/* Extra reference for the common tail's deref. */
		atomic_inc(&fence->usage);
		arg.handle = fence->base.hash.key;
		break;
	default:
		return -EINVAL;
	}
	/* Common tail: snapshot fence state and return it to user space. */
	read_lock_irqsave(&fm->lock, flags);
	arg.class = fence->class;
	arg.type = fence->type;
	arg.signaled = fence->signaled;
	read_unlock_irqrestore(&fm->lock, flags);
	drm_fence_usage_deref_unlocked(dev, fence);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return ret;
}