Fence all unfenced buffers function.
linux-core/drm_fence.c (platform/upstream/libdrm.git)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

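/*
 * Point the fence manager's per-type list pointers at @list for every
 * fence type selected by @type. drm_fence_handler() uses this to record
 * how far along the ring each fence type has been processed.
 */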
static void drm_fm_update_pointers(drm_fence_manager_t * fm,
                                   struct list_head *list, int no_types,
                                   uint32_t type)
{
        int i;
        for (i = 0; i < no_types; ++i) {
                if (type & (1 << i)) {
                        fm->fence_types[i] = list;
                }
        }
}

/*
 * Mark all fence types in @type as signaled for fences with a sequence
 * number up to and including @sequence. Typically called by the IRQ handler.
 */
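/*
 * A minimal, hypothetical driver IRQ-handler sketch (READ_BREADCRUMB and
 * DRIVER_FENCE_IRQ are illustrative names, not part of this file):
 *
 *	if (status & DRIVER_FENCE_IRQ)
 *		drm_fence_handler(dev, READ_BREADCRUMB(dev_priv),
 *				  DRM_FENCE_EXE);
 */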
49
50 void drm_fence_handler(drm_device_t * dev, uint32_t sequence, uint32_t type)
51 {
52         int i;
53         int wake = 0;
54         int largest = 0;
55         uint32_t diff;
56         uint32_t relevant;
57         int index = 0;
58         drm_fence_manager_t *fm = &dev->fm;
59         drm_fence_driver_t *driver = dev->driver->fence_driver;
60         struct list_head *list;
61         struct list_head *fence_list;
62         drm_fence_object_t *fence;
63         int found = 0;
64
65         for (i = 0; i < driver->no_types; ++i) {
66                 if (!(type & (1 << i)))
67                         continue;
68
69                 list = fm->fence_types[i];
70                 fence_list = list->next;
71
72                 if (fence_list == &fm->ring)
73                         continue;
74
75                 fence = list_entry(fence_list, drm_fence_object_t, ring);
76
77                 diff = (sequence - fence->sequence) & driver->sequence_mask;
78
79                 if (diff < driver->wrap_diff) {
80                         if (diff >= largest) {
81                                 largest = diff;
82                                 index = i;
83                                 found = 1;
84                         }
85                 }
86         }
87
88         if (!found)
89                 return;
90
        /*
         * Start with the fence object that has the lowest sequence number
         * affected by the type mask of this call. Update the signaled fields
         * of the affected fences and check whether we need to wake sleeping
         * processes.
         */

        list = fm->fence_types[index]->next;
        do {
                if (list == &fm->ring) {
                        drm_fm_update_pointers(fm, list->prev,
                                               driver->no_types, type);
                        break;
                }
                fence = list_entry(list, drm_fence_object_t, ring);
                diff = (sequence - fence->sequence) & driver->sequence_mask;
                if (diff >= driver->wrap_diff) {
                        drm_fm_update_pointers(fm, fence->ring.prev,
                                               driver->no_types, type);
                        break;
                }
                relevant = type & fence->type;
                if ((fence->signaled | relevant) != fence->signaled) {
                        fence->signaled |= relevant;
                        fence->submitted_flush |= relevant;
                        wake = 1;
                }

                relevant = fence->flush_mask &
                    ~(fence->signaled | fence->submitted_flush);
                if (relevant) {
                        fm->pending_flush |= relevant;
                        fence->submitted_flush = fence->flush_mask;
                }

                list = list->next;

                /*
                 * Remove a completely signaled fence from the
                 * fence manager ring.
                 */

                if (!(fence->type & ~fence->signaled)) {
                        fence_list = &fence->ring;
                        for (i = 0; i < driver->no_types; ++i) {
                                if (fm->fence_types[i] == fence_list)
                                        fm->fence_types[i] = fence_list->prev;
                        }
                        list_del_init(fence_list);
                }

        } while (1);

        /*
         * Wake sleeping processes.
         */

        if (wake) {
                DRM_WAKEUP(&fm->fence_queue);
        }
}

EXPORT_SYMBOL(drm_fence_handler);

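/*
 * Take a fence off the fence manager ring, repointing any per-type
 * pointers that reference it. Takes the fence manager lock internally.
 */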
static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;
        int i;

        write_lock_irqsave(&fm->lock, flags);
        for (i = 0; i < driver->no_types; ++i) {
                if (fm->fence_types[i] == ring)
                        fm->fence_types[i] = ring->prev;
        }
        list_del_init(ring);
        write_unlock_irqrestore(&fm->lock, flags);
}

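/*
 * Drop a fence reference; the "_locked" variant is intended for callers
 * that already hold dev->struct_mutex. The fence is freed when the last
 * reference goes away.
 */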
void drm_fence_usage_deref_locked(drm_device_t * dev,
                                  drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                drm_fence_unring(dev, &fence->ring);
                kmem_cache_free(drm_cache.fence_object, fence);
        }
}

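/*
 * Drop a fence reference without dev->struct_mutex held. The usage count
 * is re-checked under the mutex before the object is freed.
 */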
void drm_fence_usage_deref_unlocked(drm_device_t * dev,
                                    drm_fence_object_t * fence)
{
        if (atomic_dec_and_test(&fence->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&fence->usage) == 0) {
                        drm_fence_unring(dev, &fence->ring);
                        kmem_cache_free(drm_cache.fence_object, fence);
                }
                mutex_unlock(&dev->struct_mutex);
        }
}

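/*
 * User-object removal callback: releases the reference that the user
 * object held on the fence.
 */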
static void drm_fence_object_destroy(drm_file_t * priv,
                                     drm_user_object_t * base)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence =
            drm_user_object_entry(base, drm_fence_object_t, base);

        drm_fence_usage_deref_locked(dev, fence);
}

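/*
 * Check whether the fence types in @mask have signaled. If @poke_flush is
 * set, first poke the driver's flush mechanism so pending flushes make
 * progress.
 */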
static int fence_signaled(drm_device_t * dev, drm_fence_object_t * fence,
                          uint32_t mask, int poke_flush)
{
        unsigned long flags;
        int signaled;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;

        if (poke_flush)
                driver->poke_flush(dev);
        read_lock_irqsave(&fm->lock, flags);
        signaled =
            (fence->type & mask & fence->signaled) == (fence->type & mask);
        read_unlock_irqrestore(&fm->lock, flags);

        return signaled;
}

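/*
 * Schedule an EXE flush up to @sequence, keeping track of the highest
 * sequence number that still needs one. Called with the fence manager
 * write lock held.
 */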
static void drm_fence_flush_exe(drm_fence_manager_t * fm,
                                drm_fence_driver_t * driver, uint32_t sequence)
{
        uint32_t diff;

        if (!fm->pending_exe_flush) {
                struct list_head *list;

                /*
                 * Last_exe_flush is invalid. Find oldest sequence.
                 */

                list = fm->fence_types[_DRM_FENCE_TYPE_EXE];
                if (list->next == &fm->ring) {
                        return;
                } else {
                        drm_fence_object_t *fence =
                            list_entry(list->next, drm_fence_object_t, ring);
                        fm->last_exe_flush = (fence->sequence - 1) &
                            driver->sequence_mask;
                }
                diff = (sequence - fm->last_exe_flush) & driver->sequence_mask;
                if (diff >= driver->wrap_diff)
                        return;
                fm->exe_flush_sequence = sequence;
                fm->pending_exe_flush = 1;
        } else {
                diff =
                    (sequence - fm->exe_flush_sequence) & driver->sequence_mask;
                if (diff < driver->wrap_diff) {
                        fm->exe_flush_sequence = sequence;
                }
        }
}

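/*
 * Lockless check of the cached signaled mask against @type.
 */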
int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type)
{
        return ((fence->signaled & type) == type);
}

/*
 * Request a flush of the given fence types so that the fence eventually
 * signals. Also used by drm_fence_flush_old() to make sure old fence
 * objects are signaled before their fence sequences are wrapped around
 * and reused.
 */

int drm_fence_object_flush(drm_device_t * dev,
                           drm_fence_object_t * fence, uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;

        if (type & ~fence->type) {
                DRM_ERROR("Flush trying to extend fence type\n");
                return -EINVAL;
        }

        write_lock_irqsave(&fm->lock, flags);
        fence->flush_mask |= type;
        if (fence->submitted_flush == fence->signaled) {
                if ((fence->type & DRM_FENCE_EXE) &&
                    !(fence->submitted_flush & DRM_FENCE_EXE)) {
                        drm_fence_flush_exe(fm, driver, fence->sequence);
                        fence->submitted_flush |= DRM_FENCE_EXE;
                } else {
                        fm->pending_flush |= (fence->flush_mask &
                                              ~fence->submitted_flush);
                        fence->submitted_flush = fence->flush_mask;
                }
        }
        write_unlock_irqrestore(&fm->lock, flags);
        driver->poke_flush(dev);
        return 0;
}

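/*
 * Flush the oldest fence on the ring if its sequence number lags
 * @sequence by driver->flush_diff or more, so that it signals before its
 * sequence number can wrap and be reused.
 */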
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        uint32_t old_sequence;
        unsigned long flags;
        drm_fence_object_t *fence;
        uint32_t diff;

        mutex_lock(&dev->struct_mutex);
        read_lock_irqsave(&fm->lock, flags);
        if (fm->ring.next == &fm->ring) {
                read_unlock_irqrestore(&fm->lock, flags);
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        old_sequence = (sequence - driver->flush_diff) & driver->sequence_mask;
        fence = list_entry(fm->ring.next, drm_fence_object_t, ring);
        atomic_inc(&fence->usage);
        mutex_unlock(&dev->struct_mutex);
        diff = (old_sequence - fence->sequence) & driver->sequence_mask;
        read_unlock_irqrestore(&fm->lock, flags);
        if (diff < driver->wrap_diff) {
                drm_fence_object_flush(dev, fence, fence->type);
        }
        drm_fence_usage_deref_unlocked(dev, fence);
}

EXPORT_SYMBOL(drm_fence_flush_old);

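/*
 * Wait for the fence types in @mask to signal, timing out after three
 * seconds with -EBUSY. If @lazy is set and the driver supports it, sleep
 * on the fence wait queue instead of busy-waiting; if @ignore_signals is
 * set, the lazy wait is restarted when interrupted by a signal.
 */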
int drm_fence_object_wait(drm_device_t * dev, drm_fence_object_t * fence,
                          int lazy, int ignore_signals, uint32_t mask)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        int ret = 0;
        unsigned long _end;

        if (mask & ~fence->type) {
                DRM_ERROR("Wait trying to extend fence type\n");
                return -EINVAL;
        }

        if (fence_signaled(dev, fence, mask, 0))
                return 0;

        _end = jiffies + 3 * DRM_HZ;

        drm_fence_object_flush(dev, fence, mask);
        if (lazy && driver->lazy_capable) {
                do {
                        DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
                                    fence_signaled(dev, fence, mask, 1));
                        if (time_after_eq(jiffies, _end))
                                break;
                } while (ret == -EINTR && ignore_signals);

                if (time_after_eq(jiffies, _end) && (ret != 0))
                        ret = -EBUSY;
                return ret;

        } else {
                int signaled;
                do {
                        signaled = fence_signaled(dev, fence, mask, 1);
                } while (!signaled && !time_after_eq(jiffies, _end));
                if (!signaled)
                        return -EBUSY;
        }
        return 0;
}

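/*
 * (Re)emit a fence: take it off the ring, have the driver emit a new
 * sequence number, reset its flush state and put it at the tail of the
 * ring.
 */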
int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
                          uint32_t type)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *driver = dev->driver->fence_driver;
        unsigned long flags;
        uint32_t sequence;
        int ret;

        drm_fence_unring(dev, &fence->ring);
        ret = driver->emit(dev, &sequence);
        if (ret)
                return ret;

        write_lock_irqsave(&fm->lock, flags);
        fence->type = type;
        fence->flush_mask = 0x00;
        fence->submitted_flush = 0x00;
        fence->signaled = 0x00;
        fence->sequence = sequence;
        list_add_tail(&fence->ring, &fm->ring);
        write_unlock_irqrestore(&fm->lock, flags);
        return 0;
}

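/*
 * Initialize a preallocated fence object and optionally emit it right away.
 */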
int drm_fence_object_init(drm_device_t * dev, uint32_t type, int emit,
                          drm_fence_object_t * fence)
{
        int ret = 0;
        unsigned long flags;
        drm_fence_manager_t *fm = &dev->fm;

        mutex_lock(&dev->struct_mutex);
        atomic_set(&fence->usage, 1);
        mutex_unlock(&dev->struct_mutex);

        write_lock_irqsave(&fm->lock, flags);
        INIT_LIST_HEAD(&fence->ring);
        fence->class = 0;
        fence->type = type;
        fence->flush_mask = 0;
        fence->submitted_flush = 0;
        fence->signaled = 0;
        fence->sequence = 0;
        write_unlock_irqrestore(&fm->lock, flags);
        if (emit) {
                ret = drm_fence_object_emit(dev, fence, type);
        }
        return ret;
}

EXPORT_SYMBOL(drm_fence_object_init);

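/*
 * Expose a fence to user space as a user object, optionally shareable
 * between clients.
 */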
int drm_fence_add_user_object(drm_file_t *priv, drm_fence_object_t *fence,
                              int shareable)
{
        drm_device_t *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(priv, &fence->base, shareable);
        mutex_unlock(&dev->struct_mutex);
        if (ret)
                return ret;
        fence->base.type = drm_fence_type;
        fence->base.remove = &drm_fence_object_destroy;
        return 0;
}
EXPORT_SYMBOL(drm_fence_add_user_object);

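/*
 * Allocate a fence object from the fence object cache and initialize it.
 *
 * A minimal kernel-side usage sketch (error handling omitted; the
 * surrounding driver code is hypothetical):
 *
 *	drm_fence_object_t *fence;
 *
 *	ret = drm_fence_object_create(dev, DRM_FENCE_EXE, 1, &fence);
 *	...
 *	ret = drm_fence_object_wait(dev, fence, 1, 0, DRM_FENCE_EXE);
 *	drm_fence_usage_deref_unlocked(dev, fence);
 */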
int drm_fence_object_create(drm_device_t *dev, uint32_t type,
                            int emit, drm_fence_object_t **c_fence)
{
        drm_fence_object_t *fence;
        int ret;

        fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        ret = drm_fence_object_init(dev, type, emit, fence);
        if (ret) {
                drm_fence_usage_deref_unlocked(dev, fence);
                return ret;
        }
        *c_fence = fence;
        return 0;
}
EXPORT_SYMBOL(drm_fence_object_create);

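/*
 * Set up the per-device fence manager. Fencing is only marked initialized
 * when the driver provides a fence driver.
 */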
void drm_fence_manager_init(drm_device_t * dev)
{
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_driver_t *fed = dev->driver->fence_driver;
        int i;

        fm->lock = RW_LOCK_UNLOCKED;
        INIT_LIST_HEAD(&fm->ring);
        fm->pending_flush = 0;
        DRM_INIT_WAITQUEUE(&fm->fence_queue);
        fm->initialized = 0;
        if (fed) {
                fm->initialized = 1;
                for (i = 0; i < fed->no_types; ++i) {
                        fm->fence_types[i] = &fm->ring;
                }
        }
}

void drm_fence_manager_takedown(drm_device_t * dev)
{
}

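/*
 * Look up a fence object by user handle and take a reference on it.
 * Returns NULL if the handle does not refer to a fence object.
 */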
drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_user_object_t *uo;
        drm_fence_object_t *fence;

        mutex_lock(&dev->struct_mutex);
        uo = drm_lookup_user_object(priv, handle);
        if (!uo || (uo->type != drm_fence_type)) {
                mutex_unlock(&dev->struct_mutex);
                return NULL;
        }
        fence = drm_user_object_entry(uo, drm_fence_object_t, base);
        atomic_inc(&fence->usage);
        mutex_unlock(&dev->struct_mutex);
        return fence;
}

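/*
 * Single ioctl entry point multiplexing the fence operations: create,
 * destroy, reference, unreference, signaled, flush, wait, emit and
 * fence-buffers.
 */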
int drm_fence_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        int ret;
        drm_fence_manager_t *fm = &dev->fm;
        drm_fence_arg_t arg;
        drm_fence_object_t *fence;
        drm_user_object_t *uo;
        unsigned long flags;
        ret = 0;

        if (!fm->initialized) {
                DRM_ERROR("The DRM driver does not support fencing.\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        switch (arg.op) {
        case drm_fence_create:
                if (arg.flags & DRM_FENCE_FLAG_EMIT)
                        LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_object_create(dev, arg.type,
                                              arg.flags & DRM_FENCE_FLAG_EMIT,
                                              &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        return ret;
                }

                /*
                 * usage > 0, so there is no need to take dev->struct_mutex.
                 */

                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        case drm_fence_destroy:
                mutex_lock(&dev->struct_mutex);
                uo = drm_lookup_user_object(priv, arg.handle);
                if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
                        mutex_unlock(&dev->struct_mutex);
                        return -EINVAL;
                }
                ret = drm_remove_user_object(priv, uo);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        case drm_fence_reference:
                ret = drm_user_object_ref(priv, arg.handle,
                                          drm_fence_type, &uo);
                if (ret)
                        return ret;
                fence = drm_lookup_fence_object(priv, arg.handle);
                break;
        case drm_fence_unreference:
                ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
                return ret;
        case drm_fence_signaled:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                break;
        case drm_fence_flush:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_flush(dev, fence, arg.type);
                break;
        case drm_fence_wait:
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_wait(dev, fence,
                                            arg.flags &
                                            DRM_FENCE_FLAG_WAIT_LAZY,
                                            0, arg.type);
                break;
        case drm_fence_emit:
                LOCK_TEST_WITH_RETURN(dev, filp);
                fence = drm_lookup_fence_object(priv, arg.handle);
                if (!fence)
                        return -EINVAL;
                ret = drm_fence_object_emit(dev, fence, arg.type);
                break;
        case drm_fence_buffers:
                if (!dev->bm.initialized) {
                        DRM_ERROR("Buffer object manager is not initialized\n");
                        return -EINVAL;
                }
                LOCK_TEST_WITH_RETURN(dev, filp);
                ret = drm_fence_buffer_objects(priv, NULL, NULL, &fence);
                if (ret)
                        return ret;
                ret = drm_fence_add_user_object(priv, fence,
                                                arg.flags &
                                                DRM_FENCE_FLAG_SHAREABLE);
                if (ret)
                        return ret;
                atomic_inc(&fence->usage);
                arg.handle = fence->base.hash.key;
                break;
        default:
                return -EINVAL;
        }
        read_lock_irqsave(&fm->lock, flags);
        arg.class = fence->class;
        arg.type = fence->type;
        arg.signaled = fence->signaled;
        read_unlock_irqrestore(&fm->lock, flags);
        drm_fence_usage_deref_unlocked(dev, fence);

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return ret;
}