Add a buffer object transfer function.
linux-core/drm_bo.c (platform/upstream/libdrm.git)
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Buffer object locking policy:
 * Lock dev->struct_mutex;
 * Increase usage
 * Unlock dev->struct_mutex;
 * Lock buffer->mutex;
 * Do whatever you want;
 * Unlock buffer->mutex;
 * Decrease usage. Call destruction if zero.
 *
 * User object visibility ups usage just once, since it has its own
 * refcounting.
 *
 * Destruction:
 * lock dev->struct_mutex;
 * Verify that usage is zero. Otherwise unlock and continue.
 * Destroy object.
 * unlock dev->struct_mutex;
 *
 * Mutex and spinlock locking orders:
 * 1.) Buffer mutex
 * 2.) Refer to ttm locking orders.
 */
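
/*
 * Illustrative sketch of the locking policy above (hypothetical helper,
 * not part of this file's API; shown for documentation only):
 *
 *      static void example_bo_access(drm_buffer_object_t *bo)
 *      {
 *              drm_device_t *dev = bo->dev;
 *
 *              mutex_lock(&dev->struct_mutex);
 *              atomic_inc(&bo->usage);
 *              mutex_unlock(&dev->struct_mutex);
 *
 *              mutex_lock(&bo->mutex);
 *              ... operate on the buffer ...
 *              mutex_unlock(&bo->mutex);
 *
 *              drm_bo_usage_deref_unlocked(bo);
 *      }
 */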
static void drm_bo_destroy_locked(drm_buffer_object_t *bo);

#define DRM_FLAG_MASKED(_old, _new, _mask) {            \
        (_old) ^= (((_old) ^ (_new)) & (_mask));        \
}
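
/*
 * DRM_FLAG_MASKED() copies only the bits selected by _mask from _new
 * into _old, leaving all other bits of _old untouched. Illustrative
 * values (not from this file):
 *
 *      uint32_t flags = 0xf0;
 *      DRM_FLAG_MASKED(flags, 0x0f, 0x03);
 *      flags is now 0xf3: bits 0-1 taken from _new, the rest kept.
 */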

static inline uint32_t drm_bo_type_flags(unsigned type)
{
        return (1 << (24 + type));
}

/*
 * bo locked. dev->struct_mutex locked.
 */

static void drm_bo_add_to_lru(drm_buffer_object_t * bo,
                              drm_buffer_manager_t * bm)
{
        struct list_head *list;
        bo->mem_type = 0;

        switch (bo->flags & DRM_BO_MASK_MEM) {
        case DRM_BO_FLAG_MEM_TT:
                bo->mem_type = DRM_BO_MEM_TT;
                break;
        case DRM_BO_FLAG_MEM_VRAM:
                bo->mem_type = DRM_BO_MEM_VRAM;
                break;
        case DRM_BO_FLAG_MEM_LOCAL:
                bo->mem_type = DRM_BO_MEM_LOCAL;
                break;
        default:
                BUG_ON(1);
        }
        list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
                &bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type];
        list_add_tail(&bo->lru, list);
        return;
}

/*
 * bo locked.
 */

static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
                                int force_no_move)
{
        drm_device_t *dev = bo->dev;
        int ret;

        if (bo->mm_node) {
                mutex_lock(&dev->struct_mutex);
                if (evict)
                        ret = drm_evict_ttm(bo->ttm);
                else
                        ret = drm_unbind_ttm(bo->ttm);

                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        if (ret == -EAGAIN)
                                schedule();
                        return ret;
                }

                if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
                        drm_mm_put_block(bo->mm_node);
                        bo->mm_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        bo->flags &= ~DRM_BO_FLAG_MEM_TT;
        bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

        return 0;
}

/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
                       int no_wait)
{
        drm_fence_object_t *fence = bo->fence;
        int ret;

        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                if (no_wait) {
                        return -EBUSY;
                }
                ret =
                    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
                                          bo->fence_type);
                if (ret)
                        return ret;

                drm_fence_usage_deref_unlocked(dev, fence);
                bo->fence = NULL;
        }
        return 0;
}

/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        atomic_inc(&bo->usage);
        mutex_unlock(&dev->struct_mutex);
        mutex_lock(&bo->mutex);

        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

        if (bo->fence && drm_fence_object_signaled(bo->fence,
                                                   bo->fence_type)) {
                drm_fence_usage_deref_locked(dev, bo->fence);
                bo->fence = NULL;
        }

        if (bo->fence && remove_all) {
                if (bm->nice_mode) {
                        unsigned long _end = jiffies + 3 * DRM_HZ;
                        int ret;
                        do {
                                ret = drm_bo_wait(bo, 0, 1, 0);
                        } while (ret && !time_after_eq(jiffies, _end));

                        if (bo->fence) {
                                bm->nice_mode = 0;
                                DRM_ERROR("Detected GPU lockup or "
                                          "fence driver was taken down. "
                                          "Evicting waiting buffers.\n");
                        }
                        if (bo->fence) {
                                drm_fence_usage_deref_unlocked(dev, bo->fence);
                                bo->fence = NULL;
                        }
                }
        }
        mutex_lock(&dev->struct_mutex);

        if (!atomic_dec_and_test(&bo->usage)) {
                goto out;
        }

        if (!bo->fence) {
                list_del_init(&bo->lru);
                if (bo->mm_node) {
                        drm_mm_put_block(bo->mm_node);
                        bo->mm_node = NULL;
                }
                list_del_init(&bo->ddestroy);
                mutex_unlock(&bo->mutex);
                drm_bo_destroy_locked(bo);
                return;
        }

        if (list_empty(&bo->ddestroy)) {
                drm_fence_object_flush(dev, bo->fence, bo->fence_type);
                list_add_tail(&bo->ddestroy, &bm->ddestroy);
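                /*
                 * Re-check roughly every 10 ms; the expression below
                 * clamps the delay to at least one jiffy on low-HZ
                 * kernels.
                 */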
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }

out:
        mutex_unlock(&bo->mutex);
        return;
}

/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        if (list_empty(&bo->lru) && bo->mm_node == NULL &&
            atomic_read(&bo->usage) == 0) {
                BUG_ON(bo->fence != NULL);

                if (bo->ttm) {
                        unsigned long _end = jiffies + DRM_HZ;
                        int ret;

                        do {
                                ret = drm_unbind_ttm(bo->ttm);
                                if (ret == -EAGAIN) {
                                        mutex_unlock(&dev->struct_mutex);
                                        schedule();
                                        mutex_lock(&dev->struct_mutex);
                                }
                        } while (ret == -EAGAIN &&
                                 !time_after_eq(jiffies, _end));

                        if (ret) {
                                DRM_ERROR("Couldn't unbind TTM region while "
                                          "destroying a buffer. "
                                          "Bad. Continuing anyway\n");
                        }
                }

                if (bo->ttm_object) {
                        drm_ttm_object_deref_locked(dev, bo->ttm_object);
                }

                atomic_dec(&bm->count);

                drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

                return;
        }

        /*
         * Some stuff is still trying to reference the buffer object.
         * Get rid of those references.
         */

        drm_bo_cleanup_refs(bo, 0);

        return;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
        drm_buffer_manager_t *bm = &dev->bm;

        drm_buffer_object_t *entry, *nentry;
        struct list_head *list, *next;

        list_for_each_safe(list, next, &bm->ddestroy) {
                entry = list_entry(list, drm_buffer_object_t, ddestroy);

                nentry = NULL;
                if (next != &bm->ddestroy) {
                        nentry = list_entry(next, drm_buffer_object_t,
                                            ddestroy);
                        atomic_inc(&nentry->usage);
                }

                drm_bo_cleanup_refs(entry, remove_all);

                if (nentry) {
                        atomic_dec(&nentry->usage);
                }
        }
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        drm_device_t *dev = (drm_device_t *) data;
        drm_buffer_manager_t *bm = &dev->bm;
#else
        drm_buffer_manager_t *bm =
            container_of(work, drm_buffer_manager_t, wq.work);
        drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

        DRM_DEBUG("Delayed delete Worker\n");

        mutex_lock(&dev->struct_mutex);
        if (!bm->initialized) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        drm_bo_delayed_delete(dev, 0);
        if (bm->initialized && !list_empty(&bm->ddestroy)) {
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }
        mutex_unlock(&dev->struct_mutex);
}

static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
        if (atomic_dec_and_test(&bo->usage)) {
                drm_bo_destroy_locked(bo);
        }
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
        drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t,
                                                        base));
}

static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
        if (atomic_dec_and_test(&bo->usage)) {
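                /*
                 * Re-check usage under dev->struct_mutex: another thread
                 * may have grabbed a reference between our decrement and
                 * the lock, in which case we must not destroy the object.
                 */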
                mutex_lock(&bo->dev->struct_mutex);
                if (atomic_read(&bo->usage) == 0)
                        drm_bo_destroy_locked(bo);
                mutex_unlock(&bo->dev->struct_mutex);
        }
}

/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
                             struct list_head *list,
                             uint32_t fence_flags,
                             drm_fence_object_t * fence,
                             drm_fence_object_t ** used_fence)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        drm_buffer_object_t *entry;
        uint32_t fence_type = 0;
        int count = 0;
        int ret = 0;
        struct list_head *l;
        LIST_HEAD(f_list);

        mutex_lock(&dev->struct_mutex);

        if (!list)
                list = &bm->unfenced;

        list_for_each_entry(entry, list, lru) {
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
                fence_type |= entry->fence_type;
                if (entry->fence_class != 0) {
                        DRM_ERROR("Fence class %d is not implemented yet.\n",
                                  entry->fence_class);
                        ret = -EINVAL;
                        goto out;
                }
                count++;
        }

        if (!count) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Transfer to a local list before we release the dev->struct_mutex;
         * this is so we don't get any new unfenced objects while fencing
         * the ones we already have.
         */

        list_splice_init(list, &f_list);

        if (fence) {
                if ((fence_type & fence->type) != fence_type) {
                        DRM_ERROR("Given fence doesn't match buffers "
                                  "on unfenced list.\n");
                        ret = -EINVAL;
                        goto out;
                }
        } else {
                mutex_unlock(&dev->struct_mutex);
                ret = drm_fence_object_create(dev, fence_type,
                                              fence_flags | DRM_FENCE_FLAG_EMIT,
                                              &fence);
                mutex_lock(&dev->struct_mutex);
                if (ret)
                        goto out;
        }

        count = 0;
        l = f_list.next;
        while (l != &f_list) {
                entry = list_entry(l, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);
                list_del_init(l);
                if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
                                drm_fence_usage_deref_locked(dev, entry->fence);
                        entry->fence = fence;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                        DRM_WAKEUP(&entry->event_queue);
                        drm_bo_add_to_lru(entry, bm);
                }
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(entry);
                l = f_list.next;
        }
        atomic_add(count, &fence->usage);
        DRM_DEBUG("Fenced %d buffers\n", count);
out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
        return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
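
/*
 * A hedged sketch of a caller (hypothetical driver code, not part of
 * this file): fence everything on the device-wide unfenced list after
 * command submission, then drop the reference returned in *used_fence,
 * as the note above requires.
 *
 *      static int example_fence_unfenced(drm_file_t *priv)
 *      {
 *              drm_fence_object_t *fence = NULL;
 *              int ret;
 *
 *              ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *              if (ret)
 *                      return ret;
 *
 *              ... pass the fence handle to user space if needed ...
 *
 *              drm_fence_usage_deref_unlocked(priv->head->dev, fence);
 *              return 0;
 *      }
 */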

/*
 * bo->mutex locked
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
                        int no_wait, int force_no_move)
{
        int ret = 0;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        /*
         * Someone might have modified the buffer before we took the
         * buffer mutex.
         */

        if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
                goto out;
        if (!(bo->flags & drm_bo_type_flags(mem_type)))
                goto out;

        ret = drm_bo_wait(bo, 0, 0, no_wait);

        if (ret) {
                if (ret != -EAGAIN)
                        DRM_ERROR("Failed to expire fence before "
                                  "buffer eviction.\n");
                goto out;
        }

        if (mem_type == DRM_BO_MEM_TT) {
                ret = drm_move_tt_to_local(bo, 1, force_no_move);
                if (ret)
                        goto out;
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->lru);
                drm_bo_add_to_lru(bo, bm);
                mutex_unlock(&dev->struct_mutex);
        }

        if (ret)
                goto out;

        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
                        _DRM_BO_FLAG_EVICTED);
out:
        return ret;
}

/*
 * bo->mutex locked.
 */

int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
                       int no_wait)
{
        drm_device_t *dev = bo->dev;
        drm_mm_node_t *node;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_buffer_object_t *entry;
        drm_mm_t *mm = &bm->manager[mem_type];
        struct list_head *lru;
        unsigned long size = bo->num_pages;
        int ret;

        mutex_lock(&dev->struct_mutex);
        do {
                node = drm_mm_search_free(mm, size, bo->page_alignment, 1);
                if (node)
                        break;

                lru = &bm->lru[mem_type];
                if (lru->next == lru)
                        break;

                entry = list_entry(lru->next, drm_buffer_object_t, lru);

                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
                ret = drm_bo_evict(entry, mem_type, no_wait, 0);
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_unlocked(entry);
                if (ret)
                        return ret;
                mutex_lock(&dev->struct_mutex);
        } while (1);

        if (!node) {
                DRM_ERROR("Out of videoram / aperture space\n");
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
        }

        node = drm_mm_get_block(node, size, bo->page_alignment);
        mutex_unlock(&dev->struct_mutex);
        BUG_ON(!node);
        node->private = (void *)bo;

        bo->mm_node = node;
        bo->offset = node->start * PAGE_SIZE;
        return 0;
}

static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
{
        drm_device_t *dev = bo->dev;
        drm_ttm_backend_t *be;
        int ret;

        if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
                BUG_ON(bo->mm_node);
                ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
                if (ret)
                        return ret;
        }

        DRM_DEBUG("Flipping into AGP 0x%08lx\n", bo->mm_node->start);

        mutex_lock(&dev->struct_mutex);
        ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
                           bo->mm_node->start);
        if (ret) {
                drm_mm_put_block(bo->mm_node);
                bo->mm_node = NULL;
        }
        mutex_unlock(&dev->struct_mutex);

        if (ret) {
                return ret;
        }

        be = bo->ttm->be;
        if (be->needs_ub_cache_adjust(be))
                bo->flags &= ~DRM_BO_FLAG_CACHED;
        bo->flags &= ~DRM_BO_MASK_MEM;
        bo->flags |= DRM_BO_FLAG_MEM_TT;

        if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
                ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
                if (ret)
                        DRM_ERROR("Could not flush read caches\n");
        }
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);

        return 0;
}

static int drm_bo_new_flags(drm_device_t * dev,
                            uint32_t flags, uint32_t new_mask, uint32_t hint,
                            int init, uint32_t * n_flags, uint32_t * n_mask)
{
        uint32_t new_flags = 0;
        uint32_t new_props;
        drm_bo_driver_t *driver = dev->driver->bo_driver;
        drm_buffer_manager_t *bm = &dev->bm;
        unsigned i;

        /*
         * First adjust the mask to take away nonexistent memory types.
         */

        for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
                if (!bm->use_type[i])
                        new_mask &= ~drm_bo_type_flags(i);
        }

        if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
                DRM_ERROR
                    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
                     "processes\n");
                return -EPERM;
        }
        if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
                if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
                     !driver->cached[DRM_BO_MEM_TT]) &&
                    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
                     && !driver->cached[DRM_BO_MEM_VRAM])) {
                        new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
                } else {
                        if (!driver->cached[DRM_BO_MEM_TT])
                                new_flags &= DRM_BO_FLAG_MEM_TT;
                        if (!driver->cached[DRM_BO_MEM_VRAM])
                                new_flags &= DRM_BO_FLAG_MEM_VRAM;
                }
        }

        if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
            !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
                if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
                    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
                        DRM_ERROR
                            ("Cannot read cached from a pinned VRAM / TT buffer\n");
                        return -EINVAL;
                }
        }

        /*
         * Determine new memory location:
         */

        if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {

                new_flags = new_mask & DRM_BO_MASK_MEM;

                if (!new_flags) {
                        DRM_ERROR("Invalid buffer object memory flags\n");
                        return -EINVAL;
                }

                if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
                        if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
                            new_flags & (DRM_BO_FLAG_MEM_VRAM |
                                         DRM_BO_FLAG_MEM_TT)) {
                                new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
                        } else {
                                new_flags = DRM_BO_FLAG_MEM_LOCAL;
                        }
                }
                if (new_flags & DRM_BO_FLAG_MEM_TT) {
                        if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
                            new_flags & DRM_BO_FLAG_MEM_VRAM) {
                                new_flags = DRM_BO_FLAG_MEM_VRAM;
                        } else {
                                new_flags = DRM_BO_FLAG_MEM_TT;
                        }
                }
        } else {
                new_flags = flags & DRM_BO_MASK_MEM;
        }

        new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
                                DRM_BO_FLAG_READ);

        if (!new_props) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }

        new_flags |= new_mask & ~DRM_BO_MASK_MEM;

        if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
            (new_flags & DRM_BO_FLAG_NO_EVICT) &&
            (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
                if (!(flags & DRM_BO_FLAG_CACHED)) {
                        DRM_ERROR
                            ("Cannot change caching policy of pinned buffer\n");
                        return -EINVAL;
                } else {
                        new_flags &= ~DRM_BO_FLAG_CACHED;
                }
        }

        *n_flags = new_flags;
        *n_mask = new_mask;
        return 0;
}

/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
                                              uint32_t handle, int check_owner)
{
        drm_user_object_t *uo;
        drm_buffer_object_t *bo;

        uo = drm_lookup_user_object(priv, handle);

        if (!uo || (uo->type != drm_buffer_type)) {
                DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
                return NULL;
        }

        if (check_owner && priv != uo->owner) {
                if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
                        return NULL;
        }

        bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
        atomic_inc(&bo->usage);
        return bo;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                return 1;
        }
        return 0;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                return 1;
        }
        return 0;
}

static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
        int ret = 0;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (bo->mm_node)
                ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
        return ret;
}

/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->mapped) >= 0) && no_wait)
                return -EBUSY;

        DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                    atomic_read(&bo->mapped) == -1);

        if (ret == -EINTR)
                ret = -EAGAIN;

        return ret;
}

static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
        int ret;

        mutex_lock(&bo->mutex);
        ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        mutex_unlock(&bo->mutex);
        return ret;
}

/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be one atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */
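
/*
 * Illustrative timeline of that scheme (documentation only):
 *
 *      validate  ->  buffer goes on bm->unfenced, _DRM_BO_FLAG_UNFENCED set
 *      submit    ->  commands referencing the buffer are handed to the GPU
 *      fence     ->  drm_fence_buffer_objects() attaches a fence, clears
 *                    _DRM_BO_FLAG_UNFENCED and moves the buffer to an LRU
 *
 * Anyone else touching the buffer in between has to wait, which is what
 * drm_bo_wait_unfenced() below implements.
 */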

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
                                int eagain_if_wait)
{
        int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        unsigned long _end = jiffies + 3 * DRM_HZ;

        if (ret && no_wait)
                return -EBUSY;
        else if (!ret)
                return 0;

        do {
                mutex_unlock(&bo->mutex);
                DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                            !drm_bo_check_unfenced(bo));
                mutex_lock(&bo->mutex);
                if (ret == -EINTR)
                        return -EAGAIN;
                if (ret) {
                        DRM_ERROR
                            ("Error waiting for buffer to become fenced\n");
                        return ret;
                }
                ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        } while (ret && !time_after_eq(jiffies, _end));
        if (ret) {
                DRM_ERROR("Timeout waiting for buffer to become fenced\n");
                return ret;
        }
        if (eagain_if_wait)
                return -EAGAIN;

        return 0;
}

/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
                                drm_bo_arg_reply_t * rep)
{
        rep->handle = bo->base.hash.key;
        rep->flags = bo->flags;
        rep->size = bo->num_pages * PAGE_SIZE;
        rep->offset = bo->offset;

        if (bo->ttm_object) {
                rep->arg_handle = bo->ttm_object->map_list.user_token;
        } else {
                rep->arg_handle = 0;
        }

        rep->mask = bo->mask;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
        rep->rep_flags = 0;
        rep->page_alignment = bo->page_alignment;

        if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
                DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
                                DRM_BO_REP_BUSY);
        }
}

/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */
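
/*
 * Note on the bo->mapped counter: it starts at -1 (unmapped), so
 * atomic_inc_and_test() succeeds exactly for the first mapper, and
 * atomic_add_negative(-1, ...) on unmap detects the last one.
 */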

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                                 uint32_t map_flags, unsigned hint,
                                 drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret = 0;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
                ret = drm_bo_wait_unfenced(bo, no_wait, 0);
                if (ret)
                        goto out;
        }

        /*
         * If this returns true, we are currently unmapped.
         * We need to do this test, because unmapping can
         * be done without the bo->mutex held.
         */

        while (1) {
                if (atomic_inc_and_test(&bo->mapped)) {
                        if (no_wait && drm_bo_busy(bo)) {
                                atomic_dec(&bo->mapped);
                                ret = -EBUSY;
                                goto out;
                        }
                        ret = drm_bo_wait(bo, 0, 0, no_wait);
                        if (ret) {
                                atomic_dec(&bo->mapped);
                                goto out;
                        }

                        if ((map_flags & DRM_BO_FLAG_READ) &&
                            (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
                            (!(bo->flags & DRM_BO_FLAG_CACHED))) {
                                drm_bo_read_cached(bo);
                        }
                        break;
                } else if ((map_flags & DRM_BO_FLAG_READ) &&
                           (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
                           (!(bo->flags & DRM_BO_FLAG_CACHED))) {

                        /*
                         * We are already mapped with different flags;
                         * we need to wait for an unmap.
                         */

                        ret = drm_bo_wait_unmapped(bo, no_wait);
                        if (ret)
                                goto out;

                        continue;
                }
                break;
        }

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
                        DRM_WAKEUP(&bo->event_queue);

        } else
                drm_bo_fill_rep_arg(bo, rep);
out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}

static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_object_t *bo;
        drm_ref_object_t *ro;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                ret = -EINVAL;
                goto out;
        }

        ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        if (!ro) {
                ret = -EINVAL;
                goto out;
        }

        drm_remove_ref_object(priv, ro);
        drm_bo_usage_deref_locked(bo);
out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
                                         drm_user_object_t * uo,
                                         drm_ref_t action)
{
        drm_buffer_object_t *bo =
            drm_user_object_entry(uo, drm_buffer_object_t, base);

        /*
         * We DON'T want to take the bo->lock here, because we want to
         * hold it when we wait for unmapped buffer.
         */

        BUG_ON(action != _DRM_REF_TYPE1);

        if (atomic_add_negative(-1, &bo->mapped))
                DRM_WAKEUP(&bo->event_queue);
}

/*
 * bo->mutex locked.
 */

static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
                              int no_wait, int force_no_move)
{
        int ret = 0;

        /*
         * Flush outstanding fences.
         */
        drm_bo_busy(bo);

        /*
         * Make sure we're not mapped.
         */

        ret = drm_bo_wait_unmapped(bo, no_wait);
        if (ret)
                return ret;

        /*
         * Wait for outstanding fences.
         */

        ret = drm_bo_wait(bo, 0, 0, no_wait);

        if (ret == -EINTR)
                return -EAGAIN;
        if (ret)
                return ret;

        if (new_flags & DRM_BO_FLAG_MEM_TT) {
                ret = drm_move_local_to_tt(bo, no_wait);
                if (ret)
                        return ret;
        } else {
                drm_move_tt_to_local(bo, 0, force_no_move);
        }

        return 0;
}

/*
 * bo locked.
 */

static int drm_buffer_object_validate(drm_buffer_object_t * bo,
                                      uint32_t new_flags,
                                      int move_unfenced, int no_wait)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        uint32_t flag_diff = (new_flags ^ bo->flags);
        drm_bo_driver_t *driver = dev->driver->bo_driver;

        int ret;

        if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
                DRM_ERROR("Vram support not implemented yet\n");
                return -EINVAL;
        }

        DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
        ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
        if (ret) {
                DRM_ERROR("Driver did not support given buffer permissions\n");
                return ret;
        }

        /*
         * Move out if we need to change caching policy.
         */

        if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
            !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
                if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                        DRM_ERROR("Cannot change caching policy of "
                                  "pinned buffer.\n");
                        return -EINVAL;
                }
                ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }
        DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
        flag_diff = (new_flags ^ bo->flags);

        /*
         * Check whether we dropped no_move policy, and in that case,
         * release reserved manager regions.
         */

        if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
            !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
                mutex_lock(&dev->struct_mutex);
                if (bo->mm_node) {
                        drm_mm_put_block(bo->mm_node);
                        bo->mm_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        /*
         * Check whether we need to move buffer.
         */

        if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
                ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }

        if (move_unfenced) {

                /*
                 * Place on unfenced list.
                 */

                DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                                _DRM_BO_FLAG_UNFENCED);
                mutex_lock(&dev->struct_mutex);
                list_del(&bo->lru);
                list_add_tail(&bo->lru, &bm->unfenced);
                mutex_unlock(&dev->struct_mutex);
        } else {

                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->lru);
                drm_bo_add_to_lru(bo, bm);
                mutex_unlock(&dev->struct_mutex);
        }

        bo->flags = new_flags;
        return 0;
}

static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
                                  uint32_t flags, uint32_t mask, uint32_t hint,
                                  drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        uint32_t new_flags;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);

        if (ret)
                goto out;

        ret = drm_bo_new_flags(dev, bo->flags,
                               (flags & mask) | (bo->mask & ~mask), hint,
                               0, &new_flags, &bo->mask);

        if (ret)
                goto out;

        ret =
            drm_buffer_object_validate(bo, new_flags,
                                       !(hint & DRM_BO_HINT_DONT_FENCE),
                                       no_wait);
        drm_bo_fill_rep_arg(bo, rep);

out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}

static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
                              drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }
        mutex_lock(&bo->mutex);
        if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
                (void)drm_bo_busy(bo);
        drm_bo_fill_rep_arg(bo, rep);
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return 0;
}

static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
                              uint32_t hint, drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        int ret;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                return -EINVAL;
        }

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;
        ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
        if (ret)
                goto out;

        drm_bo_fill_rep_arg(bo, rep);

out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}

/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
{
        drm_device_t *dev = bo->dev;
        drm_ttm_object_t *to = NULL;
        int ret = 0;
        uint32_t ttm_flags = 0;

        bo->ttm_object = NULL;
        bo->ttm = NULL;

        switch (bo->type) {
        case drm_bo_type_dc:
                mutex_lock(&dev->struct_mutex);
                ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
                                            ttm_flags, &to);
                mutex_unlock(&dev->struct_mutex);
                break;
        case drm_bo_type_user:
        case drm_bo_type_fake:
                break;
        default:
                DRM_ERROR("Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        if (ret) {
                return ret;
        }

        if (to) {
                bo->ttm_object = to;
                bo->ttm = drm_ttm_from_object(to);
        }
        return ret;
}

/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call bo->mutex locked.
 */

int drm_buffer_object_transfer(drm_buffer_object_t *bo,
                               drm_buffer_object_t **new_obj)
{
        drm_buffer_object_t *fbo;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        list_splice_init(&bo->lru, &fbo->lru);

        bo->mm_node = NULL;
        bo->ttm = NULL;
        bo->ttm_object = NULL;
        bo->fence = NULL;
        bo->flags = 0;

        fbo->mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}
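
/*
 * A hedged sketch of the intended use of the transfer function above
 * (hypothetical driver code, not part of this file): hand the storage
 * of a buffer that is about to be moved over to a throw-away object,
 * so the old storage can stay alive until its fence expires while the
 * original object is given new storage.
 *
 *      static int example_start_move(drm_buffer_object_t *bo)
 *      {
 *              drm_buffer_object_t *old_storage;
 *              int ret;
 *
 *              ret = drm_buffer_object_transfer(bo, &old_storage);
 *              if (ret)
 *                      return ret;
 *
 *              ... fence old_storage, then drop the single reference ...
 *
 *              drm_bo_usage_deref_unlocked(old_storage);
 *              return 0;
 *      }
 */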
1390                 
1391
1392 int drm_buffer_object_create(drm_file_t * priv,
1393                              unsigned long size,
1394                              drm_bo_type_t type,
1395                              uint32_t mask,
1396                              uint32_t hint,
1397                              uint32_t page_alignment,
1398                              unsigned long buffer_start,
1399                              drm_buffer_object_t ** buf_obj)
1400 {
1401         drm_device_t *dev = priv->head->dev;
1402         drm_buffer_manager_t *bm = &dev->bm;
1403         drm_buffer_object_t *bo;
1404         int ret = 0;
1405         uint32_t new_flags;
1406         unsigned long num_pages;
1407
1408         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1409                 DRM_ERROR("Invalid buffer object start.\n");
1410                 return -EINVAL;
1411         }
1412         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1413         if (num_pages == 0) {
1414                 DRM_ERROR("Illegal buffer object size.\n");
1415                 return -EINVAL;
1416         }
1417
1418         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1419
1420         if (!bo)
1421                 return -ENOMEM;
1422
1423         mutex_init(&bo->mutex);
1424         mutex_lock(&bo->mutex);
1425
1426         atomic_set(&bo->usage, 1);
1427         atomic_set(&bo->mapped, -1);
1428         DRM_INIT_WAITQUEUE(&bo->event_queue);
1429         INIT_LIST_HEAD(&bo->lru);
1430         INIT_LIST_HEAD(&bo->ddestroy);
1431         bo->dev = dev;
1432         bo->type = type;
1433         bo->num_pages = num_pages;
1434         bo->mm_node = NULL;
1435         bo->page_alignment = page_alignment;
1436         if (bo->type == drm_bo_type_fake) {
1437                 bo->offset = buffer_start;
1438                 bo->buffer_start = 0;
1439         } else {
1440                 bo->buffer_start = buffer_start;
1441         }
1442         bo->priv_flags = 0;
1443         bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
1444         atomic_inc(&bm->count);
1445         ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
1446                                1, &new_flags, &bo->mask);
1447         if (ret)
1448                 goto out_err;
1449         ret = drm_bo_add_ttm(priv, bo);
1450         if (ret)
1451                 goto out_err;
1452
1453         ret = drm_buffer_object_validate(bo, new_flags, 0,
1454                                          hint & DRM_BO_HINT_DONT_BLOCK);
1455         if (ret)
1456                 goto out_err;
1457
1458         mutex_unlock(&bo->mutex);
1459         *buf_obj = bo;
1460         return 0;
1461
1462       out_err:
1463         mutex_unlock(&bo->mutex);
1464         drm_bo_usage_deref_unlocked(bo);
1465         return ret;
1466 }
1467
1468 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1469                                   int shareable)
1470 {
1471         drm_device_t *dev = priv->head->dev;
1472         int ret;
1473
1474         mutex_lock(&dev->struct_mutex);
1475         ret = drm_add_user_object(priv, &bo->base, shareable);
1476         if (ret)
1477                 goto out;
1478
1479         bo->base.remove = drm_bo_base_deref_locked;
1480         bo->base.type = drm_buffer_type;
1481         bo->base.ref_struct_locked = NULL;
1482         bo->base.unref = drm_buffer_user_object_unmap;
1483
1484       out:
1485         mutex_unlock(&dev->struct_mutex);
1486         return ret;
1487 }
1488
1489 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1490 {
1491         LOCK_TEST_WITH_RETURN(dev, filp);
1492         return 0;
1493 }
1494
1495 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1496 {
1497         DRM_DEVICE;
1498         drm_bo_arg_t arg;
1499         drm_bo_arg_request_t *req = &arg.d.req;
1500         drm_bo_arg_reply_t rep;
1501         unsigned long next;
1502         drm_user_object_t *uo;
1503         drm_buffer_object_t *entry;
1504
1505         if (!dev->bm.initialized) {
1506                 DRM_ERROR("Buffer object manager is not initialized.\n");
1507                 return -EINVAL;
1508         }
1509
1510         do {
1511                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1512
1513                 if (arg.handled) {
1514                         data = arg.next;
1515                         continue;
1516                 }
1517
1518                 rep.ret = 0;
1519                 switch (req->op) {
1520                 case drm_bo_create:
1521                         rep.ret =
1522                             drm_buffer_object_create(priv, req->size,
1523                                                      req->type,
1524                                                      req->mask,
1525                                                      req->hint,
1526                                                      req->page_alignment,
1527                                                      req->buffer_start, &entry);
1528                         if (rep.ret)
1529                                 break;
1530
1531                         rep.ret =
1532                             drm_bo_add_user_object(priv, entry,
1533                                                    req->
1534                                                    mask &
1535                                                    DRM_BO_FLAG_SHAREABLE);
1536                         if (rep.ret)
1537                                 drm_bo_usage_deref_unlocked(entry);
1538
1539                         if (rep.ret)
1540                                 break;
1541
1542                         mutex_lock(&entry->mutex);
1543                         drm_bo_fill_rep_arg(entry, &rep);
1544                         mutex_unlock(&entry->mutex);
1545                         break;
1546                 case drm_bo_unmap:
1547                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1548                         break;
1549                 case drm_bo_map:
1550                         rep.ret = drm_buffer_object_map(priv, req->handle,
1551                                                         req->mask,
1552                                                         req->hint, &rep);
1553                         break;
1554                 case drm_bo_destroy:
1555                         mutex_lock(&dev->struct_mutex);
1556                         uo = drm_lookup_user_object(priv, req->handle);
1557                         if (!uo || (uo->type != drm_buffer_type)
1558                             || uo->owner != priv) {
1559                                 mutex_unlock(&dev->struct_mutex);
1560                                 rep.ret = -EINVAL;
1561                                 break;
1562                         }
1563                         rep.ret = drm_remove_user_object(priv, uo);
1564                         mutex_unlock(&dev->struct_mutex);
1565                         break;
1566                 case drm_bo_reference:
1567                         rep.ret = drm_user_object_ref(priv, req->handle,
1568                                                       drm_buffer_type, &uo);
1569                         if (rep.ret)
1570                                 break;
1571                         mutex_lock(&dev->struct_mutex);
1572                         uo = drm_lookup_user_object(priv, req->handle);
1573                         entry =
1574                             drm_user_object_entry(uo, drm_buffer_object_t,
1575                                                   base);
1576                         atomic_dec(&entry->usage);
1577                         mutex_unlock(&dev->struct_mutex);
1578                         mutex_lock(&entry->mutex);
1579                         drm_bo_fill_rep_arg(entry, &rep);
1580                         mutex_unlock(&entry->mutex);
1581                         break;
1582                 case drm_bo_unreference:
1583                         rep.ret = drm_user_object_unref(priv, req->handle,
1584                                                         drm_buffer_type);
1585                         break;
1586                 case drm_bo_validate:
1587                         rep.ret = drm_bo_lock_test(dev, filp);
1588
1589                         if (rep.ret)
1590                                 break;
1591                         rep.ret =
1592                             drm_bo_handle_validate(priv, req->handle, req->mask,
1593                                                    req->arg_handle, req->hint,
1594                                                    &rep);
1595                         break;
1596                 case drm_bo_fence:
1597                         rep.ret = drm_bo_lock_test(dev, filp);
1598                         if (rep.ret)
1599                                 break;
                        /* Fence-only operation not implemented yet. */
                        break;
1601                 case drm_bo_info:
1602                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1603                         break;
1604                 case drm_bo_wait_idle:
1605                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1606                                                      req->hint, &rep);
1607                         break;
1608                 case drm_bo_ref_fence:
1609                         rep.ret = -EINVAL;
                        DRM_ERROR("Function is not implemented yet.\n");
                        break;
                default:
1612                         rep.ret = -EINVAL;
1613                 }
1614                 next = arg.next;
1615
1616                 /*
1617                  * A signal interrupted us. Make sure the ioctl is restartable.
1618                  */
1619
1620                 if (rep.ret == -EAGAIN)
1621                         return -EAGAIN;
1622
1623                 arg.handled = 1;
1624                 arg.d.rep = rep;
1625                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1626                 data = next;
1627         } while (data);
1628         return 0;
1629 }
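
/*
 * Note on the loop above: user space hands this ioctl a chain of
 * argument blocks linked through the user-space "next" pointer, and each
 * reply is written back in place with "handled" set, so a restart after
 * -EAGAIN can resume where processing stopped.
 *
 * A hypothetical user-space sketch (struct and ioctl names according to
 * the installed drm headers) chaining a create with a map:
 *
 *	drm_bo_arg_t arg[2];
 *
 *	memset(arg, 0, sizeof(arg));
 *	arg[0].d.req.op = drm_bo_create;
 *	arg[0].next = (unsigned long) &arg[1];
 *	arg[1].d.req.op = drm_bo_map;
 *	ioctl(fd, DRM_IOCTL_BUFOBJ, arg);
 */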
1630
1631 /*
 * dev->struct_mutex locked.
1633  */
1634
1635 static int drm_bo_force_list_clean(drm_device_t * dev,
1636                                    struct list_head *head,
1637                                    unsigned mem_type,
1638                                    int force_no_move, int allow_errors)
1639 {
1640         drm_buffer_manager_t *bm = &dev->bm;
1641         struct list_head *list, *next, *prev;
1642         drm_buffer_object_t *entry;
1643         int ret;
1644         int clean;
1645
1646       retry:
1647         clean = 1;
1648         list_for_each_safe(list, next, head) {
1649                 prev = list->prev;
1650                 entry = list_entry(list, drm_buffer_object_t, lru);
1651                 atomic_inc(&entry->usage);
1652                 mutex_unlock(&dev->struct_mutex);
1653                 mutex_lock(&entry->mutex);
1654                 mutex_lock(&dev->struct_mutex);
1655
1656                 if (prev != list->prev || next != list->next) {
1657                         mutex_unlock(&entry->mutex);
1658                         drm_bo_usage_deref_locked(entry);
1659                         goto retry;
1660                 }
1661                 if (entry->mm_node) {
1662                         clean = 0;
1663
1664                         /*
1665                          * Expire the fence.
1666                          */
1667
1668                         mutex_unlock(&dev->struct_mutex);
1669                         if (entry->fence && bm->nice_mode) {
1670                                 unsigned long _end = jiffies + 3 * DRM_HZ;
1671                                 do {
1672                                         ret = drm_bo_wait(entry, 0, 1, 0);
1673                                         if (ret && allow_errors) {
1674                                                 if (ret == -EINTR)
1675                                                         ret = -EAGAIN;
1676                                                 goto out_err;
1677                                         }
1678                                 } while (ret && !time_after_eq(jiffies, _end));
1679
1680                                 if (entry->fence) {
1681                                         bm->nice_mode = 0;
1682                                         DRM_ERROR("Detected GPU hang or "
1683                                                   "fence manager was taken down. "
                                                  "Evicting waiting buffers.\n");
1685                                 }
1686                         }
1687                         if (entry->fence) {
1688                                 drm_fence_usage_deref_unlocked(dev,
1689                                                                entry->fence);
1690                                 entry->fence = NULL;
1691                         }
1692
1693                         DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
1694                                      0);
1695
1696                         if (force_no_move) {
1697                                 DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
1698                                              0);
1699                         }
1700                         if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
                                DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer is present "
                                          "at cleanup. Removing flag and evicting.\n");
1703                                 entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
1704                                 entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
1705                         }
1706
1707                         ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
1708                         if (ret) {
1709                                 if (allow_errors) {
1710                                         goto out_err;
1711                                 } else {
                                        DRM_ERROR("Eviction failed.\n");
1713                                 }
1714                         }
1715                         mutex_lock(&dev->struct_mutex);
1716                 }
1717                 mutex_unlock(&entry->mutex);
1718                 drm_bo_usage_deref_locked(entry);
1719                 if (prev != list->prev || next != list->next) {
1720                         goto retry;
1721                 }
1722         }
1723         if (!clean)
1724                 goto retry;
1725         return 0;
1726       out_err:
1727         mutex_unlock(&entry->mutex);
1728         drm_bo_usage_deref_unlocked(entry);
1729         mutex_lock(&dev->struct_mutex);
1730         return ret;
1731 }
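
/*
 * Note on the retry logic above: entry->usage is raised before
 * dev->struct_mutex is dropped, which keeps the buffer alive while the
 * (possibly sleeping) buffer mutex is acquired. Once both locks are held
 * again, the saved list neighbours are compared against the current
 * ones; any mismatch means the list changed while unlocked and the scan
 * must be restarted from the head.
 */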
1732
1733 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1734 {
1735         drm_buffer_manager_t *bm = &dev->bm;
1736         int ret = -EINVAL;
1737
1738         if (mem_type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %u\n", mem_type);
1740                 return ret;
1741         }
1742
1743         if (!bm->has_type[mem_type]) {
1744                 DRM_ERROR("Trying to take down uninitialized "
                          "memory manager type %u\n", mem_type);
1746                 return ret;
1747         }
1748         bm->use_type[mem_type] = 0;
1749         bm->has_type[mem_type] = 0;
1750
1751         ret = 0;
        if (mem_type != DRM_BO_MEM_LOCAL) {
1753
1754                 /*
1755                  * Throw out unfenced buffers.
1756                  */
1757
1758                 drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
1759
1760                 /*
1761                  * Throw out evicted no-move buffers.
1762                  */
1763
1764                 drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
1765                                         mem_type, 1, 0);
1766                 drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
1767                                         0);
1768                 drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
1769                                         0);
1770
1771                 if (drm_mm_clean(&bm->manager[mem_type])) {
1772                         drm_mm_takedown(&bm->manager[mem_type]);
1773                 } else {
1774                         ret = -EBUSY;
1775                 }
1776         }
1777
1778         return ret;
1779 }
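
/*
 * A minimal sketch of driver-side takedown (hypothetical driver code,
 * assuming the driver brought up DRM_BO_MEM_VRAM itself). As with the
 * callers in this file, dev->struct_mutex must be held:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM))
 *		DRM_ERROR("VRAM memory manager not clean at takedown.\n");
 *	mutex_unlock(&dev->struct_mutex);
 */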
1780
1781 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1782 {
1783         int ret;
1784         drm_buffer_manager_t *bm = &dev->bm;
1785
1786         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1788                 return -EINVAL;
1789         }
1790
1791         ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
1792         if (ret)
1793                 return ret;
1794         ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
1795         if (ret)
1796                 return ret;
1797         ret =
1798             drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
1799         return ret;
1800 }
1801
1802 static int drm_bo_init_mm(drm_device_t * dev,
1803                           unsigned type,
1804                           unsigned long p_offset, unsigned long p_size)
1805 {
1806         drm_buffer_manager_t *bm = &dev->bm;
1807         int ret = -EINVAL;
1808
1809         if (type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %u\n", type);
1811                 return ret;
1812         }
1813         if (bm->has_type[type]) {
                DRM_ERROR("Memory manager already initialized for type %u\n",
                          type);
1816                 return ret;
1817         }
1818
1819         ret = 0;
1820         if (type != DRM_BO_MEM_LOCAL) {
1821                 if (!p_size) {
                        DRM_ERROR("Zero size memory manager type %u\n", type);
1823                         return ret;
1824                 }
1825                 ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
1826                 if (ret)
1827                         return ret;
1828         }
1829         bm->has_type[type] = 1;
1830         bm->use_type[type] = 1;
1831
1832         INIT_LIST_HEAD(&bm->lru[type]);
1833         INIT_LIST_HEAD(&bm->pinned[type]);
1834
1835         return 0;
1836 }
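
/*
 * Usage sketch (hypothetical driver values): bringing up a TT aperture
 * of "tt_pages" pages starting at page offset 0, under the same locks
 * the ioctl path below takes. Offsets and sizes are page-granular in
 * this file:
 *
 *	mutex_lock(&dev->bm.init_mutex);
 *	mutex_lock(&dev->struct_mutex);
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_unlock(&dev->bm.init_mutex);
 */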
1837
1838 /*
1839  * This is called from lastclose, so we don't need to bother about
1840  * any clients still running when we set the initialized flag to zero.
1841  */
1842
1843 int drm_bo_driver_finish(drm_device_t * dev)
1844 {
1845         drm_buffer_manager_t *bm = &dev->bm;
1846         int ret = 0;
1847         unsigned i = DRM_BO_MEM_TYPES;
1848
1849         mutex_lock(&dev->bm.init_mutex);
1850         mutex_lock(&dev->struct_mutex);
1851
1852         if (!bm->initialized)
1853                 goto out;
1854         bm->initialized = 0;
1855
1856         while (i--) {
1857                 if (bm->has_type[i]) {
1858                         bm->use_type[i] = 0;
1859                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
1860                                 ret = -EBUSY;
                                DRM_ERROR("DRM memory manager type %u "
1862                                           "is not clean.\n", i);
1863                         }
1864                         bm->has_type[i] = 0;
1865                 }
1866         }
1867         mutex_unlock(&dev->struct_mutex);
1868         if (!cancel_delayed_work(&bm->wq)) {
1869                 flush_scheduled_work();
1870         }
1871         mutex_lock(&dev->struct_mutex);
1872         drm_bo_delayed_delete(dev, 1);
1873         if (list_empty(&bm->ddestroy)) {
1874                 DRM_DEBUG("Delayed destroy list was clean\n");
1875         }
1876         if (list_empty(&bm->lru[0])) {
1877                 DRM_DEBUG("Swap list was clean\n");
1878         }
1879         if (list_empty(&bm->pinned[0])) {
1880                 DRM_DEBUG("NO_MOVE list was clean\n");
1881         }
1882         if (list_empty(&bm->unfenced)) {
1883                 DRM_DEBUG("Unfenced list was clean\n");
1884         }
1885       out:
1886         mutex_unlock(&dev->struct_mutex);
1887         mutex_unlock(&dev->bm.init_mutex);
1888         return ret;
1889 }
1890
1891 int drm_bo_driver_init(drm_device_t * dev)
1892 {
1893         drm_bo_driver_t *driver = dev->driver->bo_driver;
1894         drm_buffer_manager_t *bm = &dev->bm;
1895         int ret = -EINVAL;
1896
1897         mutex_lock(&dev->bm.init_mutex);
1898         mutex_lock(&dev->struct_mutex);
1899         if (!driver)
1900                 goto out_unlock;
1901
1902         /*
1903          * Initialize the system memory buffer type.
1904          * Other types need to be driver / IOCTL initialized.
1905          */
1906
        ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
1908         if (ret)
1909                 goto out_unlock;
1910
1911 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1912         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
1913 #else
1914         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
1915 #endif
1916         bm->initialized = 1;
1917         bm->nice_mode = 1;
1918         atomic_set(&bm->count, 0);
1919         bm->cur_pages = 0;
1920         INIT_LIST_HEAD(&bm->unfenced);
1921         INIT_LIST_HEAD(&bm->ddestroy);
1922       out_unlock:
1923         mutex_unlock(&dev->struct_mutex);
1924         mutex_unlock(&dev->bm.init_mutex);
1925         return ret;
1926 }
1927
1928 EXPORT_SYMBOL(drm_bo_driver_init);
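
/*
 * A driver opts in to buffer objects by pointing dev->driver->bo_driver
 * at its drm_bo_driver_t (usually in its static driver definition) and
 * calling drm_bo_driver_init() from its load hook. A minimal sketch
 * (hypothetical driver function and names):
 *
 *	static int foo_load(drm_device_t * dev, unsigned long flags)
 *	{
 *		int ret = drm_bo_driver_init(dev);
 *		if (ret)
 *			DRM_ERROR("Failed to initialize buffer objects.\n");
 *		return ret;
 *	}
 */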
1929
1930 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
1931 {
1932         DRM_DEVICE;
1933
1934         int ret = 0;
1935         drm_mm_init_arg_t arg;
1936         drm_buffer_manager_t *bm = &dev->bm;
1937         drm_bo_driver_t *driver = dev->driver->bo_driver;
1938
1939         if (!driver) {
1940                 DRM_ERROR("Buffer objects are not supported by this driver\n");
1941                 return -EINVAL;
1942         }
1943
1944         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1945
1946         switch (arg.req.op) {
1947         case mm_init:
1948                 ret = -EINVAL;
1949                 mutex_lock(&dev->bm.init_mutex);
1950                 mutex_lock(&dev->struct_mutex);
1951                 if (!bm->initialized) {
1952                         DRM_ERROR("DRM memory manager was not initialized.\n");
1953                         break;
1954                 }
1955                 if (arg.req.mem_type == 0) {
                        DRM_ERROR("System memory buffers "
                                  "already initialized.\n");
1958                         break;
1959                 }
1960                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
1961                                      arg.req.p_offset, arg.req.p_size);
1962                 break;
1963         case mm_takedown:
1964                 LOCK_TEST_WITH_RETURN(dev, filp);
1965                 mutex_lock(&dev->bm.init_mutex);
1966                 mutex_lock(&dev->struct_mutex);
1967                 ret = -EINVAL;
1968                 if (!bm->initialized) {
1969                         DRM_ERROR("DRM memory manager was not initialized\n");
1970                         break;
1971                 }
1972                 if (arg.req.mem_type == 0) {
                        DRM_ERROR("No takedown for system memory buffers.\n");
1974                         break;
1975                 }
1976                 ret = 0;
1977                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
1978                         DRM_ERROR("Memory manager type %d not clean. "
1979                                   "Delaying takedown\n", arg.req.mem_type);
1980                 }
1981                 break;
1982         case mm_lock:
1983                 LOCK_TEST_WITH_RETURN(dev, filp);
1984                 mutex_lock(&dev->bm.init_mutex);
1985                 mutex_lock(&dev->struct_mutex);
1986                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
1987                 break;
1988         case mm_unlock:
1989                 LOCK_TEST_WITH_RETURN(dev, filp);
1990                 mutex_lock(&dev->bm.init_mutex);
1991                 mutex_lock(&dev->struct_mutex);
1992                 ret = 0;
1993                 break;
1994         default:
1995                 DRM_ERROR("Function not implemented yet\n");
1996                 return -EINVAL;
1997         }
1998
1999         mutex_unlock(&dev->struct_mutex);
2000         mutex_unlock(&dev->bm.init_mutex);
2001         if (ret)
2002                 return ret;
2003
2004         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2005         return 0;
2006 }
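
/*
 * User-space counterpart sketch (hypothetical values; the exact ioctl
 * number and argument layout come from the installed drm headers):
 * initializing a VRAM manager of "pages" pages at aperture page 0:
 *
 *	drm_mm_init_arg_t arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.op = mm_init;
 *	arg.req.mem_type = DRM_BO_MEM_VRAM;
 *	arg.req.p_offset = 0;
 *	arg.req.p_size = pages;
 *	ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
 */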