Implement a drm_mem_reg_t substructure in the buffer object type.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  * 
3  * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  * 
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
19  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
20  * USE OR OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * The above copyright notice and this permission notice (including the
23  * next paragraph) shall be included in all copies or substantial portions
24  * of the Software.
25  * 
26  * 
27  **************************************************************************/
28 /*
29  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
30  */
31
32 #include "drmP.h"
33
34 /*
35  * Locking may look a bit complicated but isn't really:
36  *
37  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
38  * when there is a chance that it can be zero before or after the operation.
39  * 
40  * dev->struct_mutex also protects all lists and list heads, as well as hash
41  * tables and hash heads.
42  *
43  * bo->mutex protects the buffer object itself excluding the usage field.
44  * bo->mutex also protects the buffer list heads, so to manipulate those, we need
45  * both the bo->mutex and the dev->struct_mutex.
46  *
47  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
48  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
49  * traversal will, in general, need to be restarted.
50  *
51  */
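/*
 * A minimal sketch of the lock order described above (illustrative only,
 * not part of the driver): the per-object bo->mutex is taken before the
 * global dev->struct_mutex, and any list traversal that had to drop
 * dev->struct_mutex to acquire a bo->mutex must be restarted.
 */
#if 0
static void drm_bo_lock_order_example(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;

	mutex_lock(&bo->mutex);         /* per-object lock first */
	mutex_lock(&dev->struct_mutex); /* then the global lock */

	/* ... manipulate bo->lru, hash entries etc. here ... */

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&bo->mutex);
}
#endif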
52
53
54
55 static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
56 static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo);
57 static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo);
58 static void drm_bo_unmap_virtual(drm_buffer_object_t *bo);
59 static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem,
60                             int no_wait);
61
62 #define DRM_FLAG_MASKED(_old, _new, _mask) {\
63 (_old) ^= (((_old) ^ (_new)) & (_mask)); \
64 }
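/*
 * DRM_FLAG_MASKED(_old, _new, _mask) copies only the bits selected by _mask
 * from _new into _old and leaves the other bits of _old untouched. Worked
 * example: with _old = 0x05, _new = 0x03, _mask = 0x06 we get
 * 0x05 ^ ((0x05 ^ 0x03) & 0x06) = 0x05 ^ 0x06 = 0x03, so _old becomes 0x03.
 */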
65
66 static inline uint32_t drm_bo_type_flags(unsigned type)
67 {
68         return (1 << (24 + type));
69 }
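/*
 * The DRM_BO_FLAG_MEM_* flags occupy one bit per memory type starting at
 * bit 24 (the range covered by DRM_BO_MASK_MEM), so drm_bo_type_flags()
 * simply turns a DRM_BO_MEM_* index into the corresponding flag bit.
 */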
70
71 /*
72  * bo locked. dev->struct_mutex locked.
73  */
74
75 static void drm_bo_add_to_lru(drm_buffer_object_t * bo,
76                               drm_buffer_manager_t * bm)
77 {
78         struct list_head *list;
79         drm_mem_type_manager_t *man;
80
81         bo->mem.mem_type = 0;
82         
83         switch(bo->mem.flags & DRM_BO_MASK_MEM) {
84         case DRM_BO_FLAG_MEM_TT:
85                 bo->mem.mem_type = DRM_BO_MEM_TT;
86                 break;
87         case DRM_BO_FLAG_MEM_VRAM:
88                 bo->mem.mem_type = DRM_BO_MEM_VRAM;
89                 break;
90         case DRM_BO_FLAG_MEM_LOCAL:
91                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
92                 break;
93         default:
94                 BUG_ON(1);              
95         }
96         
97         man = &bm->man[bo->mem.mem_type];
98         list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
99                 &man->pinned : &man->lru;
100         list_add_tail(&bo->lru, list);
101         return;
102 }
103
104 /*
105  * bo locked.
106  */
107
108 static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
109                                 int force_no_move)
110 {
111         drm_device_t *dev = bo->dev;
112         int ret = 0;
113
114         if (bo->mem.mm_node) {
115 #ifdef DRM_ODD_MM_COMPAT
116                 mutex_lock(&dev->struct_mutex);
117                 ret = drm_bo_lock_kmm(bo);
118                 if (ret) {
119                         mutex_unlock(&dev->struct_mutex);
120                         if (ret == -EAGAIN)
121                                 schedule();
122                         return ret;
123                 }
124                 drm_bo_unmap_virtual(bo);
125                 drm_bo_finish_unmap(bo);
126                 drm_bo_unlock_kmm(bo);
127 #else
128                 drm_bo_unmap_virtual(bo);
129                 mutex_lock(&dev->struct_mutex);
130 #endif
131                 if (evict)
132                         drm_ttm_evict(bo->ttm);
133                 else
134                         drm_ttm_unbind(bo->ttm);
135
136                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
137                 if (!(bo->mem.flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
138                         drm_mm_put_block(bo->mem.mm_node);
139                         bo->mem.mm_node = NULL;
140                 }
141                 mutex_unlock(&dev->struct_mutex);
142         }
143
144         bo->mem.flags &= ~DRM_BO_FLAG_MEM_TT;
145         bo->mem.flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
146
147         return 0;
148 }
149
150
151 /*
152  * Call bo->mutex locked.
153  * Wait until the buffer is idle.
154  */
155
156 static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
157                        int no_wait)
158 {
159
160         drm_fence_object_t *fence = bo->fence;
161         int ret;
162
163         if (fence) {
164                 drm_device_t *dev = bo->dev;
165                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
166                         drm_fence_usage_deref_unlocked(dev, fence);
167                         bo->fence = NULL;
168                         return 0;
169                 }
170                 if (no_wait) {
171                         return -EBUSY;
172                 }
173                 ret =
174                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
175                                           bo->fence_type);
176                 if (ret)
177                         return ret;
178
179                 drm_fence_usage_deref_unlocked(dev, fence);
180                 bo->fence = NULL;
181
182         }
183         return 0;
184 }
185
186 /*
187  * Call dev->struct_mutex locked.
188  * Attempts to remove all private references to a buffer by expiring its
189  * fence object and removing it from lru lists and memory managers.
190  */
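/*
 * If the buffer still carries an unsignaled fence it cannot be torn down
 * immediately; it is then left on the bm->ddestroy list and
 * drm_bo_delayed_workqueue() below retries the destruction later.
 */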
191
192
193 static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
194 {
195         drm_device_t *dev = bo->dev;
196         drm_buffer_manager_t *bm = &dev->bm;
197
198         atomic_inc(&bo->usage);
199         mutex_unlock(&dev->struct_mutex);
200         mutex_lock(&bo->mutex);
201
202         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
203
204         if (bo->fence && drm_fence_object_signaled(bo->fence,
205                                                    bo->fence_type)) {
206                 drm_fence_usage_deref_locked(dev, bo->fence);
207                 bo->fence = NULL;
208         }
209
210         if (bo->fence && remove_all) {
211                 if (bm->nice_mode) {
212                         unsigned long _end = jiffies + 3 * DRM_HZ;
213                         int ret;
214                         do {
215                                 ret = drm_bo_wait(bo, 0, 1, 0);
216                         } while (ret && !time_after_eq(jiffies, _end));
217
218                         if (bo->fence) {
219                                 bm->nice_mode = 0;
220                                 DRM_ERROR("Detected GPU lockup or "
221                                           "fence driver was taken down. "
222                                           "Evicting waiting buffers.\n");
223                         }
224                         if (bo->fence) {
225                                 drm_fence_usage_deref_unlocked(dev, bo->fence);
226                                 bo->fence = NULL;
227                         }
228                 }
229         }
230         mutex_lock(&dev->struct_mutex);
231
232         if (!atomic_dec_and_test(&bo->usage)) {
233                 goto out;
234         }
235
236         if (!bo->fence) {
237                 list_del_init(&bo->lru);
238                 if (bo->mem.mm_node) {
239                         drm_mm_put_block(bo->mem.mm_node);
240                         bo->mem.mm_node = NULL;
241                 }
242                 list_del_init(&bo->ddestroy);
243                 mutex_unlock(&bo->mutex);
244                 drm_bo_destroy_locked(bo);
245                 return;
246         }
247
248         if (list_empty(&bo->ddestroy)) {
249                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
250                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
251                 schedule_delayed_work(&bm->wq,
252                                       ((DRM_HZ / 100) <
253                                        1) ? 1 : DRM_HZ / 100);
254         }
255
256 out:
257         mutex_unlock(&bo->mutex);
258         return;
259 }
260
261
262 /*
263  * Verify that refcount is 0 and that there are no internal references
264  * to the buffer object. Then destroy it.
265  */
266
267 static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
268 {
269         drm_device_t *dev = bo->dev;
270         drm_buffer_manager_t *bm = &dev->bm;
271
272         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) {
273                 BUG_ON(bo->fence != NULL);
274
275 #ifdef DRM_ODD_MM_COMPAT
276                 BUG_ON(!list_empty(&bo->vma_list));
277                 BUG_ON(!list_empty(&bo->p_mm_list));
278 #endif
279
280                 if (bo->ttm) {
281                         drm_ttm_unbind(bo->ttm);
282                         drm_destroy_ttm(bo->ttm);
283                         bo->ttm = NULL;
284                 }
285                 atomic_dec(&bm->count);
286
287                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
288
289                 return;
290         }
291
292         /*
293          * Some stuff is still trying to reference the buffer object.
294          * Get rid of those references.
295          */
296
297         drm_bo_cleanup_refs(bo, 0);
298
299         return;
300 }
301
302
303 /*
304  * Call dev->struct_mutex locked.
305  */
306
307 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
308 {
309         drm_buffer_manager_t *bm = &dev->bm;
310
311         drm_buffer_object_t *entry, *nentry;
312         struct list_head *list, *next;
313
314         list_for_each_safe(list, next, &bm->ddestroy) {
315                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
316
317                 nentry = NULL;
318                 if (next != &bm->ddestroy) {
319                         nentry = list_entry(next, drm_buffer_object_t,
320                                             ddestroy);
321                         atomic_inc(&nentry->usage);
322                 }
323
324                 drm_bo_cleanup_refs(entry, remove_all);
325
326                 if (nentry) {
327                         atomic_dec(&nentry->usage);
328                 }
329         }
330
331 }
332
333 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
334 static void drm_bo_delayed_workqueue(void *data)
335 #else
336 static void drm_bo_delayed_workqueue(struct work_struct *work)
337 #endif
338 {
339 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
340         drm_device_t *dev = (drm_device_t *) data;
341         drm_buffer_manager_t *bm = &dev->bm;
342 #else
343         drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
344         drm_device_t *dev = container_of(bm, drm_device_t, bm);
345 #endif
346
347
348         DRM_DEBUG("Delayed delete Worker\n");
349
350         mutex_lock(&dev->struct_mutex);
351         if (!bm->initialized) {
352                 mutex_unlock(&dev->struct_mutex);
353                 return;
354         }
355         drm_bo_delayed_delete(dev, 0);
356         if (bm->initialized && !list_empty(&bm->ddestroy)) {
357                 schedule_delayed_work(&bm->wq,
358                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
359         }
360         mutex_unlock(&dev->struct_mutex);
361 }
362
363 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
364 {
365         if (atomic_dec_and_test(&bo->usage)) {
366                 drm_bo_destroy_locked(bo);
367         }
368 }
369
370 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
371 {
372         drm_buffer_object_t *bo =
373                 drm_user_object_entry(uo, drm_buffer_object_t, base);
374
375         drm_bo_takedown_vm_locked(bo);
376         drm_bo_usage_deref_locked(bo);                                         
377 }
378
379 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
380 {
381         drm_device_t *dev = bo->dev;
382
383         if (atomic_dec_and_test(&bo->usage)) {
384                 mutex_lock(&dev->struct_mutex);
385                 if (atomic_read(&bo->usage) == 0)
386                         drm_bo_destroy_locked(bo);
387                 mutex_unlock(&dev->struct_mutex);
388         }
389 }
390
391 /*
392  * Note. The caller has to register (if applicable) 
393  * and deregister fence object usage.
394  */
395
396 int drm_fence_buffer_objects(drm_file_t * priv,
397                              struct list_head *list,
398                              uint32_t fence_flags,
399                              drm_fence_object_t * fence,
400                              drm_fence_object_t ** used_fence)
401 {
402         drm_device_t *dev = priv->head->dev;
403         drm_buffer_manager_t *bm = &dev->bm;
404
405         drm_buffer_object_t *entry;
406         uint32_t fence_type = 0;
407         int count = 0;
408         int ret = 0;
409         struct list_head *l;
410         LIST_HEAD(f_list);
411
412         mutex_lock(&dev->struct_mutex);
413
414         if (!list)
415                 list = &bm->unfenced;
416
417         list_for_each_entry(entry, list, lru) {
418                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
419                 fence_type |= entry->fence_type;
420                 if (entry->fence_class != 0) {
421                         DRM_ERROR("Fence class %d is not implemented yet.\n",
422                                   entry->fence_class);
423                         ret = -EINVAL;
424                         goto out;
425                 }
426                 count++;
427         }
428
429         if (!count) {
430                 ret = -EINVAL;
431                 goto out;
432         }
433
434         /*
435          * Transfer to a local list before we release the dev->struct_mutex;
436          * This is so we don't get any new unfenced objects while fencing 
437  * the ones we already have.
438          */
439
440         list_splice_init(list, &f_list);
441
442         if (fence) {
443                 if ((fence_type & fence->type) != fence_type) {
444                         DRM_ERROR("Given fence doesn't match buffers "
445                                   "on unfenced list.\n");
446                         ret = -EINVAL;
447                         goto out;
448                 }
449         } else {
450                 mutex_unlock(&dev->struct_mutex);
451                 ret = drm_fence_object_create(dev, fence_type,
452                                               fence_flags | DRM_FENCE_FLAG_EMIT,
453                                               &fence);
454                 mutex_lock(&dev->struct_mutex);
455                 if (ret)
456                         goto out;
457         }
458
459         count = 0;
460         l = f_list.next;
461         while (l != &f_list) {
462                 entry = list_entry(l, drm_buffer_object_t, lru);
463                 atomic_inc(&entry->usage);
464                 mutex_unlock(&dev->struct_mutex);
465                 mutex_lock(&entry->mutex);
466                 mutex_lock(&dev->struct_mutex);
467                 list_del_init(l);
468                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
469                         count++;
470                         if (entry->fence)
471                                 drm_fence_usage_deref_locked(dev, entry->fence);
472                         entry->fence = fence;
473                         DRM_FLAG_MASKED(entry->priv_flags, 0,
474                                         _DRM_BO_FLAG_UNFENCED);
475                         DRM_WAKEUP(&entry->event_queue);
476                         drm_bo_add_to_lru(entry, bm);
477                 }
478                 mutex_unlock(&entry->mutex);
479                 drm_bo_usage_deref_locked(entry);
480                 l = f_list.next;
481         }
482         atomic_add(count, &fence->usage);
483         DRM_DEBUG("Fenced %d buffers\n", count);
484       out:
485         mutex_unlock(&dev->struct_mutex);
486         *used_fence = fence;
487         return ret;
488 }
489
490 EXPORT_SYMBOL(drm_fence_buffer_objects);
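/*
 * A rough usage sketch (hypothetical driver code, not part of this file):
 * after validating a set of buffers for a submission, which leaves them on
 * bm->unfenced with _DRM_BO_FLAG_UNFENCED set, and after emitting the
 * commands that use them, the driver fences them all in one call.
 */
#if 0
static int drm_bo_fence_after_submit_example(drm_file_t * priv)
{
	drm_fence_object_t *fence = NULL;
	int ret;

	/*
	 * list == NULL means "everything on bm->unfenced"; fence == NULL
	 * asks drm_fence_buffer_objects() to create and emit a new fence.
	 */
	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
	if (ret)
		return ret;

	/*
	 * Per the note above drm_fence_buffer_objects(), the caller must
	 * drop its own reference on the returned fence.
	 */
	drm_fence_usage_deref_unlocked(priv->head->dev, fence);
	return 0;
}
#endif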
491
492 /*
493  * bo->mutex locked 
494  */
495
496 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
497                         int no_wait, int force_no_move)
498 {
499         int ret = 0;
500         drm_device_t *dev = bo->dev;
501         drm_buffer_manager_t *bm = &dev->bm;
502         drm_bo_mem_reg_t evict_mem;
503
504         /*
505          * Someone might have modified the buffer before we took the buffer mutex.
506          */
507
508         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
509                 goto out;
510         if (!(bo->mem.flags & drm_bo_type_flags(mem_type)))
511                 goto out;
512
513         ret = drm_bo_wait(bo, 0, 0, no_wait);
514
515         if (ret && ret != -EAGAIN) {
516                 DRM_ERROR("Failed to expire fence before "
517                           "buffer eviction.\n");
518                 goto out;
519         }
520
521         evict_mem = bo->mem;
522         evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
523         
524         ret = drm_bo_mem_space(dev, &evict_mem, no_wait);
525
526         if (ret && ret != -EAGAIN) {
527                 DRM_ERROR("Failed to find memory space for "
528                           "buffer eviction.\n");
529                 goto out;
530         }
531         
532         if ((mem_type != DRM_BO_MEM_TT) &&
533             (evict_mem.mem_type != DRM_BO_MEM_LOCAL)) {
534                 ret = -EINVAL;
535                 DRM_ERROR("Unsupported memory types for eviction.\n");
536                 goto out;
537         }
538       
539         ret = drm_move_tt_to_local(bo, 1, force_no_move);
540         if (ret)
541                 goto out;
542         mutex_lock(&dev->struct_mutex);
543         list_del_init(&bo->lru);
544         drm_bo_add_to_lru(bo, bm);
545         mutex_unlock(&dev->struct_mutex);
546
547         if (ret)
548                 goto out;
549
550         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
551                         _DRM_BO_FLAG_EVICTED);
552       out:
553         return ret;
554 }
555
556
557
558 static int drm_bo_mem_force_space(drm_device_t *dev,
559                                   drm_bo_mem_reg_t *mem,
560                                   uint32_t mem_type,
561                                   int no_wait)
562 {
563         drm_mm_node_t *node;
564         drm_buffer_manager_t *bm = &dev->bm;
565         drm_buffer_object_t *entry;
566         drm_mem_type_manager_t *man = &bm->man[mem_type];
567         struct list_head *lru;
568         unsigned long num_pages = mem->num_pages;
569         int ret;
570
571         mutex_lock(&dev->struct_mutex);
572         do {
573                 node = drm_mm_search_free(&man->manager, num_pages, 
574                                           mem->page_alignment, 1);
575                 if (node)
576                         break;
577
578                 lru = &man->lru;
579                 if (lru->next == lru)
580                         break;
581
582                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
583                 atomic_inc(&entry->usage);
584                 mutex_unlock(&dev->struct_mutex);
585                 mutex_lock(&entry->mutex);
586                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
587
588                 ret = drm_bo_evict(entry, mem_type, no_wait, 0);
589                 mutex_unlock(&entry->mutex);
590                 drm_bo_usage_deref_unlocked(entry);
591                 if (ret)
592                         return ret;
593                 mutex_lock(&dev->struct_mutex);
594         } while (1);
595
596         if (!node) {
597                 mutex_unlock(&dev->struct_mutex);
598                 return -ENOMEM;
599         }
600
601         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
602         mutex_unlock(&dev->struct_mutex);
603         mem->mm_node = node;
604         mem->mem_type = mem_type;
605         mem->flags = drm_bo_type_flags(mem_type);
606         return 0;
607 }
608
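/*
 * drm_bo_mem_space() below makes two passes over driver-supplied priority
 * lists: first it looks for free space in each acceptable memory type in
 * mem_type_prio order; only if that fails does it walk mem_busy_prio and
 * use drm_bo_mem_force_space() to evict LRU buffers and make room. A
 * driver might, for example, install something like (hypothetical values):
 *
 *	static uint32_t prio[]      = { DRM_BO_MEM_VRAM, DRM_BO_MEM_TT,
 *					DRM_BO_MEM_LOCAL };
 *	static uint32_t busy_prio[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
 */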
609
610 static int drm_bo_mem_space(drm_device_t *dev,
611                             drm_bo_mem_reg_t *mem,
612                             int no_wait)
613 {
614         drm_buffer_manager_t *bm= &dev->bm;
615         drm_mem_type_manager_t *man; 
616
617         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
618         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
619         uint32_t i;
620         uint32_t mem_type = DRM_BO_MEM_LOCAL;
621         int type_found = 0;
622         int type_ok = 0;
623         int has_eagain = 0;
624         drm_mm_node_t *node = NULL;
625         int ret;
626
627         for (i=0; i<num_prios; ++i) {
628                 mem_type = prios[i];
629                 type_ok = drm_bo_type_flags(mem_type) & mem->mask ;
630                 if (!type_ok)
631                         continue;
632
633                 if (mem_type == DRM_BO_MEM_LOCAL)
634                         break;
635
636                 man = &bm->man[mem_type];
637                 mutex_lock(&dev->struct_mutex);
638                 if (man->has_type && man->use_type) {
639                         type_found = 1;
640                         node = drm_mm_search_free(&man->manager, mem->num_pages, 
641                                                   mem->page_alignment, 1);
642                         if (node) 
643                                 node = drm_mm_get_block(node, mem->num_pages, 
644                                                         mem->page_alignment);
645                 }
646                 mutex_unlock(&dev->struct_mutex);
647                 if (node)
648                         break;
649         }
650         
651         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
652                 mem->mm_node = node;
653                 mem->mem_type = mem_type;
654                 mem->flags = drm_bo_type_flags(mem_type);
655                 return 0;
656         }
657
658         if (!type_found) {
659                 DRM_ERROR("Requested memory types are not supported\n");
660                 return -EINVAL;
661         }
662
663         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
664         prios = dev->driver->bo_driver->mem_busy_prio;
665
666         for (i=0; i<num_prios; ++i) {
667                 mem_type = prios[i];
668                 if (!(drm_bo_type_flags(mem_type) & mem->mask))
669                         continue;
670                 
671                 man = &bm->man[mem_type];
672                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
673                 
674                 if (ret == 0) 
675                         return 0;
676                 
677                 if (ret == -EAGAIN)
678                         has_eagain = 1;
679         }
680
681         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
682         return ret;
683 }
684
685
686
687
688 static int drm_move_local_to_tt(drm_buffer_object_t * bo, 
689                                 drm_bo_mem_reg_t * mem, 
690                                 int no_wait)
691 {
692         drm_device_t *dev = bo->dev;
693         int ret = 0;
694         
695         bo->mem.mm_node = mem->mm_node;
696
697         DRM_DEBUG("Flipping in to AGP 0x%08lx 0x%08lx\n", 
698                   bo->mem.mm_node->start, bo->mem.mm_node->size);
699         
700 #ifdef DRM_ODD_MM_COMPAT
701         mutex_lock(&dev->struct_mutex);
702         ret = drm_bo_lock_kmm(bo);
703         if (ret) {
704                 mutex_unlock(&dev->struct_mutex);
705                 goto out_put_unlock;
706         }
707 #endif
708         drm_bo_unmap_virtual(bo);
709         ret = drm_bind_ttm(bo->ttm, bo->mem.flags & DRM_BO_FLAG_BIND_CACHED,
710                            bo->mem.mm_node->start);
711         
712         if (ret) {
713 #ifdef DRM_ODD_MM_COMPAT
714                 drm_bo_unlock_kmm(bo);
715                 mutex_unlock(&dev->struct_mutex);
716 #endif
717                 goto out_put_unlock;
718         }
719         
720         if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED))
721                 bo->mem.flags &= ~DRM_BO_FLAG_CACHED;
722         bo->mem.flags &= ~DRM_BO_MASK_MEM;
723         bo->mem.flags |= DRM_BO_FLAG_MEM_TT;
724         bo->mem.mem_type = DRM_BO_MEM_TT;
725         bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
726
727 #ifdef DRM_ODD_MM_COMPAT
728         ret = drm_bo_remap_bound(bo);
729         if (ret) {
730                 /* FIXME */
731         }
732         drm_bo_unlock_kmm(bo);
733         mutex_unlock(&dev->struct_mutex);
734 #endif
735
736         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
737                 ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags);
738                 if (ret)
739                         DRM_ERROR("Could not flush read caches\n");
740         }
741         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
742
743         return 0;
744
745 out_put_unlock:
746         mutex_lock(&dev->struct_mutex);
747         drm_mm_put_block(bo->mem.mm_node);
748         bo->mem.mm_node = NULL;
749         mutex_unlock(&dev->struct_mutex);
750         return ret;
751 }
752
753 static int drm_bo_new_flags(drm_device_t * dev,
754                             uint32_t flags, uint32_t new_mask, uint32_t hint,
755                             int init, uint32_t * n_flags, uint32_t * n_mask)
756 {
757         uint32_t new_flags = 0;
758         uint32_t new_props;
759         drm_buffer_manager_t *bm = &dev->bm;
760         unsigned i;
761
762         /*
763  * First adjust the mask to take away nonexistent memory types.
764          */
765
766         for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
767                 if (!bm->man[i].use_type)
768                         new_mask &= ~drm_bo_type_flags(i);
769         }
770
771         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
772                 DRM_ERROR
773                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
774                      "processes\n");
775                 return -EPERM;
776         }
777         if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
778                 if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
779                      !(bm->man[DRM_BO_MEM_TT].flags &
780                         _DRM_FLAG_MEMTYPE_CACHED) &&
781                      ((new_mask & DRM_BO_FLAG_MEM_VRAM)
782                       && !(bm->man[DRM_BO_MEM_VRAM].flags &
783                            _DRM_FLAG_MEMTYPE_CACHED)))) {
784                         new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
785                 } else {
786                         if (!(bm->man[DRM_BO_MEM_TT].flags &
787                               _DRM_FLAG_MEMTYPE_CACHED))
788                                 new_flags &= DRM_BO_FLAG_MEM_TT;
789                         if (!(bm->man[DRM_BO_MEM_VRAM].flags &
790                               _DRM_FLAG_MEMTYPE_CACHED))
791                                 new_flags &= DRM_BO_FLAG_MEM_VRAM;
792                 }
793         }
794
795         if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
796             !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
797                 if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
798                     !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
799                         DRM_ERROR
800                             ("Cannot read cached from a pinned VRAM / TT buffer\n");
801                         return -EINVAL;
802                 }
803         }
804
805         /*
806          * Determine new memory location:
807          */
808
809         if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {
810
811                 new_flags = new_mask & DRM_BO_MASK_MEM;
812
813                 if (!new_flags) {
814                         DRM_ERROR("Invalid buffer object memory flags\n");
815                         return -EINVAL;
816                 }
817
818                 if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
819                         if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
820                             new_flags & (DRM_BO_FLAG_MEM_VRAM |
821                                          DRM_BO_FLAG_MEM_TT)) {
822                                 new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
823                         } else {
824                                 new_flags = DRM_BO_FLAG_MEM_LOCAL;
825                         }
826                 }
827                 if (new_flags & DRM_BO_FLAG_MEM_TT) {
828                         if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
829                             new_flags & DRM_BO_FLAG_MEM_VRAM) {
830                                 new_flags = DRM_BO_FLAG_MEM_VRAM;
831                         } else {
832                                 new_flags = DRM_BO_FLAG_MEM_TT;
833                         }
834                 }
835         } else {
836                 new_flags = flags & DRM_BO_MASK_MEM;
837         }
838
839         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
840                                 DRM_BO_FLAG_READ);
841
842         if (!new_props) {
843                 DRM_ERROR("Invalid buffer object rwx properties\n");
844                 return -EINVAL;
845         }
846
847         new_flags |= new_mask & ~DRM_BO_MASK_MEM;
848
849         if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
850             (new_flags & DRM_BO_FLAG_NO_EVICT) &&
851             (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
852                 if (!(flags & DRM_BO_FLAG_CACHED)) {
853                         DRM_ERROR
854                             ("Cannot change caching policy of pinned buffer\n");
855                         return -EINVAL;
856                 } else {
857                         new_flags &= ~DRM_BO_FLAG_CACHED;
858                 }
859         }
860
861         *n_flags = new_flags;
862         *n_mask = new_mask;
863         return 0;
864 }
865
866 /*
867  * Call dev->struct_mutex locked.
868  */
869
870 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
871                                               uint32_t handle, int check_owner)
872 {
873         drm_user_object_t *uo;
874         drm_buffer_object_t *bo;
875
876         uo = drm_lookup_user_object(priv, handle);
877
878         if (!uo || (uo->type != drm_buffer_type)) {
879                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
880                 return NULL;
881         }
882
883         if (check_owner && priv != uo->owner) {
884                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
885                         return NULL;
886         }
887
888         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
889         atomic_inc(&bo->usage);
890         return bo;
891 }
892
893 /*
894  * Call bo->mutex locked.
895  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
896  * Unlike drm_bo_busy(), this function doesn't do any fence flushing.
897  */
898
899 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
900 {
901         drm_fence_object_t *fence = bo->fence;
902
903         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
904         if (fence) {
905                 drm_device_t *dev = bo->dev;
906                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
907                         drm_fence_usage_deref_unlocked(dev, fence);
908                         bo->fence = NULL;
909                         return 0;
910                 }
911                 return 1;
912         }
913         return 0;
914 }
915
916 /*
917  * Call bo->mutex locked.
918  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
919  */
920
921 static int drm_bo_busy(drm_buffer_object_t * bo)
922 {
923         drm_fence_object_t *fence = bo->fence;
924
925         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
926         if (fence) {
927                 drm_device_t *dev = bo->dev;
928                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
929                         drm_fence_usage_deref_unlocked(dev, fence);
930                         bo->fence = NULL;
931                         return 0;
932                 }
933                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
934                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
935                         drm_fence_usage_deref_unlocked(dev, fence);
936                         bo->fence = NULL;
937                         return 0;
938                 }
939                 return 1;
940         }
941         return 0;
942 }
943
944 static int drm_bo_read_cached(drm_buffer_object_t * bo)
945 {
946         int ret = 0;
947
948         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
949         if (bo->mem.mm_node)
950                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
951         return ret;
952 }
953
954 /*
955  * Wait until a buffer is unmapped.
956  */
957
958 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
959 {
960         int ret = 0;
961
962         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
963                 return -EBUSY;
964
965         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
966                     atomic_read(&bo->mapped) == -1);
967
968         if (ret == -EINTR)
969                 ret = -EAGAIN;
970
971         return ret;
972 }
973
974 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
975 {
976         int ret;
977
978         mutex_lock(&bo->mutex);
979         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
980         mutex_unlock(&bo->mutex);
981         return ret;
982 }
983
984 /*
985  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
986  * Until then, we cannot really do anything with it except delete it.
987  * The unfenced list is a PITA, and the operations
988  * 1) validating
989  * 2) submitting commands
990  * 3) fencing
991  * should really be one atomic operation.
992  * We now "solve" this problem by keeping
993  * the buffer "unfenced" after validating, but before fencing.
994  */
995
996 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
997                                 int eagain_if_wait)
998 {
999         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1000         unsigned long _end = jiffies + 3 * DRM_HZ;
1001
1002         if (ret && no_wait)
1003                 return -EBUSY;
1004         else if (!ret)
1005                 return 0;
1006
1007         do {
1008                 mutex_unlock(&bo->mutex);
1009                 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1010                             !drm_bo_check_unfenced(bo));
1011                 mutex_lock(&bo->mutex);
1012                 if (ret == -EINTR)
1013                         return -EAGAIN;
1014                 if (ret) {
1015                         DRM_ERROR
1016                             ("Error waiting for buffer to become fenced\n");
1017                         return ret;
1018                 }
1019                 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1020         } while (ret && !time_after_eq(jiffies, _end));
1021         if (ret) {
1022                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1023                 return ret;
1024         }
1025         if (eagain_if_wait)
1026                 return -EAGAIN;
1027
1028         return 0;
1029 }
1030
1031 /*
1032  * Fill in the ioctl reply argument with buffer info.
1033  * Bo locked. 
1034  */
1035
1036 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1037                                 drm_bo_arg_reply_t * rep)
1038 {
1039         rep->handle = bo->base.hash.key;
1040         rep->flags = bo->mem.flags;
1041         rep->size = bo->mem.num_pages * PAGE_SIZE;
1042         rep->offset = bo->offset;
1043         rep->arg_handle = bo->map_list.user_token;
1044         rep->mask = bo->mem.mask;
1045         rep->buffer_start = bo->buffer_start;
1046         rep->fence_flags = bo->fence_type;
1047         rep->rep_flags = 0;
1048         rep->page_alignment = bo->mem.page_alignment;
1049
1050         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1051                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1052                                 DRM_BO_REP_BUSY);
1053         }
1054 }
1055
1056 /*
1057  * Wait for buffer idle and register that we've mapped the buffer.
1058  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
1059  * so that if the client dies, the mapping is automatically 
1060  * unregistered.
1061  */
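/*
 * The matching teardown path is drm_buffer_user_object_unmap() further
 * down: when the last _DRM_REF_TYPE1 reference goes away, either through
 * an explicit unmap or through client teardown, it decrements bo->mapped
 * and wakes anyone sleeping in drm_bo_wait_unmapped().
 */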
1062
1063 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1064                                  uint32_t map_flags, unsigned hint,
1065                                  drm_bo_arg_reply_t * rep)
1066 {
1067         drm_buffer_object_t *bo;
1068         drm_device_t *dev = priv->head->dev;
1069         int ret = 0;
1070         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1071
1072         mutex_lock(&dev->struct_mutex);
1073         bo = drm_lookup_buffer_object(priv, handle, 1);
1074         mutex_unlock(&dev->struct_mutex);
1075
1076         if (!bo)
1077                 return -EINVAL;
1078
1079         mutex_lock(&bo->mutex);
1080         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1081                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1082                 if (ret)
1083                         goto out;
1084         }
1085
1086         /*
1087          * If this returns true, we are currently unmapped.
1088          * We need to do this test, because unmapping can
1089          * be done without the bo->mutex held.
1090          */
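        /*
         * bo->mapped starts out at -1 (unmapped), so atomic_inc_and_test()
         * below succeeds only for the first mapper (-1 -> 0); further maps
         * just bump the count. Each unmap decrements it, and the transition
         * back to -1 wakes waiters in drm_bo_wait_unmapped().
         */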
1091
1092         while (1) {
1093                 if (atomic_inc_and_test(&bo->mapped)) {
1094                         if (no_wait && drm_bo_busy(bo)) {
1095                                 atomic_dec(&bo->mapped);
1096                                 ret = -EBUSY;
1097                                 goto out;
1098                         }
1099                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1100                         if (ret) {
1101                                 atomic_dec(&bo->mapped);
1102                                 goto out;
1103                         }
1104
1105                         if ((map_flags & DRM_BO_FLAG_READ) &&
1106                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1107                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1108                                 drm_bo_read_cached(bo);
1109                         }
1110                         break;
1111                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1112                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1113                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1114
1115                         /*
1116                          * We are already mapped with different flags.
1117                          * We need to wait for unmap.
1118                          */
1119
1120                         ret = drm_bo_wait_unmapped(bo, no_wait);
1121                         if (ret)
1122                                 goto out;
1123
1124                         continue;
1125                 }
1126                 break;
1127         }
1128
1129         mutex_lock(&dev->struct_mutex);
1130         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1131         mutex_unlock(&dev->struct_mutex);
1132         if (ret) {
1133                 if (atomic_add_negative(-1, &bo->mapped))
1134                         DRM_WAKEUP(&bo->event_queue);
1135
1136         } else
1137                 drm_bo_fill_rep_arg(bo, rep);
1138       out:
1139         mutex_unlock(&bo->mutex);
1140         drm_bo_usage_deref_unlocked(bo);
1141         return ret;
1142 }
1143
1144 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1145 {
1146         drm_device_t *dev = priv->head->dev;
1147         drm_buffer_object_t *bo;
1148         drm_ref_object_t *ro;
1149         int ret = 0;
1150
1151         mutex_lock(&dev->struct_mutex);
1152
1153         bo = drm_lookup_buffer_object(priv, handle, 1);
1154         if (!bo) {
1155                 ret = -EINVAL;
1156                 goto out;
1157         }
1158
1159         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1160         if (!ro) {
1161                 ret = -EINVAL;
1162                 goto out;
1163         }
1164
1165         drm_remove_ref_object(priv, ro);
1166         drm_bo_usage_deref_locked(bo);
1167       out:
1168         mutex_unlock(&dev->struct_mutex);
1169         return ret;
1170 }
1171
1172 /*
1173  * Call dev->struct_mutex locked.
1174  */
1175
1176 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1177                                          drm_user_object_t * uo,
1178                                          drm_ref_t action)
1179 {
1180         drm_buffer_object_t *bo =
1181             drm_user_object_entry(uo, drm_buffer_object_t, base);
1182
1183         /*
1184          * We DON'T want to take the bo->mutex here, because callers of
1185          * drm_bo_wait_unmapped() hold it while waiting for the unmap.
1186          */
1187
1188         BUG_ON(action != _DRM_REF_TYPE1);
1189
1190         if (atomic_add_negative(-1, &bo->mapped))
1191                 DRM_WAKEUP(&bo->event_queue);
1192 }
1193
1194 /*
1195  * bo->mutex locked. 
1196  */
1197
1198 static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1199                               int no_wait, int force_no_move)
1200 {
1201         drm_device_t *dev = bo->dev;
1202         drm_buffer_manager_t *bm = &dev->bm;
1203         int ret = 0;
1204         drm_bo_mem_reg_t mem;
1205
1206         /*
1207          * Flush outstanding fences.
1208          */
1209
1210         drm_bo_busy(bo);
1211
1212         /*
1213          * Make sure we're not mapped.
1214          */
1215
1216         ret = drm_bo_wait_unmapped(bo, no_wait);
1217         if (ret)
1218                 return ret;
1219
1220         /*
1221          * Wait for outstanding fences.
1222          */
1223
1224         ret = drm_bo_wait(bo, 0, 0, no_wait);
1225         if (ret)
1226                 return ret;
1227
1228
1229         mem.num_pages = bo->mem.num_pages;
1230         mem.size = mem.num_pages << PAGE_SHIFT;
1231         mem.mask = new_mem_flags;
1232         mem.page_alignment = bo->mem.page_alignment;
1233
1234         mutex_lock(&bm->evict_mutex);
1235         mutex_lock(&dev->struct_mutex);
1236         list_del(&bo->lru);
1237         list_add_tail(&bo->lru,&bm->unfenced);
1238         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, 
1239                         _DRM_BO_FLAG_UNFENCED);
1240         mutex_unlock(&dev->struct_mutex);
1241
1242         /*
1243          * Determine where to move the buffer.
1244          */
1245
1246         ret = drm_bo_mem_space(dev, &mem, no_wait);
1247         mutex_unlock(&bm->evict_mutex);
1248         
1249         if (ret)
1250                 return ret;
1251
1252         if (mem.mem_type == DRM_BO_MEM_TT) {
1253                 ret = drm_move_local_to_tt(bo, &mem, no_wait);
1254                 if (ret) {
1255                         mutex_lock(&dev->struct_mutex);
1256                         list_del_init(&bo->lru);
1257                         drm_bo_add_to_lru(bo, bm);
1258                         mutex_unlock(&dev->struct_mutex);
1259                         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1260                         return ret;
1261                 }
1262         } else {
1263                 drm_move_tt_to_local(bo, 0, force_no_move);
1264         }
1265         return 0;
1266 }
1267
1268 /*
1269  * bo locked.
1270  */
1271
1272 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1273                                       uint32_t new_flags,
1274                                       int move_unfenced, int no_wait)
1275 {
1276         drm_device_t *dev = bo->dev;
1277         drm_buffer_manager_t *bm = &dev->bm;
1278         uint32_t flag_diff = (new_flags ^ bo->mem.flags);
1279         drm_bo_driver_t *driver = dev->driver->bo_driver;
1280
1281         int ret;
1282
1283         if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
1284                 DRM_ERROR("Vram support not implemented yet\n");
1285                 return -EINVAL;
1286         }
1287
1288         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags);
1289         ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
1290         if (ret) {
1291                 DRM_ERROR("Driver did not support given buffer permissions\n");
1292                 return ret;
1293         }
1294
1295         /*
1296          * Move out if we need to change caching policy.
1297          */
1298
1299         if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
1300             !(bo->mem.flags & DRM_BO_FLAG_MEM_LOCAL)) {
1301                 if (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1302                         DRM_ERROR("Cannot change caching policy of "
1303                                   "pinned buffer.\n");
1304                         return -EINVAL;
1305                 }
1306                 ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
1307                 if (ret) {
1308                         if (ret != -EAGAIN)
1309                                 DRM_ERROR("Failed moving buffer.\n");
1310                         return ret;
1311                 }
1312         }
1313         DRM_MASK_VAL(bo->mem.flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
1314         flag_diff = (new_flags ^ bo->mem.flags);
1315
1316         /*
1317          * Check whether we dropped no_move policy, and in that case,
1318          * release reserved manager regions.
1319          */
1320
1321         if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
1322             !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
1323                 mutex_lock(&dev->struct_mutex);
1324                 if (bo->mem.mm_node) {
1325                         drm_mm_put_block(bo->mem.mm_node);
1326                         bo->mem.mm_node = NULL;
1327                 }
1328                 mutex_unlock(&dev->struct_mutex);
1329         }
1330
1331         /*
1332          * Check whether we need to move buffer.
1333          */
1334
1335         if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
1336                 ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
1337                 if (ret) {
1338                         if (ret != -EAGAIN)
1339                                 DRM_ERROR("Failed moving buffer.\n");
1340                         return ret;
1341                 }
1342         }
1343
1344         if (move_unfenced) {
1345
1346                 /*
1347                  * Place on unfenced list.
1348                  */
1349
1350                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1351                                 _DRM_BO_FLAG_UNFENCED);
1352                 mutex_lock(&dev->struct_mutex);
1353                 list_del(&bo->lru);
1354                 list_add_tail(&bo->lru, &bm->unfenced);
1355                 mutex_unlock(&dev->struct_mutex);
1356         } else {
1357                 DRM_FLAG_MASKED(bo->priv_flags, 0,
1358                                 _DRM_BO_FLAG_UNFENCED);
1359                 mutex_lock(&dev->struct_mutex);
1360                 list_del_init(&bo->lru);
1361                 drm_bo_add_to_lru(bo, bm);
1362                 mutex_unlock(&dev->struct_mutex);
1363         }
1364         
1365         bo->mem.flags = new_flags;
1366         return 0;
1367 }
1368
1369 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1370                                   uint32_t flags, uint32_t mask, uint32_t hint,
1371                                   drm_bo_arg_reply_t * rep)
1372 {
1373         drm_buffer_object_t *bo;
1374         drm_device_t *dev = priv->head->dev;
1375         int ret;
1376         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1377         uint32_t new_flags;
1378
1379         bo = drm_lookup_buffer_object(priv, handle, 1);
1380         if (!bo) {
1381                 return -EINVAL;
1382         }
1383
1384         mutex_lock(&bo->mutex);
1385         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1386
1387         if (ret)
1388                 goto out;
1389
1390         ret = drm_bo_new_flags(dev, bo->mem.flags,
1391                                (flags & mask) | (bo->mem.mask & ~mask), hint,
1392                                0, &new_flags, &bo->mem.mask);
1393
1394         if (ret)
1395                 goto out;
1396
1397         ret =
1398             drm_buffer_object_validate(bo, new_flags,
1399                                        !(hint & DRM_BO_HINT_DONT_FENCE),
1400                                        no_wait);
1401         drm_bo_fill_rep_arg(bo, rep);
1402
1403       out:
1404
1405         mutex_unlock(&bo->mutex);
1406         drm_bo_usage_deref_unlocked(bo);
1407         return ret;
1408 }
1409
1410 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1411                               drm_bo_arg_reply_t * rep)
1412 {
1413         drm_buffer_object_t *bo;
1414
1415         bo = drm_lookup_buffer_object(priv, handle, 1);
1416         if (!bo) {
1417                 return -EINVAL;
1418         }
1419         mutex_lock(&bo->mutex);
1420         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1421                 (void)drm_bo_busy(bo);
1422         drm_bo_fill_rep_arg(bo, rep);
1423         mutex_unlock(&bo->mutex);
1424         drm_bo_usage_deref_unlocked(bo);
1425         return 0;
1426 }
1427
1428 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1429                               uint32_t hint, drm_bo_arg_reply_t * rep)
1430 {
1431         drm_buffer_object_t *bo;
1432         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1433         int ret;
1434
1435         bo = drm_lookup_buffer_object(priv, handle, 1);
1436         if (!bo) {
1437                 return -EINVAL;
1438         }
1439
1440         mutex_lock(&bo->mutex);
1441         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1442         if (ret)
1443                 goto out;
1444         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1445         if (ret)
1446                 goto out;
1447
1448         drm_bo_fill_rep_arg(bo, rep);
1449
1450       out:
1451         mutex_unlock(&bo->mutex);
1452         drm_bo_usage_deref_unlocked(bo);
1453         return ret;
1454 }
1455
1456 /*
1457  * Call bo->mutex locked.
1458  */
1459
1460 static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
1461 {
1462         drm_device_t *dev = bo->dev;
1463         int ret = 0;
1464
1465         bo->ttm = NULL;
1466         bo->map_list.user_token = 0ULL;
1467
1468         switch (bo->type) {
1469         case drm_bo_type_dc:
1470                 mutex_lock(&dev->struct_mutex);
1471                 ret = drm_bo_setup_vm_locked(bo);
1472                 mutex_unlock(&dev->struct_mutex);
1473                 if (ret)
1474                         break;
1475                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
1476                 if (!bo->ttm)
1477                         ret = -ENOMEM;
1478                 break;
1479         case drm_bo_type_user:
1480         case drm_bo_type_fake:
1481                 break;
1482         default:
1483                 DRM_ERROR("Illegal buffer object type\n");
1484                 ret = -EINVAL;
1485                 break;
1486         }
1487
1488         return ret;
1489 }
1490
1491 /*
1492  * Transfer a buffer object's memory and LRU status to a newly
1493  * created object. User-space references remain with the old
1494  * object. Call bo->mutex locked.
1495  */
1496
1497 int drm_buffer_object_transfer(drm_buffer_object_t *bo,
1498                                drm_buffer_object_t **new_obj)
1499 {
1500         drm_buffer_object_t *fbo;
1501         drm_device_t *dev = bo->dev;
1502         drm_buffer_manager_t *bm = &dev->bm;
1503
1504         fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1505         if (!fbo)
1506                 return -ENOMEM;
1507         
1508         *fbo = *bo;
1509         mutex_init(&fbo->mutex);
1510         mutex_lock(&fbo->mutex);
1511         mutex_lock(&dev->struct_mutex);
1512
1513         INIT_LIST_HEAD(&fbo->ddestroy);
1514         INIT_LIST_HEAD(&fbo->lru);
1515         list_splice_init(&bo->lru, &fbo->lru);
1516
1517         bo->mem.mm_node = NULL;
1518         bo->ttm = NULL;
1519         bo->fence = NULL;
1520         bo->mem.flags = 0;
1521
1522         fbo->mem.mm_node->private = (void *)fbo;
1523         atomic_set(&fbo->usage, 1);
1524         atomic_inc(&bm->count);
1525         mutex_unlock(&dev->struct_mutex);
1526         mutex_unlock(&fbo->mutex);
1527
1528         *new_obj = fbo;
1529         return 0;
1530 }
1531                 
1532
1533 int drm_buffer_object_create(drm_file_t * priv,
1534                              unsigned long size,
1535                              drm_bo_type_t type,
1536                              uint32_t mask,
1537                              uint32_t hint,
1538                              uint32_t page_alignment,
1539                              unsigned long buffer_start,
1540                              drm_buffer_object_t ** buf_obj)
1541 {
1542         drm_device_t *dev = priv->head->dev;
1543         drm_buffer_manager_t *bm = &dev->bm;
1544         drm_buffer_object_t *bo;
1545         int ret = 0;
1546         uint32_t new_flags;
1547         unsigned long num_pages;
1548
1549         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1550                 DRM_ERROR("Invalid buffer object start.\n");
1551                 return -EINVAL;
1552         }
1553         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1554         if (num_pages == 0) {
1555                 DRM_ERROR("Illegal buffer object size.\n");
1556                 return -EINVAL;
1557         }
1558
1559         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1560
1561         if (!bo)
1562                 return -ENOMEM;
1563
1564         mutex_init(&bo->mutex);
1565         mutex_lock(&bo->mutex);
1566
1567         atomic_set(&bo->usage, 1);
1568         atomic_set(&bo->mapped, -1);
1569         DRM_INIT_WAITQUEUE(&bo->event_queue);
1570         INIT_LIST_HEAD(&bo->lru);
1571         INIT_LIST_HEAD(&bo->ddestroy);
1572 #ifdef DRM_ODD_MM_COMPAT
1573         INIT_LIST_HEAD(&bo->p_mm_list);
1574         INIT_LIST_HEAD(&bo->vma_list);
1575 #endif
1576         bo->dev = dev;
1577         bo->type = type;
1578         bo->mem.num_pages = num_pages;
1579         bo->mem.mm_node = NULL;
1580         bo->mem.page_alignment = page_alignment;
1581         if (bo->type == drm_bo_type_fake) {
1582                 bo->offset = buffer_start;
1583                 bo->buffer_start = 0;
1584         } else {
1585                 bo->buffer_start = buffer_start;
1586         }
1587         bo->priv_flags = 0;
1588         bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
1589         atomic_inc(&bm->count);
1590         ret = drm_bo_new_flags(dev, bo->mem.flags, mask, hint,
1591                                1, &new_flags, &bo->mem.mask);
1592         if (ret)
1593                 goto out_err;
1594         ret = drm_bo_add_ttm(priv, bo);
1595         if (ret)
1596                 goto out_err;
1597
1598         ret = drm_buffer_object_validate(bo, new_flags, 0,
1599                                          hint & DRM_BO_HINT_DONT_BLOCK);
1600         if (ret)
1601                 goto out_err;
1602
1603         mutex_unlock(&bo->mutex);
1604         *buf_obj = bo;
1605         return 0;
1606
1607       out_err:
1608         mutex_unlock(&bo->mutex);
1609         drm_bo_usage_deref_unlocked(bo);
1610         return ret;
1611 }
1612
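/*
 * Add the buffer object to the file's user object hash table so that
 * user space can refer to it by handle, optionally making the handle
 * shareable between clients. Takes and releases dev->struct_mutex.
 */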
1613 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1614                                   int shareable)
1615 {
1616         drm_device_t *dev = priv->head->dev;
1617         int ret;
1618
1619         mutex_lock(&dev->struct_mutex);
1620         ret = drm_add_user_object(priv, &bo->base, shareable);
1621         if (ret)
1622                 goto out;
1623
1624         bo->base.remove = drm_bo_base_deref_locked;
1625         bo->base.type = drm_buffer_type;
1626         bo->base.ref_struct_locked = NULL;
1627         bo->base.unref = drm_buffer_user_object_unmap;
1628
1629       out:
1630         mutex_unlock(&dev->struct_mutex);
1631         return ret;
1632 }
1633
1634 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1635 {
1636         LOCK_TEST_WITH_RETURN(dev, filp);
1637         return 0;
1638 }
1639
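/*
 * The buffer object ioctl. User space passes a linked list of
 * drm_bo_arg_t blocks; each block is copied in, dispatched on req->op,
 * marked as handled and the reply copied back before the next block is
 * processed. -EAGAIN is returned directly so that an ioctl interrupted
 * by a signal can be restarted.
 */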
1640 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1641 {
1642         DRM_DEVICE;
1643         drm_bo_arg_t arg;
1644         drm_bo_arg_request_t *req = &arg.d.req;
1645         drm_bo_arg_reply_t rep;
1646         unsigned long next;
1647         drm_user_object_t *uo;
1648         drm_buffer_object_t *entry;
1649
1650         if (!dev->bm.initialized) {
1651                 DRM_ERROR("Buffer object manager is not initialized.\n");
1652                 return -EINVAL;
1653         }
1654
1655         do {
1656                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1657
1658                 if (arg.handled) {
1659                         data = arg.next;
1660                         continue;
1661                 }
1662
1663                 rep.ret = 0;
1664                 switch (req->op) {
1665                 case drm_bo_create:
1666                         rep.ret =
1667                             drm_buffer_object_create(priv, req->size,
1668                                                      req->type,
1669                                                      req->mask,
1670                                                      req->hint,
1671                                                      req->page_alignment,
1672                                                      req->buffer_start, &entry);
1673                         if (rep.ret)
1674                                 break;
1675
1676                         rep.ret =
1677                             drm_bo_add_user_object(priv, entry,
1678                                                    req->mask &
1679                                                    DRM_BO_FLAG_SHAREABLE);
1680                         if (rep.ret) {
1681                                 drm_bo_usage_deref_unlocked(entry);
1682                                 break;
1683                         }
1686
1687                         mutex_lock(&entry->mutex);
1688                         drm_bo_fill_rep_arg(entry, &rep);
1689                         mutex_unlock(&entry->mutex);
1690                         break;
1691                 case drm_bo_unmap:
1692                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1693                         break;
1694                 case drm_bo_map:
1695                         rep.ret = drm_buffer_object_map(priv, req->handle,
1696                                                         req->mask,
1697                                                         req->hint, &rep);
1698                         break;
1699                 case drm_bo_destroy:
1700                         mutex_lock(&dev->struct_mutex);
1701                         uo = drm_lookup_user_object(priv, req->handle);
1702                         if (!uo || (uo->type != drm_buffer_type)
1703                             || uo->owner != priv) {
1704                                 mutex_unlock(&dev->struct_mutex);
1705                                 rep.ret = -EINVAL;
1706                                 break;
1707                         }
1708                         rep.ret = drm_remove_user_object(priv, uo);
1709                         mutex_unlock(&dev->struct_mutex);
1710                         break;
1711                 case drm_bo_reference:
1712                         rep.ret = drm_user_object_ref(priv, req->handle,
1713                                                       drm_buffer_type, &uo);
1714                         if (rep.ret)
1715                                 break;
1716                         mutex_lock(&dev->struct_mutex);
1717                         uo = drm_lookup_user_object(priv, req->handle);
1718                         entry =
1719                             drm_user_object_entry(uo, drm_buffer_object_t,
1720                                                   base);
1721                         atomic_dec(&entry->usage);
1722                         mutex_unlock(&dev->struct_mutex);
1723                         mutex_lock(&entry->mutex);
1724                         drm_bo_fill_rep_arg(entry, &rep);
1725                         mutex_unlock(&entry->mutex);
1726                         break;
1727                 case drm_bo_unreference:
1728                         rep.ret = drm_user_object_unref(priv, req->handle,
1729                                                         drm_buffer_type);
1730                         break;
1731                 case drm_bo_validate:
1732                         rep.ret = drm_bo_lock_test(dev, filp);
1733
1734                         if (rep.ret)
1735                                 break;
1736                         rep.ret =
1737                             drm_bo_handle_validate(priv, req->handle, req->mask,
1738                                                    req->arg_handle, req->hint,
1739                                                    &rep);
1740                         break;
1741                 case drm_bo_fence:
1742                         rep.ret = drm_bo_lock_test(dev, filp);
1743                         if (rep.ret)
1744                                 break;
1745                          /**/ break;
1746                 case drm_bo_info:
1747                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1748                         break;
1749                 case drm_bo_wait_idle:
1750                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1751                                                      req->hint, &rep);
1752                         break;
1753                 case drm_bo_ref_fence:
1754                         rep.ret = -EINVAL;
1755                         DRM_ERROR("Function is not implemented yet.\n");
1756                 default:
1757                         rep.ret = -EINVAL;
1758                 }
1759                 next = arg.next;
1760
1761                 /*
1762                  * If a signal interrupted the operation, return -EAGAIN so the ioctl can be restarted.
1763                  */
1764
1765                 if (rep.ret == -EAGAIN)
1766                         return -EAGAIN;
1767
1768                 arg.handled = 1;
1769                 arg.d.rep = rep;
1770                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1771                 data = next;
1772         } while (data);
1773         return 0;
1774 }
1775
1776 /*
1777  * dev->struct_mutex locked.
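 *
 * Forces all buffers on the given list out of memory type mem_type,
 * expiring their fences first: in nice mode we wait up to three seconds
 * for a fence before declaring the fence manager hung, after which
 * remaining fences are simply discarded. Since dev->struct_mutex must be
 * dropped to take each entry->mutex, the traversal restarts from the
 * list head whenever the list changes underneath us.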
1778  */
1779
1780 static int drm_bo_force_list_clean(drm_device_t * dev,
1781                                    struct list_head *head,
1782                                    unsigned mem_type,
1783                                    int force_no_move, int allow_errors)
1784 {
1785         drm_buffer_manager_t *bm = &dev->bm;
1786         struct list_head *list, *next, *prev;
1787         drm_buffer_object_t *entry;
1788         int ret;
1789         int clean;
1790
1791       retry:
1792         clean = 1;
1793         list_for_each_safe(list, next, head) {
1794                 prev = list->prev;
1795                 entry = list_entry(list, drm_buffer_object_t, lru);
1796                 atomic_inc(&entry->usage);
1797                 mutex_unlock(&dev->struct_mutex);
1798                 mutex_lock(&entry->mutex);
1799                 mutex_lock(&dev->struct_mutex);
1800
1801                 if (prev != list->prev || next != list->next) {
1802                         mutex_unlock(&entry->mutex);
1803                         drm_bo_usage_deref_locked(entry);
1804                         goto retry;
1805                 }
1806                 if (entry->mem.mm_node) {
1807                         clean = 0;
1808
1809                         /*
1810                          * Expire the fence.
1811                          */
1812
1813                         mutex_unlock(&dev->struct_mutex);
1814                         if (entry->fence && bm->nice_mode) {
1815                                 unsigned long _end = jiffies + 3 * DRM_HZ;
1816                                 do {
1817                                         ret = drm_bo_wait(entry, 0, 1, 0);
1818                                         if (ret && allow_errors) {
1819                                                 if (ret == -EINTR)
1820                                                         ret = -EAGAIN;
1821                                                 goto out_err;
1822                                         }
1823                                 } while (ret && !time_after_eq(jiffies, _end));
1824
1825                                 if (entry->fence) {
1826                                         bm->nice_mode = 0;
1827                                         DRM_ERROR("Detected GPU hang or "
1828                                                   "fence manager was taken down. "
1829                                                   "Evicting waiting buffers\n");
1830                                 }
1831                         }
1832                         if (entry->fence) {
1833                                 drm_fence_usage_deref_unlocked(dev,
1834                                                                entry->fence);
1835                                 entry->fence = NULL;
1836                         }
1837
1838                         DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
1839                                      0);
1840
1841                         if (force_no_move) {
1842                                 DRM_MASK_VAL(entry->mem.flags, DRM_BO_FLAG_NO_MOVE,
1843                                              0);
1844                         }
1845                         if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1846                                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
1847                                           "cleanup. Removing flag and evicting.\n");
1848                                 entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1849                                 entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1850                         }
1851
1852                         ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
1853                         if (ret) {
1854                                 if (allow_errors) {
1855                                         goto out_err;
1856                                 } else {
1857                                         DRM_ERROR("Aargh. Eviction failed.\n");
1858                                 }
1859                         }
1860                         mutex_lock(&dev->struct_mutex);
1861                 }
1862                 mutex_unlock(&entry->mutex);
1863                 drm_bo_usage_deref_locked(entry);
1864                 if (prev != list->prev || next != list->next) {
1865                         goto retry;
1866                 }
1867         }
1868         if (!clean)
1869                 goto retry;
1870         return 0;
1871       out_err:
1872         mutex_unlock(&entry->mutex);
1873         drm_bo_usage_deref_unlocked(entry);
1874         mutex_lock(&dev->struct_mutex);
1875         return ret;
1876 }
1877
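/*
 * Take down the memory manager for memory type mem_type: mark the type
 * unused, force any remaining buffers off it and tear down the
 * underlying range manager. Returns -EBUSY if the manager could not be
 * cleaned completely. dev->struct_mutex locked.
 */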
1878 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1879 {
1880         drm_buffer_manager_t *bm = &dev->bm;
1881         drm_mem_type_manager_t *man = &bm->man[mem_type];
1882         drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL];
1883         int ret = -EINVAL;
1884
1885         if (mem_type >= DRM_BO_MEM_TYPES) {
1886                 DRM_ERROR("Illegal memory type %d\n", mem_type);
1887                 return ret;
1888         }
1889
1890         if (!man->has_type) {
1891                 DRM_ERROR("Trying to take down uninitialized "
1892                           "memory manager type\n");
1893                 return ret;
1894         }
1895         man->use_type = 0;
1896         man->has_type = 0;
1897
1898         ret = 0;
1899         if (mem_type > 0) {
1900
1901                 /*
1902                  * Throw out unfenced buffers.
1903                  */
1904
1905                 drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
1906
1907                 /*
1908                  * Throw out evicted no-move buffers.
1909                  */
1910
1911                 drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0);
1912                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0);
1913                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0);
1914
1915                 if (drm_mm_clean(&man->manager)) {
1916                         drm_mm_takedown(&man->manager);
1917                 } else {
1918                         ret = -EBUSY;
1919                 }
1920         }
1921
1922         return ret;
1923 }
1924
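/*
 * Evict all buffers from memory type mem_type without taking the
 * manager itself down, failing if any buffer cannot be evicted.
 * Used by the mm_lock ioctl below. dev->struct_mutex locked.
 */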
1925 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1926 {
1927         int ret;
1928         drm_buffer_manager_t *bm = &dev->bm;
1929         drm_mem_type_manager_t *man = &bm->man[mem_type];
1930
1931         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1932                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1933                 return -EINVAL;
1934         }
1935
1936         ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
1937         if (ret)
1938                 return ret;
1939         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1);
1940         if (ret)
1941                 return ret;
1942         ret =
1943             drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1);
1944         return ret;
1945 }
1946
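/*
 * Initialize the memory manager for memory type 'type', covering p_size
 * pages starting at page offset p_offset. DRM_BO_MEM_LOCAL (type 0)
 * manages system memory and therefore needs no range manager.
 * dev->struct_mutex locked.
 */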
1947 static int drm_bo_init_mm(drm_device_t * dev,
1948                           unsigned type,
1949                           unsigned long p_offset, unsigned long p_size)
1950 {
1951         drm_buffer_manager_t *bm = &dev->bm;
1952         int ret = -EINVAL;
1953         drm_mem_type_manager_t *man;
1954
1955         if (type >= DRM_BO_MEM_TYPES) {
1956                 DRM_ERROR("Illegal memory type %d\n", type);
1957                 return ret;
1958         }
1959         
1960         man = &bm->man[type];
1961         if (man->has_type) {
1962                 DRM_ERROR("Memory manager already initialized for type %d\n",
1963                           type);
1964                 return ret;
1965         }
1966
1967         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
1968         if (ret) 
1969                 return ret;
1970
1971         ret = 0;
1972         if (type != DRM_BO_MEM_LOCAL) {
1973                 if (!p_size) {
1974                         DRM_ERROR("Zero size memory manager type %d\n", type);
1975                         return -EINVAL;
1976                 }
1977                 ret = drm_mm_init(&man->manager, p_offset, p_size);
1978                 if (ret)
1979                         return ret;
1980         }
1981         man->has_type = 1;
1982         man->use_type = 1;
1983
1984         INIT_LIST_HEAD(&man->lru);
1985         INIT_LIST_HEAD(&man->pinned);
1986
1987         return 0;
1988 }
1989
1990 /*
1991  * This is called from lastclose, so we don't need to bother about
1992  * any clients still running when we set the initialized flag to zero.
1993  */
1994
1995 int drm_bo_driver_finish(drm_device_t * dev)
1996 {
1997         drm_buffer_manager_t *bm = &dev->bm;
1998         int ret = 0;
1999         unsigned i = DRM_BO_MEM_TYPES;
2000         drm_mem_type_manager_t *man;
2001
2002         mutex_lock(&dev->bm.init_mutex);
2003         mutex_lock(&dev->struct_mutex);
2004
2005         if (!bm->initialized)
2006                 goto out;
2007         bm->initialized = 0;
2008
2009         while (i--) {
2010                 man = &bm->man[i];
2011                 if (man->has_type) {
2012                         man->use_type = 0;
2013                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2014                                 ret = -EBUSY;
2015                                 DRM_ERROR("DRM memory manager type %d "
2016                                           "is not clean.\n", i);
2017                         }
2018                         man->has_type = 0;
2019                 }
2020         }
2021         mutex_unlock(&dev->struct_mutex);
2022         if (!cancel_delayed_work(&bm->wq)) {
2023                 flush_scheduled_work();
2024         }
2025         mutex_lock(&dev->struct_mutex);
2026         drm_bo_delayed_delete(dev, 1);
2027         if (list_empty(&bm->ddestroy)) {
2028                 DRM_DEBUG("Delayed destroy list was clean\n");
2029         }
2030         if (list_empty(&bm->man[0].lru)) {
2031                 DRM_DEBUG("Swap list was clean\n");
2032         }
2033         if (list_empty(&bm->man[0].pinned)) {
2034                 DRM_DEBUG("NO_MOVE list was clean\n");
2035         }
2036         if (list_empty(&bm->unfenced)) {
2037                 DRM_DEBUG("Unfenced list was clean\n");
2038         }
2039       out:
2040         mutex_unlock(&dev->struct_mutex);
2041         mutex_unlock(&dev->bm.init_mutex);
2042         return ret;
2043 }
2044
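/*
 * One-time buffer manager setup for a device: initializes the system
 * memory type, the delayed-destroy work queue and the global buffer
 * lists. Driver-specific memory types are set up later through the
 * mm_init ioctl.
 */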
2045 int drm_bo_driver_init(drm_device_t * dev)
2046 {
2047         drm_bo_driver_t *driver = dev->driver->bo_driver;
2048         drm_buffer_manager_t *bm = &dev->bm;
2049         int ret = -EINVAL;
2050
2051         mutex_lock(&dev->bm.init_mutex);
2052         mutex_lock(&dev->struct_mutex);
2053         if (!driver)
2054                 goto out_unlock;
2055
2056         /*
2057          * Initialize the system memory buffer type.
2058          * Other types need to be driver / IOCTL initialized.
2059          */
2060
2061         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2062         if (ret)
2063                 goto out_unlock;
2064
2065 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2066         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2067 #else
2068         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2069 #endif
2070         bm->initialized = 1;
2071         bm->nice_mode = 1;
2072         atomic_set(&bm->count, 0);
2073         bm->cur_pages = 0;
2074         INIT_LIST_HEAD(&bm->unfenced);
2075         INIT_LIST_HEAD(&bm->ddestroy);
2076       out_unlock:
2077         mutex_unlock(&dev->struct_mutex);
2078         mutex_unlock(&dev->bm.init_mutex);
2079         return ret;
2080 }
2081
2082 EXPORT_SYMBOL(drm_bo_driver_init);
2083
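/*
 * Ioctl entry point for initializing, taking down, locking and unlocking
 * the driver-specific memory types. All operations except mm_init
 * require the hardware lock to be held by the caller.
 */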
2084 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2085 {
2086         DRM_DEVICE;
2087
2088         int ret = 0;
2089         drm_mm_init_arg_t arg;
2090         drm_buffer_manager_t *bm = &dev->bm;
2091         drm_bo_driver_t *driver = dev->driver->bo_driver;
2092
2093         if (!driver) {
2094                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2095                 return -EINVAL;
2096         }
2097
2098         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2099
2100         switch (arg.req.op) {
2101         case mm_init:
2102                 ret = -EINVAL;
2103                 mutex_lock(&dev->bm.init_mutex);
2104                 mutex_lock(&dev->struct_mutex);
2105                 if (!bm->initialized) {
2106                         DRM_ERROR("DRM memory manager was not initialized.\n");
2107                         break;
2108                 }
2109                 if (arg.req.mem_type == 0) {
2110                         DRM_ERROR
2111                             ("System memory buffers already initialized.\n");
2112                         break;
2113                 }
2114                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2115                                      arg.req.p_offset, arg.req.p_size);
2116                 break;
2117         case mm_takedown:
2118                 LOCK_TEST_WITH_RETURN(dev, filp);
2119                 mutex_lock(&dev->bm.init_mutex);
2120                 mutex_lock(&dev->struct_mutex);
2121                 ret = -EINVAL;
2122                 if (!bm->initialized) {
2123                         DRM_ERROR("DRM memory manager was not initialized\n");
2124                         break;
2125                 }
2126                 if (arg.req.mem_type == 0) {
2127                         DRM_ERROR("No takedown for system memory buffers.\n");
2128                         break;
2129                 }
2130                 ret = 0;
2131                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2132                         DRM_ERROR("Memory manager type %d not clean. "
2133                                   "Delaying takedown\n", arg.req.mem_type);
2134                 }
2135                 break;
2136         case mm_lock:
2137                 LOCK_TEST_WITH_RETURN(dev, filp);
2138                 mutex_lock(&dev->bm.init_mutex);
2139                 mutex_lock(&dev->struct_mutex);
2140                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2141                 break;
2142         case mm_unlock:
2143                 LOCK_TEST_WITH_RETURN(dev, filp);
2144                 mutex_lock(&dev->bm.init_mutex);
2145                 mutex_lock(&dev->struct_mutex);
2146                 ret = 0;
2147                 break;
2148         default:
2149                 DRM_ERROR("Function not implemented yet\n");
2150                 return -EINVAL;
2151         }
2152
2153         mutex_unlock(&dev->struct_mutex);
2154         mutex_unlock(&dev->bm.init_mutex);
2155         if (ret)
2156                 return ret;
2157
2158         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2159         return 0;
2160 }
2161
2162 /*
2163  * buffer object vm functions.
2164  */
2165
2166 /**
2167  * Get the PCI offset for the buffer object memory.
2168  *
2169  * \param bo The buffer object.
2170  * \param bus_base On return the base of the PCI region
2171  * \param bus_offset On return the byte offset into the PCI region
2172  * \param bus_size On return the byte size of the buffer object or zero if
2173  *     the buffer object memory is not accessible through a PCI region.
2174  * \return Failure indication.
2175  * 
2176  * Returns -EINVAL if the buffer object is currently not mappable.
2177  * Otherwise returns zero. Call bo->mutex locked.
2178  */
2179
2180 int drm_bo_pci_offset(const drm_buffer_object_t *bo,
2181                       unsigned long *bus_base,
2182                       unsigned long *bus_offset,
2183                       unsigned long *bus_size)
2184 {
2185         drm_device_t *dev = bo->dev;
2186         drm_buffer_manager_t *bm = &dev->bm;
2187         drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; 
2188
2189         *bus_size = 0;
2190         if (bo->type != drm_bo_type_dc)
2191                 return -EINVAL;
2192         
2193         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) 
2194                 return -EINVAL;
2195                 
2196         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2197                 drm_ttm_t *ttm = bo->ttm;
2198
2199                 if (!bo->ttm) {
2200                         return -EINVAL;
2201                 }
2202                   
2203                 drm_ttm_fixup_caching(ttm);
2204
2205                 if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED)) 
2206                         return 0;
2207                 if (ttm->be->flags & DRM_BE_FLAG_CMA)
2208                         return 0;
2209                 *bus_base = ttm->be->aperture_base;
2210         } else {
2211                 *bus_base = man->io_offset;
2212         }
2213
2214         *bus_offset = bo->mem.mm_node->start << PAGE_SHIFT;
2215         *bus_size = bo->mem.num_pages << PAGE_SHIFT;
2216         return 0;
2217 }
2218
2219 /**
2220  * Return a kernel virtual address to the buffer object PCI memory.
2221  *
2222  * \param bo The buffer object.
2223  * \return Failure indication.
2224  * 
2225  * Returns -EINVAL if the buffer object is currently not mappable.
2226  * Returns -ENOMEM if the ioremap operation failed.
2227  * Otherwise returns zero.
2228  * 
2229  * After a successful call, bo->iomap contains the virtual address, or NULL
2230  * if the buffer object content is not accessible through PCI space. 
2231  * Call bo->mutex locked.
2232  */
2233
2234 int drm_bo_ioremap(drm_buffer_object_t *bo)
2235 {
2236         drm_device_t *dev = bo->dev;
2237         drm_buffer_manager_t *bm = &dev->bm;
2238         drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type]; 
2239         unsigned long bus_offset;
2240         unsigned long bus_size;
2241         unsigned long bus_base;
2242         int ret;
2243
2244         BUG_ON(bo->iomap);
2245
2246         ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
2247         if (ret || bus_size == 0) 
2248                 return ret;
2249
2250         if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
2251                 bo->iomap = (void *) (((u8 *)man->io_addr) + bus_offset);
2252         else {
2253                 bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size);
2254                 if (!bo->iomap)
2255                         return -ENOMEM;
2256         }
2257         
2258         return 0;
2259 }
2260
2261 /**
2262  * Unmap a mapping obtained with drm_bo_ioremap().
2263  *
2264  * \param bo The buffer object.
2265  *
2266  * Call bo->mutex locked.
2267  */
2268
2269 void drm_bo_iounmap(drm_buffer_object_t *bo)
2270 {
2271         drm_device_t *dev = bo->dev;
2272         drm_buffer_manager_t *bm; 
2273         drm_mem_type_manager_t *man; 
2274
2275
2276         bm = &dev->bm;
2277         man = &bm->man[bo->mem.mem_type];
2278         
2279         if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) 
2280                 iounmap(bo->iomap);
2281         
2282         bo->iomap = NULL;
2283 }
2284
2285 /**
2286  * Kill all user-space virtual mappings of this buffer object.
2287  *
2288  * \param bo The buffer object.
2289  *
2290  * Call bo->mutex locked.
2291  */
2292
2293 static void drm_bo_unmap_virtual(drm_buffer_object_t *bo)
2294 {
2295         drm_device_t *dev = bo->dev;
2296         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2297         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2298
2299         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2300 }
2301
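/*
 * Tear down the fake map, map hash entry and file offset node that back
 * the buffer object's user-space mapping, and drop the usage reference
 * that the mapping held. dev->struct_mutex locked.
 */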
2302 static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo)
2303 {
2304         drm_map_list_t *list = &bo->map_list;
2305         drm_local_map_t *map;
2306         drm_device_t *dev = bo->dev;
2307         
2308         if (list->user_token) {
2309                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2310                 list->user_token = 0;
2311         }
2312         if (list->file_offset_node) {
2313                 drm_mm_put_block(list->file_offset_node);
2314                 list->file_offset_node = NULL;
2315         }
2316
2317         map = list->map;
2318         if (!map)
2319                 return;
2320
2321         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2322         list->map = NULL;
2323         list->user_token = 0ULL;
2324         drm_bo_usage_deref_locked(bo);
2325 }
2326
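/*
 * Set up what is needed to mmap the buffer object from user space: a
 * fake _DRM_TTM map, a file offset range from the offset manager and a
 * map hash entry translating that offset back to the object. Takes an
 * extra usage reference that is dropped again in
 * drm_bo_takedown_vm_locked(). dev->struct_mutex locked.
 */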
2327 static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo)
2328 {
2329         drm_map_list_t *list = &bo->map_list;
2330         drm_local_map_t *map;
2331         drm_device_t *dev = bo->dev;
2332         
2333         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2334         if (!list->map)
2335                 return -ENOMEM;
2336
2337         map = list->map;
2338         map->offset = 0;
2339         map->type = _DRM_TTM;
2340         map->flags = _DRM_REMOVABLE;
2341         map->size = bo->mem.num_pages * PAGE_SIZE;
2342         atomic_inc(&bo->usage);
2343         map->handle = (void *) bo;
2344         
2345         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2346                                                     bo->mem.num_pages, 0, 0);
2347
2348         if (!list->file_offset_node) {
2349                 drm_bo_takedown_vm_locked(bo);
2350                 return -ENOMEM;
2351         }
2352
2353         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2354                                                   bo->mem.num_pages, 0);
2355
2356         list->hash.key = list->file_offset_node->start;
2357         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2358                 drm_bo_takedown_vm_locked(bo);
2359                 return -ENOMEM;
2360         }
2361                 
2362         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2363
2364         return 0;
2365 }