platform/upstream/libdrm.git, commit ed089096db55d42b87b90241ff8e4ad242f020c2: linux-core/drm_bo.c
1 /**************************************************************************
2  * 
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  * 
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  * 
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself, excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those we need
44  * both bo->mutex and dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
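
/*
 * Illustrative sketch (not part of the driver): the relock-and-restart
 * pattern the comment above describes. The helper name example_relock()
 * and the "process" step are hypothetical; the lock ordering and the
 * usage-reference dance are the ones used throughout this file.
 */
#if 0
static void example_relock(drm_device_t * dev, drm_buffer_object_t * bo)
{
        /* Caller holds dev->struct_mutex and also wants bo->mutex. */
        atomic_inc(&bo->usage);            /* keep bo alive across the unlock */
        mutex_unlock(&dev->struct_mutex);  /* bo->mutex must be taken first... */
        mutex_lock(&bo->mutex);
        mutex_lock(&dev->struct_mutex);    /* ...then dev->struct_mutex again */

        /*
         * Any list traversal that was in progress must now be restarted,
         * since the lists may have changed while dev->struct_mutex was
         * dropped.
         */

        /* ... process the buffer object here ... */

        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_locked(bo);     /* dev->struct_mutex is still held */
}
#endif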
51
52 static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
53 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
54 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
55 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
67 {
68         drm_mem_type_manager_t *man;
69
70         man = &bo->dev->bm.man[bo->pinned_mem_type];
71         list_add_tail(&bo->pinned_lru, &man->pinned);
72 }
73
74 void drm_bo_add_to_lru(drm_buffer_object_t * bo)
75 {
76         drm_mem_type_manager_t *man;
77
78         if (bo->mem.mm_node != bo->pinned_node) {
79                 man = &bo->dev->bm.man[bo->mem.mem_type];
80                 list_add_tail(&bo->lru, &man->lru);
81         } else
82                 INIT_LIST_HEAD(&bo->lru);
83 }
84
85 static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
86 {
87 #ifdef DRM_ODD_MM_COMPAT
88         int ret;
89
90         ret = drm_bo_lock_kmm(bo);
91         if (ret)
92                 return ret;
93         drm_bo_unmap_virtual(bo);
94         if (old_is_pci)
95                 drm_bo_finish_unmap(bo);
96 #else
97         drm_bo_unmap_virtual(bo);
98 #endif
99         return 0;
100 }
101
102 static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
103 {
104 #ifdef DRM_ODD_MM_COMPAT
105         int ret;
106
107         ret = drm_bo_remap_bound(bo);
108         if (ret) {
109                 DRM_ERROR("Failed to remap a bound buffer object.\n"
110                           "\tThis might cause a sigbus later.\n");
111         }
112         drm_bo_unlock_kmm(bo);
113 #endif
114 }
115
116 /*
117  * Call bo->mutex locked.
118  */
119
120 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
121 {
122         drm_device_t *dev = bo->dev;
123         int ret = 0;
124         bo->ttm = NULL;
125
126         switch (bo->type) {
127         case drm_bo_type_dc:
128                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
129                 if (!bo->ttm)
130                         ret = -ENOMEM;
131                 break;
132         case drm_bo_type_user:
133         case drm_bo_type_fake:
134                 break;
135         default:
136                 DRM_ERROR("Illegal buffer object type\n");
137                 ret = -EINVAL;
138                 break;
139         }
140
141         return ret;
142 }
143
144 static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
145                                   drm_bo_mem_reg_t * mem,
146                                   int evict, int no_wait)
147 {
148         drm_device_t *dev = bo->dev;
149         drm_buffer_manager_t *bm = &dev->bm;
150         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
151         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
152         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
153         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
154         int ret = 0;
155
156         if (old_is_pci || new_is_pci)
157                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
158         if (ret)
159                 return ret;
160
161         /*
162          * Create and bind a ttm if required.
163          */
164
165         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
166                 ret = drm_bo_add_ttm(bo);
167                 if (ret)
168                         goto out_err;
169
170                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
171                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
172                                            DRM_BO_FLAG_CACHED,
173                                            mem->mm_node->start);
174                         if (ret)
175                                 goto out_err;
176                 }
177         }
178
179         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
180
181                 drm_bo_mem_reg_t *old_mem = &bo->mem;
182                 uint32_t save_flags = old_mem->flags;
183                 uint32_t save_mask = old_mem->mask;
184
185                 *old_mem = *mem;
186                 mem->mm_node = NULL;
187                 old_mem->mask = save_mask;
188                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
189
190         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
191                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
192
193                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
194
195         } else if (dev->driver->bo_driver->move) {
196                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
197
198         } else {
199
200                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
201
202         }
203
204         if (ret)
205                 goto out_err;
206
207         if (old_is_pci || new_is_pci)
208                 drm_bo_vm_post_move(bo);
209
210         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
211                 ret =
212                     dev->driver->bo_driver->invalidate_caches(dev,
213                                                               bo->mem.flags);
214                 if (ret)
215                         DRM_ERROR("Cannot flush read caches\n");
216         }
217
218         DRM_FLAG_MASKED(bo->priv_flags,
219                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
220                         _DRM_BO_FLAG_EVICTED);
221
222         if (bo->mem.mm_node)
223                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
224
225         return 0;
226
227       out_err:
228         if (old_is_pci || new_is_pci)
229                 drm_bo_vm_post_move(bo);
230
231         new_man = &bm->man[bo->mem.mem_type];
232         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
233                 drm_ttm_unbind(bo->ttm);
234                 drm_destroy_ttm(bo->ttm);
235                 bo->ttm = NULL;
236         }
237
238         return ret;
239 }
240
241 /*
242  * Call bo->mutex locked.
243  * Wait until the buffer is idle.
244  */
245
246 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
247                 int no_wait)
248 {
249
250         drm_fence_object_t *fence = bo->fence;
251         int ret;
252
253         if (fence) {
254                 drm_device_t *dev = bo->dev;
255                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
256                         drm_fence_usage_deref_unlocked(dev, fence);
257                         bo->fence = NULL;
258                         return 0;
259                 }
260                 if (no_wait) {
261                         return -EBUSY;
262                 }
263                 ret =
264                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
265                                           bo->fence_type);
266                 if (ret)
267                         return ret;
268
269                 drm_fence_usage_deref_unlocked(dev, fence);
270                 bo->fence = NULL;
271
272         }
273         return 0;
274 }
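
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * drm_bo_wait(). Per the contract above, bo->mutex must be held; the
 * example_wait_idle() name is made up for illustration.
 */
#if 0
static int example_wait_idle(drm_buffer_object_t * bo, int no_wait)
{
        int ret;

        mutex_lock(&bo->mutex);
        /* Lazy wait, do not ignore signals. */
        ret = drm_bo_wait(bo, 1, 0, no_wait);
        mutex_unlock(&bo->mutex);

        /*
         * 0 means the buffer is idle and its fence reference has been
         * dropped; -EBUSY is returned when no_wait is set and the buffer
         * is still busy; otherwise the fence wait error is propagated.
         */
        return ret;
}
#endif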
275
276 static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
277 {
278         drm_device_t *dev = bo->dev;
279         drm_buffer_manager_t *bm = &dev->bm;
280
281         if (bo->fence) {
282                 if (bm->nice_mode) {
283                         unsigned long _end = jiffies + 3 * DRM_HZ;
284                         int ret;
285                         do {
286                                 ret = drm_bo_wait(bo, 0, 1, 0);
287                                 if (ret && allow_errors)
288                                         return ret;
289
290                         } while (ret && !time_after_eq(jiffies, _end));
291
292                         if (bo->fence) {
293                                 bm->nice_mode = 0;
294                                 DRM_ERROR("Detected GPU lockup or "
295                                           "fence driver was taken down. "
296                                           "Evicting buffer.\n");
297                         }
298                 }
299                 if (bo->fence) {
300                         drm_fence_usage_deref_unlocked(dev, bo->fence);
301                         bo->fence = NULL;
302                 }
303         }
304         return 0;
305 }
306
307 /*
308  * Call dev->struct_mutex locked.
309  * Attempts to remove all private references to a buffer by expiring its
310  * fence object and removing from lru lists and memory managers.
311  */
312
313 static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
314 {
315         drm_device_t *dev = bo->dev;
316         drm_buffer_manager_t *bm = &dev->bm;
317
318         atomic_inc(&bo->usage);
319         mutex_unlock(&dev->struct_mutex);
320         mutex_lock(&bo->mutex);
321
322         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
323
324         if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
325                 drm_fence_usage_deref_locked(dev, bo->fence);
326                 bo->fence = NULL;
327         }
328
329         if (bo->fence && remove_all)
330                 (void)drm_bo_expire_fence(bo, 0);
331
332         mutex_lock(&dev->struct_mutex);
333
334         if (!atomic_dec_and_test(&bo->usage)) {
335                 goto out;
336         }
337
338         if (!bo->fence) {
339                 list_del_init(&bo->lru);
340                 if (bo->mem.mm_node) {
341                         drm_mm_put_block(bo->mem.mm_node);
342                         bo->mem.mm_node = NULL;
343                 }
344                 list_del_init(&bo->pinned_lru);
345                 if (bo->pinned_node) {
346                         drm_mm_put_block(bo->pinned_node);
347                         bo->pinned_node = NULL;
348                 }
349                 list_del_init(&bo->ddestroy);
350                 mutex_unlock(&bo->mutex);
351                 drm_bo_destroy_locked(bo);
352                 return;
353         }
354
355         if (list_empty(&bo->ddestroy)) {
356                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
357                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
358                 schedule_delayed_work(&bm->wq,
359                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
360         }
361
362       out:
363         mutex_unlock(&bo->mutex);
364         return;
365 }
366
367 /*
368  * Verify that refcount is 0 and that there are no internal references
369  * to the buffer object. Then destroy it.
370  */
371
372 static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
373 {
374         drm_device_t *dev = bo->dev;
375         drm_buffer_manager_t *bm = &dev->bm;
376
377         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
378             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
379             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
380                 BUG_ON(bo->fence != NULL);
381
382 #ifdef DRM_ODD_MM_COMPAT
383                 BUG_ON(!list_empty(&bo->vma_list));
384                 BUG_ON(!list_empty(&bo->p_mm_list));
385 #endif
386
387                 if (bo->ttm) {
388                         drm_ttm_unbind(bo->ttm);
389                         drm_destroy_ttm(bo->ttm);
390                         bo->ttm = NULL;
391                 }
392
393                 atomic_dec(&bm->count);
394
395                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
396
397                 return;
398         }
399
400         /*
401          * Some stuff is still trying to reference the buffer object.
402          * Get rid of those references.
403          */
404
405         drm_bo_cleanup_refs(bo, 0);
406
407         return;
408 }
409
410 /*
411  * Call dev->struct_mutex locked.
412  */
413
414 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
415 {
416         drm_buffer_manager_t *bm = &dev->bm;
417
418         drm_buffer_object_t *entry, *nentry;
419         struct list_head *list, *next;
420
421         list_for_each_safe(list, next, &bm->ddestroy) {
422                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
423
424                 nentry = NULL;
425                 if (next != &bm->ddestroy) {
426                         nentry = list_entry(next, drm_buffer_object_t,
427                                             ddestroy);
428                         atomic_inc(&nentry->usage);
429                 }
430
431                 drm_bo_cleanup_refs(entry, remove_all);
432
433                 if (nentry) {
434                         atomic_dec(&nentry->usage);
435                 }
436         }
437 }
438
439 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
440 static void drm_bo_delayed_workqueue(void *data)
441 #else
442 static void drm_bo_delayed_workqueue(struct work_struct *work)
443 #endif
444 {
445 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
446         drm_device_t *dev = (drm_device_t *) data;
447         drm_buffer_manager_t *bm = &dev->bm;
448 #else
449         drm_buffer_manager_t *bm =
450             container_of(work, drm_buffer_manager_t, wq.work);
451         drm_device_t *dev = container_of(bm, drm_device_t, bm);
452 #endif
453
454         DRM_DEBUG("Delayed delete Worker\n");
455
456         mutex_lock(&dev->struct_mutex);
457         if (!bm->initialized) {
458                 mutex_unlock(&dev->struct_mutex);
459                 return;
460         }
461         drm_bo_delayed_delete(dev, 0);
462         if (bm->initialized && !list_empty(&bm->ddestroy)) {
463                 schedule_delayed_work(&bm->wq,
464                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
465         }
466         mutex_unlock(&dev->struct_mutex);
467 }
468
469 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
470 {
471         if (atomic_dec_and_test(&bo->usage)) {
472                 drm_bo_destroy_locked(bo);
473         }
474 }
475
476 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
477 {
478         drm_buffer_object_t *bo =
479             drm_user_object_entry(uo, drm_buffer_object_t, base);
480
481         drm_bo_takedown_vm_locked(bo);
482         drm_bo_usage_deref_locked(bo);
483 }
484
485 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
486 {
487         drm_device_t *dev = bo->dev;
488
489         if (atomic_dec_and_test(&bo->usage)) {
490                 mutex_lock(&dev->struct_mutex);
491                 if (atomic_read(&bo->usage) == 0)
492                         drm_bo_destroy_locked(bo);
493                 mutex_unlock(&dev->struct_mutex);
494         }
495 }
496
497 /*
498  * Note: the caller has to register (if applicable) and deregister fence
499  * object usage; see the illustrative sketch after this function.
500  */
501
502 int drm_fence_buffer_objects(drm_file_t * priv,
503                              struct list_head *list,
504                              uint32_t fence_flags,
505                              drm_fence_object_t * fence,
506                              drm_fence_object_t ** used_fence)
507 {
508         drm_device_t *dev = priv->head->dev;
509         drm_buffer_manager_t *bm = &dev->bm;
510
511         drm_buffer_object_t *entry;
512         uint32_t fence_type = 0;
513         int count = 0;
514         int ret = 0;
515         struct list_head *l;
516         LIST_HEAD(f_list);
517
518         mutex_lock(&dev->struct_mutex);
519
520         if (!list)
521                 list = &bm->unfenced;
522
523         list_for_each_entry(entry, list, lru) {
524                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
525                 fence_type |= entry->fence_type;
526                 if (entry->fence_class != 0) {
527                         DRM_ERROR("Fence class %d is not implemented yet.\n",
528                                   entry->fence_class);
529                         ret = -EINVAL;
530                         goto out;
531                 }
532                 count++;
533         }
534
535         if (!count) {
536                 ret = -EINVAL;
537                 goto out;
538         }
539
540         /*
541          * Transfer to a local list before we release dev->struct_mutex,
542          * so that we don't get any new unfenced objects while fencing
543          * the ones we already have.
544          */
545
546         list_splice_init(list, &f_list);
547
548         if (fence) {
549                 if ((fence_type & fence->type) != fence_type) {
550                         DRM_ERROR("Given fence doesn't match buffers "
551                                   "on unfenced list.\n");
552                         ret = -EINVAL;
553                         goto out;
554                 }
555         } else {
556                 mutex_unlock(&dev->struct_mutex);
557                 ret = drm_fence_object_create(dev, fence_type,
558                                               fence_flags | DRM_FENCE_FLAG_EMIT,
559                                               &fence);
560                 mutex_lock(&dev->struct_mutex);
561                 if (ret)
562                         goto out;
563         }
564
565         count = 0;
566         l = f_list.next;
567         while (l != &f_list) {
568                 entry = list_entry(l, drm_buffer_object_t, lru);
569                 atomic_inc(&entry->usage);
570                 mutex_unlock(&dev->struct_mutex);
571                 mutex_lock(&entry->mutex);
572                 mutex_lock(&dev->struct_mutex);
573                 list_del_init(l);
574                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
575                         count++;
576                         if (entry->fence)
577                                 drm_fence_usage_deref_locked(dev, entry->fence);
578                         entry->fence = fence;
579                         DRM_FLAG_MASKED(entry->priv_flags, 0,
580                                         _DRM_BO_FLAG_UNFENCED);
581                         DRM_WAKEUP(&entry->event_queue);
582                         drm_bo_add_to_lru(entry);
583                 }
584                 mutex_unlock(&entry->mutex);
585                 drm_bo_usage_deref_locked(entry);
586                 l = f_list.next;
587         }
588         atomic_add(count, &fence->usage);
589         DRM_DEBUG("Fenced %d buffers\n", count);
590       out:
591         mutex_unlock(&dev->struct_mutex);
592         *used_fence = fence;
593         return ret;
594 }
595
596 EXPORT_SYMBOL(drm_fence_buffer_objects);
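
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * drm_fence_buffer_objects(), showing the fence usage registration the
 * note above refers to. example_fence_unfenced() is made up; the calls
 * it makes are the ones implemented in this file.
 */
#if 0
static int example_fence_unfenced(drm_file_t * priv)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence = NULL;
        int ret;

        /*
         * Fence everything currently on the default (unfenced) list and
         * let the function create and emit the fence object itself.
         */
        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret)
                return ret;

        /* ... hand the fence to user space or wait on it here ... */

        /* Deregister this caller's fence object usage when done. */
        drm_fence_usage_deref_unlocked(dev, fence);
        return 0;
}
#endif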
597
598 /*
599  * bo->mutex locked 
600  */
601
602 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
603                         int no_wait)
604 {
605         int ret = 0;
606         drm_device_t *dev = bo->dev;
607         drm_bo_mem_reg_t evict_mem;
608
609         /*
610          * Someone might have modified the buffer before we took the buffer mutex.
611          */
612
613         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
614                 goto out;
615         if (bo->mem.mem_type != mem_type)
616                 goto out;
617
618         ret = drm_bo_wait(bo, 0, 0, no_wait);
619
620         if (ret && ret != -EAGAIN) {
621                 DRM_ERROR("Failed to expire fence before "
622                           "buffer eviction.\n");
623                 goto out;
624         }
625
626         evict_mem = bo->mem;
627         evict_mem.mm_node = NULL;
628
629         if (bo->type == drm_bo_type_fake) {
630                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
631                 bo->mem.mm_node = NULL;
632                 bo->pinned_mem_type = DRM_BO_MEM_LOCAL;
633                 bo->pinned_node = NULL;
634                 goto out1;
635         }
636
637         evict_mem = bo->mem;
638         evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
639         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
640
641         if (ret) {
642                 if (ret != -EAGAIN)
643                         DRM_ERROR("Failed to find memory space for "
644                                   "buffer eviction.\n");
645                 goto out;
646         }
647
648         if (bo->pinned_node)
649                 DRM_ERROR("Evicting pinned buffer\n");
650
651         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
652
653         if (ret) {
654                 if (ret != -EAGAIN)
655                         DRM_ERROR("Buffer eviction failed\n");
656                 goto out;
657         }
658
659       out1:
660         mutex_lock(&dev->struct_mutex);
661         if (evict_mem.mm_node) {
662                 drm_mm_put_block(evict_mem.mm_node);
663                 evict_mem.mm_node = NULL;
664         }
665         list_del(&bo->lru);
666         drm_bo_add_to_lru(bo);
667         mutex_unlock(&dev->struct_mutex);
668
669         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
670                         _DRM_BO_FLAG_EVICTED);
671
672       out:
673         return ret;
674 }
675
676 static int drm_bo_mem_force_space(drm_device_t * dev,
677                                   drm_bo_mem_reg_t * mem,
678                                   uint32_t mem_type, int no_wait)
679 {
680         drm_mm_node_t *node;
681         drm_buffer_manager_t *bm = &dev->bm;
682         drm_buffer_object_t *entry;
683         drm_mem_type_manager_t *man = &bm->man[mem_type];
684         struct list_head *lru;
685         unsigned long num_pages = mem->num_pages;
686         int ret;
687
688         mutex_lock(&dev->struct_mutex);
689         do {
690                 node = drm_mm_search_free(&man->manager, num_pages,
691                                           mem->page_alignment, 1);
692                 if (node)
693                         break;
694
695                 lru = &man->lru;
696                 if (lru->next == lru)
697                         break;
698
699                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
700                 atomic_inc(&entry->usage);
701                 mutex_unlock(&dev->struct_mutex);
702                 mutex_lock(&entry->mutex);
703                 BUG_ON(entry->mem.
704                        flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
705
706                 ret = drm_bo_evict(entry, mem_type, no_wait);
707                 mutex_unlock(&entry->mutex);
708                 drm_bo_usage_deref_unlocked(entry);
709                 if (ret)
710                         return ret;
711                 mutex_lock(&dev->struct_mutex);
712         } while (1);
713
714         if (!node) {
715                 mutex_unlock(&dev->struct_mutex);
716                 return -ENOMEM;
717         }
718
719         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
720         mutex_unlock(&dev->struct_mutex);
721         mem->mm_node = node;
722         mem->mem_type = mem_type;
723         return 0;
724 }
725
726 static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
727                                 uint32_t mem_type,
728                                 uint32_t mask, uint32_t * res_mask)
729 {
730         uint32_t cur_flags = drm_bo_type_flags(mem_type);
731         uint32_t flag_diff;
732
733         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
734                 cur_flags |= DRM_BO_FLAG_CACHED;
735         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
736                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
737         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
738                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
739
740         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) {
741                 return 0;
742         }
743         flag_diff = (mask ^ cur_flags);
744         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
745             (mask & DRM_BO_FLAG_FORCE_CACHING)) {
746                 return 0;
747         }
748         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
749             (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) {
750                 return 0;
751         }
752
753         *res_mask = cur_flags;
754         return 1;
755 }
756
757 int drm_bo_mem_space(drm_buffer_object_t * bo,
758                      drm_bo_mem_reg_t * mem, int no_wait)
759 {
760         drm_device_t *dev = bo->dev;
761         drm_buffer_manager_t *bm = &dev->bm;
762         drm_mem_type_manager_t *man;
763
764         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
765         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
766         uint32_t i;
767         uint32_t mem_type = DRM_BO_MEM_LOCAL;
768         uint32_t cur_flags;
769         int type_found = 0;
770         int type_ok = 0;
771         int has_eagain = 0;
772         drm_mm_node_t *node = NULL;
773         int ret;
774
775         for (i = 0; i < num_prios; ++i) {
776                 mem_type = prios[i];
777                 man = &bm->man[mem_type];
778
779                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
780                                                &cur_flags);
781
782                 if (!type_ok)
783                         continue;
784
785                 if (mem_type == DRM_BO_MEM_LOCAL)
786                         break;
787
788                 if ((mem_type == bo->pinned_mem_type) &&
789                     (bo->pinned_node != NULL)) {
790                         DRM_ERROR("Choosing pinned region\n");
791                         node = bo->pinned_node;
792                         break;
793                 }
794
795                 mutex_lock(&dev->struct_mutex);
796                 if (man->has_type && man->use_type) {
797                         type_found = 1;
798                         node = drm_mm_search_free(&man->manager, mem->num_pages,
799                                                   mem->page_alignment, 1);
800                         if (node)
801                                 node = drm_mm_get_block(node, mem->num_pages,
802                                                         mem->page_alignment);
803                 }
804                 mutex_unlock(&dev->struct_mutex);
805                 if (node)
806                         break;
807         }
808
809         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
810                 mem->mm_node = node;
811                 mem->mem_type = mem_type;
812                 mem->flags = cur_flags;
813                 return 0;
814         }
815
816         if (!type_found)
817                 return -EINVAL;
818
819         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
820         prios = dev->driver->bo_driver->mem_busy_prio;
821
822         for (i = 0; i < num_prios; ++i) {
823                 mem_type = prios[i];
824                 man = &bm->man[mem_type];
825
826                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
827                         continue;
828
829                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
830
831                 if (ret == 0) {
832                         mem->flags = cur_flags;
833                         return 0;
834                 }
835
836                 if (ret == -EAGAIN)
837                         has_eagain = 1;
838         }
839
840         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
841         return ret;
842 }
843
844 EXPORT_SYMBOL(drm_bo_mem_space);
845
846 static int drm_bo_new_mask(drm_buffer_object_t * bo,
847                            uint32_t new_mask, uint32_t hint)
848 {
849         uint32_t new_props;
850
851         if (bo->type == drm_bo_type_user) {
852                 DRM_ERROR("User buffers are not supported yet\n");
853                 return -EINVAL;
854         }
855         if (bo->type == drm_bo_type_fake &&
856             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
857                 DRM_ERROR("Fake buffers must be pinned.\n");
858                 return -EINVAL;
859         }
860
861         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
862                 DRM_ERROR
863                     ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
864                      "processes\n");
865                 return -EPERM;
866         }
867
868         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
869                                 DRM_BO_FLAG_READ);
870
871         if (!new_props) {
872                 DRM_ERROR("Invalid buffer object rwx properties\n");
873                 return -EINVAL;
874         }
875
876         bo->mem.mask = new_mask;
877         return 0;
878 }
879
880 /*
881  * Call dev->struct_mutex locked.
882  */
883
884 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
885                                               uint32_t handle, int check_owner)
886 {
887         drm_user_object_t *uo;
888         drm_buffer_object_t *bo;
889
890         uo = drm_lookup_user_object(priv, handle);
891
892         if (!uo || (uo->type != drm_buffer_type)) {
893                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
894                 return NULL;
895         }
896
897         if (check_owner && priv != uo->owner) {
898                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
899                         return NULL;
900         }
901
902         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
903         atomic_inc(&bo->usage);
904         return bo;
905 }
906
907 /*
908  * Call bo->mutex locked.
909  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
910  * Unlike drm_bo_busy(), it does not do any fence flushing.
911  */
912
913 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
914 {
915         drm_fence_object_t *fence = bo->fence;
916
917         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
918         if (fence) {
919                 drm_device_t *dev = bo->dev;
920                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
921                         drm_fence_usage_deref_unlocked(dev, fence);
922                         bo->fence = NULL;
923                         return 0;
924                 }
925                 return 1;
926         }
927         return 0;
928 }
929
930 /*
931  * Call bo->mutex locked.
932  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
933  */
934
935 static int drm_bo_busy(drm_buffer_object_t * bo)
936 {
937         drm_fence_object_t *fence = bo->fence;
938
939         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
940         if (fence) {
941                 drm_device_t *dev = bo->dev;
942                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
943                         drm_fence_usage_deref_unlocked(dev, fence);
944                         bo->fence = NULL;
945                         return 0;
946                 }
947                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
948                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
949                         drm_fence_usage_deref_unlocked(dev, fence);
950                         bo->fence = NULL;
951                         return 0;
952                 }
953                 return 1;
954         }
955         return 0;
956 }
957
958 static int drm_bo_read_cached(drm_buffer_object_t * bo)
959 {
960         int ret = 0;
961
962         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
963         if (bo->mem.mm_node)
964                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
965         return ret;
966 }
967
968 /*
969  * Wait until a buffer is unmapped.
970  */
971
972 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
973 {
974         int ret = 0;
975
976         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
977                 return -EBUSY;
978
979         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
980                     atomic_read(&bo->mapped) == -1);
981
982         if (ret == -EINTR)
983                 ret = -EAGAIN;
984
985         return ret;
986 }
987
988 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
989 {
990         int ret;
991
992         mutex_lock(&bo->mutex);
993         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
994         mutex_unlock(&bo->mutex);
995         return ret;
996 }
997
998 /*
999  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1000  * Until then, we cannot really do anything with it except delete it.
1001  * The unfenced list is a PITA, and the operations
1002  * 1) validating
1003  * 2) submitting commands
1004  * 3) fencing
1005  * should really be one atomic operation.
1006  * We now "solve" this problem by keeping the buffer "unfenced" after
1007  * validating, but before fencing; see the illustrative sketch below.
1008  */
1009
1010 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
1011                                 int eagain_if_wait)
1012 {
1013         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1014         unsigned long _end = jiffies + 3 * DRM_HZ;
1015
1016         if (ret && no_wait)
1017                 return -EBUSY;
1018         else if (!ret)
1019                 return 0;
1020
1021         do {
1022                 mutex_unlock(&bo->mutex);
1023                 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1024                             !drm_bo_check_unfenced(bo));
1025                 mutex_lock(&bo->mutex);
1026                 if (ret == -EINTR)
1027                         return -EAGAIN;
1028                 if (ret) {
1029                         DRM_ERROR
1030                             ("Error waiting for buffer to become fenced\n");
1031                         return ret;
1032                 }
1033                 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1034         } while (ret && !time_after_eq(jiffies, _end));
1035         if (ret) {
1036                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1037                 return ret;
1038         }
1039         if (eagain_if_wait)
1040                 return -EAGAIN;
1041
1042         return 0;
1043 }
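
/*
 * Illustrative sketch (not part of the driver): the validate / submit /
 * fence sequence described in the comment above drm_bo_wait_unfenced().
 * example_validate_submit_fence() and the command submission step are
 * hypothetical; the validate and fence calls are the ones implemented in
 * this file.
 */
#if 0
static int example_validate_submit_fence(drm_file_t * priv,
                                         drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = NULL;
        int ret;

        /*
         * 1) Validate: moves bo to the unfenced list and sets
         *    _DRM_BO_FLAG_UNFENCED (move_unfenced == 1).
         */
        mutex_lock(&bo->mutex);
        ret = drm_buffer_object_validate(bo, 1, 0);
        mutex_unlock(&bo->mutex);
        if (ret)
                return ret;

        /* 2) Submit the rendering commands referencing bo (driver specific). */

        /*
         * 3) Fence: takes everything off the unfenced list, attaches the
         *    fence and clears _DRM_BO_FLAG_UNFENCED again.
         */
        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret)
                return ret;

        drm_fence_usage_deref_unlocked(priv->head->dev, fence);
        return 0;
}
#endif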
1044
1045 /*
1046  * Fill in the ioctl reply argument with buffer info.
1047  * Bo locked. 
1048  */
1049
1050 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1051                                 drm_bo_arg_reply_t * rep)
1052 {
1053         rep->handle = bo->base.hash.key;
1054         rep->flags = bo->mem.flags;
1055         rep->size = bo->mem.num_pages * PAGE_SIZE;
1056         rep->offset = bo->offset;
1057         rep->arg_handle = bo->map_list.user_token;
1058         rep->mask = bo->mem.mask;
1059         rep->buffer_start = bo->buffer_start;
1060         rep->fence_flags = bo->fence_type;
1061         rep->rep_flags = 0;
1062         rep->page_alignment = bo->mem.page_alignment;
1063
1064         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1065                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1066                                 DRM_BO_REP_BUSY);
1067         }
1068 }
1069
1070 /*
1071  * Wait for buffer idle and register that we've mapped the buffer.
1072  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
1073  * so that if the client dies, the mapping is automatically 
1074  * unregistered; an illustrative sketch follows drm_buffer_object_unmap() below.
1075  */
1076
1077 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1078                                  uint32_t map_flags, unsigned hint,
1079                                  drm_bo_arg_reply_t * rep)
1080 {
1081         drm_buffer_object_t *bo;
1082         drm_device_t *dev = priv->head->dev;
1083         int ret = 0;
1084         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1085
1086         mutex_lock(&dev->struct_mutex);
1087         bo = drm_lookup_buffer_object(priv, handle, 1);
1088         mutex_unlock(&dev->struct_mutex);
1089
1090         if (!bo)
1091                 return -EINVAL;
1092
1093         mutex_lock(&bo->mutex);
1094         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1095                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1096                 if (ret)
1097                         goto out;
1098         }
1099
1100         /*
1101          * If this returns true, we are currently unmapped.
1102          * We need to do this test, because unmapping can
1103          * be done without the bo->mutex held.
1104          */
1105
1106         while (1) {
1107                 if (atomic_inc_and_test(&bo->mapped)) {
1108                         if (no_wait && drm_bo_busy(bo)) {
1109                                 atomic_dec(&bo->mapped);
1110                                 ret = -EBUSY;
1111                                 goto out;
1112                         }
1113                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1114                         if (ret) {
1115                                 atomic_dec(&bo->mapped);
1116                                 goto out;
1117                         }
1118
1119                         if ((map_flags & DRM_BO_FLAG_READ) &&
1120                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1121                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1122                                 drm_bo_read_cached(bo);
1123                         }
1124                         break;
1125                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1126                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1127                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1128
1129                         /*
1130                          * We are already mapped with different flags.
1131                          * need to wait for unmap.
1132                          */
1133
1134                         ret = drm_bo_wait_unmapped(bo, no_wait);
1135                         if (ret)
1136                                 goto out;
1137
1138                         continue;
1139                 }
1140                 break;
1141         }
1142
1143         mutex_lock(&dev->struct_mutex);
1144         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1145         mutex_unlock(&dev->struct_mutex);
1146         if (ret) {
1147                 if (atomic_add_negative(-1, &bo->mapped))
1148                         DRM_WAKEUP(&bo->event_queue);
1149
1150         } else
1151                 drm_bo_fill_rep_arg(bo, rep);
1152       out:
1153         mutex_unlock(&bo->mutex);
1154         drm_bo_usage_deref_unlocked(bo);
1155         return ret;
1156 }
1157
1158 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1159 {
1160         drm_device_t *dev = priv->head->dev;
1161         drm_buffer_object_t *bo;
1162         drm_ref_object_t *ro;
1163         int ret = 0;
1164
1165         mutex_lock(&dev->struct_mutex);
1166
1167         bo = drm_lookup_buffer_object(priv, handle, 1);
1168         if (!bo) {
1169                 ret = -EINVAL;
1170                 goto out;
1171         }
1172
1173         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1174         if (!ro) {
1175                 ret = -EINVAL;
1176                 goto out;
1177         }
1178
1179         drm_remove_ref_object(priv, ro);
1180         drm_bo_usage_deref_locked(bo);
1181       out:
1182         mutex_unlock(&dev->struct_mutex);
1183         return ret;
1184 }
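
/*
 * Illustrative sketch (not part of the driver): the map/unmap pairing as
 * seen from the ioctl level. example_map_unmap() and the handle argument
 * are hypothetical; the point is that drm_buffer_object_map() registers a
 * _DRM_REF_TYPE1 ref object, so either an explicit unmap or client death
 * releases the mapping.
 */
#if 0
static int example_map_unmap(drm_file_t * priv, uint32_t handle,
                             drm_bo_arg_reply_t * rep)
{
        int ret;

        /* Map for reading; fail with -EBUSY rather than block. */
        ret = drm_buffer_object_map(priv, handle, DRM_BO_FLAG_READ,
                                    DRM_BO_HINT_DONT_BLOCK, rep);
        if (ret)
                return ret;

        /* ... user space accesses the buffer through its mapping ... */

        /* Drop the ref object registered by the map call. */
        return drm_buffer_object_unmap(priv, handle);
}
#endif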
1185
1186 /*
1187  * Call dev->struct_mutex locked.
1188  */
1189
1190 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1191                                          drm_user_object_t * uo,
1192                                          drm_ref_t action)
1193 {
1194         drm_buffer_object_t *bo =
1195             drm_user_object_entry(uo, drm_buffer_object_t, base);
1196
1197         /*
1198          * We DON'T want to take bo->mutex here, because we want to be able
1199          * to hold it while waiting for the buffer to become unmapped.
1200          */
1201
1202         BUG_ON(action != _DRM_REF_TYPE1);
1203
1204         if (atomic_add_negative(-1, &bo->mapped))
1205                 DRM_WAKEUP(&bo->event_queue);
1206 }
1207
1208 /*
1209  * bo->mutex locked. 
1210  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1211  */
1212
1213 int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1214                        int no_wait, int move_unfenced)
1215 {
1216         drm_device_t *dev = bo->dev;
1217         drm_buffer_manager_t *bm = &dev->bm;
1218         int ret = 0;
1219         drm_bo_mem_reg_t mem;
1220         /*
1221          * Flush outstanding fences.
1222          */
1223
1224         drm_bo_busy(bo);
1225
1226         /*
1227          * Wait for outstanding fences.
1228          */
1229
1230         ret = drm_bo_wait(bo, 0, 0, no_wait);
1231         if (ret)
1232                 return ret;
1233
1234         mem.num_pages = bo->mem.num_pages;
1235         mem.size = mem.num_pages << PAGE_SHIFT;
1236         mem.mask = new_mem_flags;
1237         mem.page_alignment = bo->mem.page_alignment;
1238
1239         mutex_lock(&bm->evict_mutex);
1240         mutex_lock(&dev->struct_mutex);
1241         list_del(&bo->lru);
1242         list_add_tail(&bo->lru, &bm->unfenced);
1243         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1244                         _DRM_BO_FLAG_UNFENCED);
1245         mutex_unlock(&dev->struct_mutex);
1246
1247         /*
1248          * Determine where to move the buffer.
1249          */
1250         ret = drm_bo_mem_space(bo, &mem, no_wait);
1251
1252         if (ret)
1253                 goto out_unlock;
1254
1255         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1256
1257       out_unlock:
1258         if (ret || !move_unfenced) {
1259                 mutex_lock(&dev->struct_mutex);
1260                 if (mem.mm_node) {
1261                         drm_mm_put_block(mem.mm_node);
1262                         mem.mm_node = NULL;
1263                 }
1264                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1265                 DRM_WAKEUP(&bo->event_queue);
1266                 list_del(&bo->lru);
1267                 drm_bo_add_to_lru(bo);
1268                 mutex_unlock(&dev->struct_mutex);
1269         }
1270
1271         mutex_unlock(&bm->evict_mutex);
1272         return ret;
1273 }
1274
1275 static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
1276 {
1277         uint32_t flag_diff = (mem->mask ^ mem->flags);
1278
1279         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1280                 return 0;
1281         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1282             (mem->mask & DRM_BO_FLAG_FORCE_CACHING))
1283                 return 0;
1284         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1285             (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))
1286                 return 0;
1287         return 1;
1288 }
1289
1290 static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
1291 {
1292         drm_buffer_manager_t *bm = &dev->bm;
1293         drm_mem_type_manager_t *man;
1294         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1295         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1296         uint32_t i;
1297         int type_ok = 0;
1298         uint32_t mem_type = 0;
1299         uint32_t cur_flags;
1300
1301         if (drm_bo_mem_compat(mem))
1302                 return 0;
1303
1304         BUG_ON(mem->mm_node);
1305
1306         for (i = 0; i < num_prios; ++i) {
1307                 mem_type = prios[i];
1308                 man = &bm->man[mem_type];
1309                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1310                                                &cur_flags);
1311                 if (type_ok)
1312                         break;
1313         }
1314
1315         if (type_ok) {
1316                 mem->mm_node = NULL;
1317                 mem->mem_type = mem_type;
1318                 mem->flags = cur_flags;
1319                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1320                 return 0;
1321         }
1322
1323         DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1324         return -EINVAL;
1325 }
1326
1327 /*
1328  * bo locked.
1329  */
1330
1331 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1332                                       int move_unfenced, int no_wait)
1333 {
1334         drm_device_t *dev = bo->dev;
1335         drm_buffer_manager_t *bm = &dev->bm;
1336         drm_bo_driver_t *driver = dev->driver->bo_driver;
1337         int ret;
1338
1339         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
1340                   bo->mem.flags);
1341         ret =
1342             driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type);
1343         if (ret) {
1344                 DRM_ERROR("Driver did not support given buffer permissions\n");
1345                 return ret;
1346         }
1347
1348         ret = drm_bo_wait_unmapped(bo, no_wait);
1349         if (ret)
1350                 return ret;
1351
1352         if (bo->type == drm_bo_type_fake) {
1353                 ret = drm_bo_check_fake(dev, &bo->mem);
1354                 if (ret)
1355                         return ret;
1356         }
1357
1358         /*
1359          * Check whether we need to move buffer.
1360          */
1361
1362         if (!drm_bo_mem_compat(&bo->mem)) {
1363                 ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE,
1364                                          no_wait, move_unfenced);
1365                 if (ret) {
1366                         if (ret != -EAGAIN)
1367                                 DRM_ERROR("Failed moving buffer.\n");
1368                         return ret;
1369                 }
1370         }
1371
1372         /*
1373          * Pinned buffers.
1374          */
1375
1376         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1377
1378                 bo->pinned_mem_type = bo->mem.mem_type;
1379                 mutex_lock(&dev->struct_mutex);
1380                 list_del_init(&bo->pinned_lru);
1381                 drm_bo_add_to_pinned_lru(bo);
1382
1383                 if (bo->pinned_node != bo->mem.mm_node) {
1384                         drm_mm_put_block(bo->pinned_node);
1385                         bo->pinned_node = bo->mem.mm_node;
1386                 }
1387
1388                 mutex_unlock(&dev->struct_mutex);
1389
1390         } else if (bo->pinned_node != NULL) {
1391
1392                 mutex_lock(&dev->struct_mutex);
1393                 drm_mm_put_block(bo->pinned_node);
1394                 list_del_init(&bo->pinned_lru);
1395                 bo->pinned_node = NULL;
1396                 mutex_unlock(&dev->struct_mutex);
1397
1398         }
1399
1400         /*
1401          * We might need to add a TTM.
1402          */
1403
1404         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1405                 ret = drm_bo_add_ttm(bo);
1406                 if (ret)
1407                         return ret;
1408         }
1409         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1410
1411         /*
1412          * Finally, adjust lru to be sure.
1413          */
1414
1415         mutex_lock(&dev->struct_mutex);
1416         list_del(&bo->lru);
1417         if (move_unfenced) {
1418                 list_add_tail(&bo->lru, &bm->unfenced);
1419                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1420                                 _DRM_BO_FLAG_UNFENCED);
1421         } else {
1422                 drm_bo_add_to_lru(bo);
1423                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1424                         DRM_WAKEUP(&bo->event_queue);
1425                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1426                                         _DRM_BO_FLAG_UNFENCED);
1427                 }
1428         }
1429         mutex_unlock(&dev->struct_mutex);
1430
1431         return 0;
1432 }
1433
1434 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1435                                   uint32_t flags, uint32_t mask, uint32_t hint,
1436                                   drm_bo_arg_reply_t * rep)
1437 {
1438         drm_buffer_object_t *bo;
1439         int ret;
1440         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1441
1442         bo = drm_lookup_buffer_object(priv, handle, 1);
1443         if (!bo) {
1444                 return -EINVAL;
1445         }
1446
1447         mutex_lock(&bo->mutex);
1448         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1449
1450         if (ret)
1451                 goto out;
1452
1453         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1454         ret = drm_bo_new_mask(bo, flags, hint);
1455         if (ret)
1456                 goto out;
1457
1458         ret =
1459             drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1460                                        no_wait);
1461         drm_bo_fill_rep_arg(bo, rep);
1462
1463       out:
1464
1465         mutex_unlock(&bo->mutex);
1466
1467         drm_bo_usage_deref_unlocked(bo);
1468         return ret;
1469 }
1470
1471 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1472                               drm_bo_arg_reply_t * rep)
1473 {
1474         drm_buffer_object_t *bo;
1475
1476         bo = drm_lookup_buffer_object(priv, handle, 1);
1477         if (!bo) {
1478                 return -EINVAL;
1479         }
1480         mutex_lock(&bo->mutex);
1481         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1482                 (void)drm_bo_busy(bo);
1483         drm_bo_fill_rep_arg(bo, rep);
1484         mutex_unlock(&bo->mutex);
1485         drm_bo_usage_deref_unlocked(bo);
1486         return 0;
1487 }
1488
1489 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1490                               uint32_t hint, drm_bo_arg_reply_t * rep)
1491 {
1492         drm_buffer_object_t *bo;
1493         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1494         int ret;
1495
1496         bo = drm_lookup_buffer_object(priv, handle, 1);
1497         if (!bo) {
1498                 return -EINVAL;
1499         }
1500
1501         mutex_lock(&bo->mutex);
1502         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1503         if (ret)
1504                 goto out;
1505         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1506         if (ret)
1507                 goto out;
1508
1509         drm_bo_fill_rep_arg(bo, rep);
1510
1511       out:
1512         mutex_unlock(&bo->mutex);
1513         drm_bo_usage_deref_unlocked(bo);
1514         return ret;
1515 }
1516
1517 int drm_buffer_object_create(drm_file_t * priv,
1518                              unsigned long size,
1519                              drm_bo_type_t type,
1520                              uint32_t mask,
1521                              uint32_t hint,
1522                              uint32_t page_alignment,
1523                              unsigned long buffer_start,
1524                              drm_buffer_object_t ** buf_obj)
1525 {
1526         drm_device_t *dev = priv->head->dev;
1527         drm_buffer_manager_t *bm = &dev->bm;
1528         drm_buffer_object_t *bo;
1529         int ret = 0;
1530         unsigned long num_pages;
1531
1532         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1533                 DRM_ERROR("Invalid buffer object start.\n");
1534                 return -EINVAL;
1535         }
1536         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1537         if (num_pages == 0) {
1538                 DRM_ERROR("Illegal buffer object size.\n");
1539                 return -EINVAL;
1540         }
1541
1542         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1543
1544         if (!bo)
1545                 return -ENOMEM;
1546
1547         mutex_init(&bo->mutex);
1548         mutex_lock(&bo->mutex);
1549
1550         atomic_set(&bo->usage, 1);
1551         atomic_set(&bo->mapped, -1);
1552         DRM_INIT_WAITQUEUE(&bo->event_queue);
1553         INIT_LIST_HEAD(&bo->lru);
1554         INIT_LIST_HEAD(&bo->pinned_lru);
1555         INIT_LIST_HEAD(&bo->ddestroy);
1556 #ifdef DRM_ODD_MM_COMPAT
1557         INIT_LIST_HEAD(&bo->p_mm_list);
1558         INIT_LIST_HEAD(&bo->vma_list);
1559 #endif
1560         bo->dev = dev;
1561         bo->type = type;
1562         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1563         bo->mem.num_pages = num_pages;
1564         bo->mem.mm_node = NULL;
1565         bo->mem.page_alignment = page_alignment;
1566         if (bo->type == drm_bo_type_fake) {
1567                 bo->offset = buffer_start;
1568                 bo->buffer_start = 0;
1569         } else {
1570                 bo->buffer_start = buffer_start;
1571         }
1572         bo->priv_flags = 0;
1573         bo->mem.flags = 0;
1574         bo->mem.mask = 0;
1575         atomic_inc(&bm->count);
1576         ret = drm_bo_new_mask(bo, mask, hint);
1577
1578         if (ret)
1579                 goto out_err;
1580
1581         if (bo->type == drm_bo_type_dc) {
1582                 mutex_lock(&dev->struct_mutex);
1583                 ret = drm_bo_setup_vm_locked(bo);
1584                 mutex_unlock(&dev->struct_mutex);
1585                 if (ret)
1586                         goto out_err;
1587         }
1588         ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1589         if (ret)
1590                 goto out_err;
1591
1592         mutex_unlock(&bo->mutex);
1593         *buf_obj = bo;
1594         return 0;
1595
1596       out_err:
1597         mutex_unlock(&bo->mutex);
1598
1599         drm_bo_usage_deref_unlocked(bo);
1600         return ret;
1601 }
1602
1603 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1604                                   int shareable)
1605 {
1606         drm_device_t *dev = priv->head->dev;
1607         int ret;
1608
1609         mutex_lock(&dev->struct_mutex);
1610         ret = drm_add_user_object(priv, &bo->base, shareable);
1611         if (ret)
1612                 goto out;
1613
1614         bo->base.remove = drm_bo_base_deref_locked;
1615         bo->base.type = drm_buffer_type;
1616         bo->base.ref_struct_locked = NULL;
1617         bo->base.unref = drm_buffer_user_object_unmap;
1618
1619       out:
1620         mutex_unlock(&dev->struct_mutex);
1621         return ret;
1622 }
1623
1624 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1625 {
1626         LOCK_TEST_WITH_RETURN(dev, filp);
1627         return 0;
1628 }
1629
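/*
 * The buffer object ioctl. User space hands us a chain of drm_bo_arg_t
 * structures; each request that has not already been handled is
 * dispatched on req->op and the reply is copied back before we follow
 * arg.next to the next request.
 */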
1630 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1631 {
1632         DRM_DEVICE;
1633         drm_bo_arg_t arg;
1634         drm_bo_arg_request_t *req = &arg.d.req;
1635         drm_bo_arg_reply_t rep;
1636         unsigned long next;
1637         drm_user_object_t *uo;
1638         drm_buffer_object_t *entry;
1639
1640         if (!dev->bm.initialized) {
1641                 DRM_ERROR("Buffer object manager is not initialized.\n");
1642                 return -EINVAL;
1643         }
1644
1645         do {
1646                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1647
1648                 if (arg.handled) {
1649                         data = arg.next;
1650                         continue;
1651                 }
1652
1653                 rep.ret = 0;
1654                 switch (req->op) {
1655                 case drm_bo_create:
1656                         rep.ret =
1657                             drm_buffer_object_create(priv, req->size,
1658                                                      req->type,
1659                                                      req->mask,
1660                                                      req->hint,
1661                                                      req->page_alignment,
1662                                                      req->buffer_start, &entry);
1663                         if (rep.ret)
1664                                 break;
1665
1666                         rep.ret =
1667                             drm_bo_add_user_object(priv, entry,
1668                                                    req->mask &
1669                                                    DRM_BO_FLAG_SHAREABLE);
1671                         if (rep.ret)
1672                                 drm_bo_usage_deref_unlocked(entry);
1673
1674                         if (rep.ret)
1675                                 break;
1676
1677                         mutex_lock(&entry->mutex);
1678                         drm_bo_fill_rep_arg(entry, &rep);
1679                         mutex_unlock(&entry->mutex);
1680                         break;
1681                 case drm_bo_unmap:
1682                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1683                         break;
1684                 case drm_bo_map:
1685                         rep.ret = drm_buffer_object_map(priv, req->handle,
1686                                                         req->mask,
1687                                                         req->hint, &rep);
1688                         break;
1689                 case drm_bo_destroy:
1690                         mutex_lock(&dev->struct_mutex);
1691                         uo = drm_lookup_user_object(priv, req->handle);
1692                         if (!uo || (uo->type != drm_buffer_type)
1693                             || uo->owner != priv) {
1694                                 mutex_unlock(&dev->struct_mutex);
1695                                 rep.ret = -EINVAL;
1696                                 break;
1697                         }
1698                         rep.ret = drm_remove_user_object(priv, uo);
1699                         mutex_unlock(&dev->struct_mutex);
1700                         break;
1701                 case drm_bo_reference:
1702                         rep.ret = drm_user_object_ref(priv, req->handle,
1703                                                       drm_buffer_type, &uo);
1704                         if (rep.ret)
1705                                 break;
1706                         mutex_lock(&dev->struct_mutex);
1707                         uo = drm_lookup_user_object(priv, req->handle);
1708                         entry =
1709                             drm_user_object_entry(uo, drm_buffer_object_t,
1710                                                   base);
1711                         atomic_dec(&entry->usage);
1712                         mutex_unlock(&dev->struct_mutex);
1713                         mutex_lock(&entry->mutex);
1714                         drm_bo_fill_rep_arg(entry, &rep);
1715                         mutex_unlock(&entry->mutex);
1716                         break;
1717                 case drm_bo_unreference:
1718                         rep.ret = drm_user_object_unref(priv, req->handle,
1719                                                         drm_buffer_type);
1720                         break;
1721                 case drm_bo_validate:
1722                         rep.ret = drm_bo_lock_test(dev, filp);
1723
1724                         if (rep.ret)
1725                                 break;
1726                         rep.ret =
1727                             drm_bo_handle_validate(priv, req->handle, req->mask,
1728                                                    req->arg_handle, req->hint,
1729                                                    &rep);
1730                         break;
1731                 case drm_bo_fence:
1732                         rep.ret = drm_bo_lock_test(dev, filp);
1733                         if (rep.ret)
1734                                 break;
1735                         /* Not implemented yet. */ break;
1736                 case drm_bo_info:
1737                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1738                         break;
1739                 case drm_bo_wait_idle:
1740                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1741                                                      req->hint, &rep);
1742                         break;
1743                 case drm_bo_ref_fence:
1744                         rep.ret = -EINVAL;
1745                         DRM_ERROR("Function is not implemented yet.\n");
                        /* fall through */
1746                 default:
1747                         rep.ret = -EINVAL;
1748                 }
1749                 next = arg.next;
1750
1751                 /*
1752                  * A signal interrupted us. Make sure the ioctl is restartable.
1753                  */
1754
1755                 if (rep.ret == -EAGAIN)
1756                         return -EAGAIN;
1757
1758                 arg.handled = 1;
1759                 arg.d.rep = rep;
1760                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1761                 data = next;
1762         } while (data);
1763         return 0;
1764 }
1765
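/*
 * Evict a single buffer object away from the given memory type during
 * cleanup. Expires its fence, optionally releases its pinned region,
 * and, if the buffer still resides in that memory type, evicts it.
 * Called with dev->struct_mutex held; the mutex is dropped and
 * re-acquired inside.
 */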
1766 static int drm_bo_leave_list(drm_buffer_object_t * bo,
1767                              uint32_t mem_type,
1768                              int free_pinned, int allow_errors)
1769 {
1770         drm_device_t *dev = bo->dev;
1771         int ret = 0;
1772
1773         atomic_inc(&bo->usage);
1774         mutex_unlock(&dev->struct_mutex);
1775         mutex_lock(&bo->mutex);
1776
1777         ret = drm_bo_expire_fence(bo, allow_errors);
1778         if (ret)
1779                 goto out;
1780
1781         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1782
1783         if (free_pinned) {
1784                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1785                 mutex_lock(&dev->struct_mutex);
1786                 list_del_init(&bo->pinned_lru);
1787                 if (bo->pinned_node == bo->mem.mm_node)
1788                         bo->pinned_node = NULL;
1789                 if (bo->pinned_node != NULL) {
1790                         drm_mm_put_block(bo->pinned_node);
1791                         bo->pinned_node = NULL;
1792                 }
1793                 mutex_unlock(&dev->struct_mutex);
1794         }
1795
1796         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1797                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
1798                           "cleanup. Removing flag and evicting.\n");
1799                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1800                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1801         }
1802
1803         if (bo->mem.mem_type == mem_type)
1804                 ret = drm_bo_evict(bo, mem_type, 0);
1805
1806         if (ret) {
1807                 if (allow_errors) {
1808                         goto out;
1809                 } else {
1810                         ret = 0;
1811                         DRM_ERROR("Cleanup eviction failed\n");
1812                 }
1813         }
1814
1815       out:
1816         mutex_unlock(&bo->mutex);
1817         mutex_lock(&dev->struct_mutex);
1818         drm_bo_usage_deref_locked(bo);
1819         return ret;
1820 }
1821
1822 /*
1823  * dev->struct_sem locked.
1824  * dev->struct_mutex locked.
1825
1826 static int drm_bo_force_list_clean(drm_device_t * dev,
1827                                    struct list_head *head,
1828                                    unsigned mem_type,
1829                                    int free_pinned,
1830                                    int allow_errors, int pinned_list)
1831 {
1832         struct list_head *list, *next;
1833         drm_buffer_object_t *entry;
1834         int ret;
1835         int do_retry;
1836
1837         /*
1838          * We need to restart the list traversal if a node
1839          * disappears from under us. Nodes cannot be added
1840          * since the hardware lock is needed for this
1841          * operation.
1842          */
1843
1844       retry:
1845         list_for_each_safe(list, next, head) {
1846                 if (pinned_list)
1847                         entry = list_entry(list, drm_buffer_object_t,
1848                                            pinned_lru);
1849                 else
1850                         entry = list_entry(list, drm_buffer_object_t, lru);
1851                 atomic_inc(&entry->usage);
1852                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1853                                         allow_errors);
1854
1855                 do_retry = list->next != next;
1856                 drm_bo_usage_deref_locked(entry);
1857
1858                 if (ret)
1859                         return ret;
1860
1861                 if (do_retry)
1862                         goto retry;
1863         }
1864         return 0;
1865 }
1866
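/*
 * Take down a memory manager type: evict everything on the unfenced,
 * lru and pinned lists for that type and tear down its drm_mm range
 * manager. Returns -EBUSY if the range manager still has allocations.
 * Called with dev->struct_mutex held.
 */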
1867 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1868 {
1869         drm_buffer_manager_t *bm = &dev->bm;
1870         drm_mem_type_manager_t *man = &bm->man[mem_type];
1871         int ret = -EINVAL;
1872
1873         if (mem_type >= DRM_BO_MEM_TYPES) {
1874                 DRM_ERROR("Illegal memory type %d\n", mem_type);
1875                 return ret;
1876         }
1877
1878         if (!man->has_type) {
1879                 DRM_ERROR("Trying to take down uninitialized "
1880                           "memory manager type\n");
1881                 return ret;
1882         }
1883         man->use_type = 0;
1884         man->has_type = 0;
1885
1886         ret = 0;
1887         if (mem_type > 0) {
1888
1889                 drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0);
1890                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
1891                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
1892
1893                 if (drm_mm_clean(&man->manager)) {
1894                         drm_mm_takedown(&man->manager);
1895                 } else {
1896                         ret = -EBUSY;
1897                 }
1898         }
1899
1900         return ret;
1901 }
1902
1903 /**
1904  * Evict all buffers of a particular mem_type, but leave memory manager
1905  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
1906  * point since we have the hardware lock.
1907  */
1908
1909 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1910 {
1911         int ret;
1912         drm_buffer_manager_t *bm = &dev->bm;
1913         drm_mem_type_manager_t *man = &bm->man[mem_type];
1914
1915         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1916                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1917                 return -EINVAL;
1918         }
1919
1920         ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0);
1921         if (ret)
1922                 return ret;
1923         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
1924
1925         return ret;
1926 }
1927
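/*
 * Initialize a memory manager type: let the driver fill in the
 * drm_mem_type_manager_t and, for anything other than local system
 * memory, set up a drm_mm range manager covering p_size pages starting
 * at p_offset. Called with dev->struct_mutex held.
 */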
1928 static int drm_bo_init_mm(drm_device_t * dev,
1929                           unsigned type,
1930                           unsigned long p_offset, unsigned long p_size)
1931 {
1932         drm_buffer_manager_t *bm = &dev->bm;
1933         int ret = -EINVAL;
1934         drm_mem_type_manager_t *man;
1935
1936         if (type >= DRM_BO_MEM_TYPES) {
1937                 DRM_ERROR("Illegal memory type %d\n", type);
1938                 return ret;
1939         }
1940
1941         man = &bm->man[type];
1942         if (man->has_type) {
1943                 DRM_ERROR("Memory manager already initialized for type %d\n",
1944                           type);
1945                 return ret;
1946         }
1947
1948         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
1949         if (ret)
1950                 return ret;
1951
1952         ret = 0;
1953         if (type != DRM_BO_MEM_LOCAL) {
1954                 if (!p_size) {
1955                         DRM_ERROR("Zero size memory manager type %d\n", type);
1956                         return -EINVAL;
1957                 }
1958                 ret = drm_mm_init(&man->manager, p_offset, p_size);
1959                 if (ret)
1960                         return ret;
1961         }
1962         man->has_type = 1;
1963         man->use_type = 1;
1964
1965         INIT_LIST_HEAD(&man->lru);
1966         INIT_LIST_HEAD(&man->pinned);
1967
1968         return 0;
1969 }
1970
1971 /*
1972  * This is called from lastclose, so we don't need to bother about
1973  * any clients still running when we set the initialized flag to zero.
1974  */
1975
1976 int drm_bo_driver_finish(drm_device_t * dev)
1977 {
1978         drm_buffer_manager_t *bm = &dev->bm;
1979         int ret = 0;
1980         unsigned i = DRM_BO_MEM_TYPES;
1981         drm_mem_type_manager_t *man;
1982
1983         mutex_lock(&dev->bm.init_mutex);
1984         mutex_lock(&dev->struct_mutex);
1985
1986         if (!bm->initialized)
1987                 goto out;
1988         bm->initialized = 0;
1989
1990         while (i--) {
1991                 man = &bm->man[i];
1992                 if (man->has_type) {
1993                         man->use_type = 0;
1994                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
1995                                 ret = -EBUSY;
1996                                 DRM_ERROR("DRM memory manager type %d "
1997                                           "is not clean.\n", i);
1998                         }
1999                         man->has_type = 0;
2000                 }
2001         }
2002         mutex_unlock(&dev->struct_mutex);
2003
2004         if (!cancel_delayed_work(&bm->wq)) {
2005                 flush_scheduled_work();
2006         }
2007         mutex_lock(&dev->struct_mutex);
2008         drm_bo_delayed_delete(dev, 1);
2009         if (list_empty(&bm->ddestroy)) {
2010                 DRM_DEBUG("Delayed destroy list was clean\n");
2011         }
2012         if (list_empty(&bm->man[0].lru)) {
2013                 DRM_DEBUG("Swap list was clean\n");
2014         }
2015         if (list_empty(&bm->man[0].pinned)) {
2016                 DRM_DEBUG("NO_MOVE list was clean\n");
2017         }
2018         if (list_empty(&bm->unfenced)) {
2019                 DRM_DEBUG("Unfenced list was clean\n");
2020         }
2021       out:
2022         mutex_unlock(&dev->struct_mutex);
2023         mutex_unlock(&dev->bm.init_mutex);
2024         return ret;
2025 }
2026
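/*
 * One-time buffer manager setup for drivers that support buffer
 * objects: initializes the local (system) memory type and the delayed
 * destruction work; all other memory types are set up later by the
 * driver or through the mm_init ioctl.
 *
 * A minimal sketch of how a driver might hook this up (illustrative
 * only; foo_driver_load is a hypothetical load callback):
 *
 *	static int foo_driver_load(drm_device_t *dev, unsigned long flags)
 *	{
 *		return drm_bo_driver_init(dev);
 *	}
 */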
2027 int drm_bo_driver_init(drm_device_t * dev)
2028 {
2029         drm_bo_driver_t *driver = dev->driver->bo_driver;
2030         drm_buffer_manager_t *bm = &dev->bm;
2031         int ret = -EINVAL;
2032
2033         mutex_lock(&dev->bm.init_mutex);
2034         mutex_lock(&dev->struct_mutex);
2035         if (!driver)
2036                 goto out_unlock;
2037
2038         /*
2039          * Initialize the system memory buffer type.
2040          * Other types are initialized by the driver or through the mm_init ioctl.
2041          */
2042
2043         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2044         if (ret)
2045                 goto out_unlock;
2046
2047 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2048         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2049 #else
2050         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2051 #endif
2052         bm->initialized = 1;
2053         bm->nice_mode = 1;
2054         atomic_set(&bm->count, 0);
2055         bm->cur_pages = 0;
2056         INIT_LIST_HEAD(&bm->unfenced);
2057         INIT_LIST_HEAD(&bm->ddestroy);
2058       out_unlock:
2059         mutex_unlock(&dev->struct_mutex);
2060         mutex_unlock(&dev->bm.init_mutex);
2061         return ret;
2062 }
2063
2064 EXPORT_SYMBOL(drm_bo_driver_init);
2065
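/*
 * The memory manager ioctl: lets user space initialize, take down,
 * lock and unlock the driver's non-local memory types. The
 * mm_takedown, mm_lock and mm_unlock operations require the caller to
 * hold the hardware lock.
 */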
2066 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2067 {
2068         DRM_DEVICE;
2069
2070         int ret = 0;
2071         drm_mm_init_arg_t arg;
2072         drm_buffer_manager_t *bm = &dev->bm;
2073         drm_bo_driver_t *driver = dev->driver->bo_driver;
2074
2075         if (!driver) {
2076                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2077                 return -EINVAL;
2078         }
2079
2080         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2081
2082         switch (arg.req.op) {
2083         case mm_init:
2084                 ret = -EINVAL;
2085                 mutex_lock(&dev->bm.init_mutex);
2086                 mutex_lock(&dev->struct_mutex);
2087                 if (!bm->initialized) {
2088                         DRM_ERROR("DRM memory manager was not initialized.\n");
2089                         break;
2090                 }
2091                 if (arg.req.mem_type == 0) {
2092                         DRM_ERROR
2093                             ("System memory buffers already initialized.\n");
2094                         break;
2095                 }
2096                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2097                                      arg.req.p_offset, arg.req.p_size);
2098                 break;
2099         case mm_takedown:
2100                 LOCK_TEST_WITH_RETURN(dev, filp);
2101                 mutex_lock(&dev->bm.init_mutex);
2102                 mutex_lock(&dev->struct_mutex);
2103                 ret = -EINVAL;
2104                 if (!bm->initialized) {
2105                         DRM_ERROR("DRM memory manager was not initialized\n");
2106                         break;
2107                 }
2108                 if (arg.req.mem_type == 0) {
2109                         DRM_ERROR("No takedown for system memory buffers.\n");
2110                         break;
2111                 }
2112                 ret = 0;
2113                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2114                         DRM_ERROR("Memory manager type %d not clean. "
2115                                   "Delaying takedown\n", arg.req.mem_type);
2116                 }
2117                 break;
2118         case mm_lock:
2119                 LOCK_TEST_WITH_RETURN(dev, filp);
2120                 mutex_lock(&dev->bm.init_mutex);
2121                 mutex_lock(&dev->struct_mutex);
2122                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2123                 break;
2124         case mm_unlock:
2125                 LOCK_TEST_WITH_RETURN(dev, filp);
2126                 mutex_lock(&dev->bm.init_mutex);
2127                 mutex_lock(&dev->struct_mutex);
2128                 ret = 0;
2129                 break;
2130         default:
2131                 DRM_ERROR("Function not implemented yet\n");
2132                 return -EINVAL;
2133         }
2134
2135         mutex_unlock(&dev->struct_mutex);
2136         mutex_unlock(&dev->bm.init_mutex);
2137         if (ret)
2138                 return ret;
2139
2140         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2141         return 0;
2142 }
2143
2144 /*
2145  * buffer object vm functions.
2146  */
2147
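/*
 * Return 1 if the memory region must be accessed through a PCI / IO
 * aperture, 0 if it can be reached as ordinary system pages (local,
 * CMA or cached memory).
 */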
2148 int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
2149 {
2150         drm_buffer_manager_t *bm = &dev->bm;
2151         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2152
2153         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2154                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2155                         return 0;
2156
2157                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2158                         return 0;
2159
2160                 if (mem->flags & DRM_BO_FLAG_CACHED)
2161                         return 0;
2162         }
2163         return 1;
2164 }
2165
2166 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2167
2168 /**
2169  * Get the PCI offset for the buffer object memory.
2170  *
2171  * \param bo The buffer object.
2172  * \param bus_base On return the base of the PCI region
2173  * \param bus_offset On return the byte offset into the PCI region
2174  * \param bus_size On return the byte size of the buffer object or zero if
2175  *     the buffer object memory is not accessible through a PCI region.
2176  * \return Failure indication.
2177  * 
2178  * Returns -EINVAL if the buffer object is currently not mappable.
2179  * Otherwise returns zero.
2180  */
2181
2182 int drm_bo_pci_offset(drm_device_t * dev,
2183                       drm_bo_mem_reg_t * mem,
2184                       unsigned long *bus_base,
2185                       unsigned long *bus_offset, unsigned long *bus_size)
2186 {
2187         drm_buffer_manager_t *bm = &dev->bm;
2188         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2189
2190         *bus_size = 0;
2191         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2192                 return -EINVAL;
2193
2194         if (drm_mem_reg_is_pci(dev, mem)) {
2195                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2196                 *bus_size = mem->num_pages << PAGE_SHIFT;
2197                 *bus_base = man->io_offset;
2198         }
2199
2200         return 0;
2201 }
2202
2203 /**
2204  * Kill all user-space virtual mappings of this buffer object.
2205  *
2206  * \param bo The buffer object.
2207  *
2208  * Call bo->mutex locked.
2209  */
2210
2211 void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
2212 {
2213         drm_device_t *dev = bo->dev;
2214         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2215         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2216
2217         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2218 }
2219
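/*
 * Release the fake mmap offset of a buffer object: remove it from the
 * map hash, give back its file offset range, free the map itself and
 * drop the usage reference taken by drm_bo_setup_vm_locked().
 * Called with dev->struct_mutex held.
 */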
2220 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
2221 {
2222         drm_map_list_t *list = &bo->map_list;
2223         drm_local_map_t *map;
2224         drm_device_t *dev = bo->dev;
2225
2226         if (list->user_token) {
2227                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2228                 list->user_token = 0;
2229         }
2230         if (list->file_offset_node) {
2231                 drm_mm_put_block(list->file_offset_node);
2232                 list->file_offset_node = NULL;
2233         }
2234
2235         map = list->map;
2236         if (!map)
2237                 return;
2238
2239         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2240         list->map = NULL;
2241         list->user_token = 0ULL;
2242         drm_bo_usage_deref_locked(bo);
2243 }
2244
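/*
 * Set up the fake mmap offset of a buffer object: allocate a _DRM_TTM
 * map, reserve a range in the file offset manager and insert it into
 * the map hash so that user space can mmap the buffer at
 * list->user_token. Called with dev->struct_mutex held.
 */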
2245 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
2246 {
2247         drm_map_list_t *list = &bo->map_list;
2248         drm_local_map_t *map;
2249         drm_device_t *dev = bo->dev;
2250
2251         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2252         if (!list->map)
2253                 return -ENOMEM;
2254
2255         map = list->map;
2256         map->offset = 0;
2257         map->type = _DRM_TTM;
2258         map->flags = _DRM_REMOVABLE;
2259         map->size = bo->mem.num_pages * PAGE_SIZE;
2260         atomic_inc(&bo->usage);
2261         map->handle = (void *)bo;
2262
2263         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2264                                                     bo->mem.num_pages, 0, 0);
2265
2266         if (!list->file_offset_node) {
2267                 drm_bo_takedown_vm_locked(bo);
2268                 return -ENOMEM;
2269         }
2270
2271         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2272                                                   bo->mem.num_pages, 0);
2273
2274         list->hash.key = list->file_offset_node->start;
2275         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2276                 drm_bo_takedown_vm_locked(bo);
2277                 return -ENOMEM;
2278         }
2279
2280         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2281
2282         return 0;
2283 }