More bugfixes.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  * 
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  * 
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  * 
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  * 
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  * 
39  * dev->struct_mutex also protects all lists and list heads, as well as
40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those we need
44  * both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
51
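As a hedged illustration of the lock order and restart rule described above (not part of the original file; the helper name and the per-object work are placeholders), a traversal that has to drop dev->struct_mutex in order to take bo->mutex typically follows the same pattern as the eviction loop in drm_bo_mem_force_space() further down:

/*
 * Illustrative sketch only -- not from drm_bo.c. It assumes the work done on
 * each object removes it from the list (as eviction does); otherwise the
 * restart would have to track progress explicitly.
 */
static void example_drain_lru(drm_device_t * dev, struct list_head *lru)
{
        drm_buffer_object_t *entry;

        mutex_lock(&dev->struct_mutex);
        while (!list_empty(lru)) {
                entry = list_entry(lru->next, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);        /* keep the object alive */
                mutex_unlock(&dev->struct_mutex); /* bo->mutex must be taken first */
                mutex_lock(&entry->mutex);

                /* ... operate on the buffer object ... */

                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_unlocked(entry);
                mutex_lock(&dev->struct_mutex);   /* list may have changed: restart */
        }
        mutex_unlock(&dev->struct_mutex);
}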
52 static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
53 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
54 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
55 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
67 {
68         drm_mem_type_manager_t *man;
69
70         man = &bo->dev->bm.man[bo->pinned_mem_type];
71         list_add_tail(&bo->pinned_lru, &man->pinned);
72 }
73
74 void drm_bo_add_to_lru(drm_buffer_object_t * bo)
75 {
76         drm_mem_type_manager_t *man;
77
78         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
79                 man = &bo->dev->bm.man[bo->mem.mem_type];
80                 list_add_tail(&bo->lru, &man->lru);
81         } else {
82                 INIT_LIST_HEAD(&bo->lru);
83         }
84 }
85
86 static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
87 {
88 #ifdef DRM_ODD_MM_COMPAT
89         int ret;
90
91         ret = drm_bo_lock_kmm(bo);
92         if (ret)
93                 return ret;
94         drm_bo_unmap_virtual(bo);
95         if (old_is_pci)
96                 drm_bo_finish_unmap(bo);
97 #else
98         drm_bo_unmap_virtual(bo);
99 #endif
100         return 0;
101 }
102
103 static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
104 {
105 #ifdef DRM_ODD_MM_COMPAT
106         int ret;
107
108         ret = drm_bo_remap_bound(bo);
109         if (ret) {
110                 DRM_ERROR("Failed to remap a bound buffer object.\n"
111                           "\tThis might cause a sigbus later.\n");
112         }
113         drm_bo_unlock_kmm(bo);
114 #endif
115 }
116
117 /*
118  * Call bo->mutex locked.
119  */
120
121 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
122 {
123         drm_device_t *dev = bo->dev;
124         int ret = 0;
125         bo->ttm = NULL;
126
127         switch (bo->type) {
128         case drm_bo_type_dc:
129                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
130                 if (!bo->ttm)
131                         ret = -ENOMEM;
132                 break;
133         case drm_bo_type_user:
134         case drm_bo_type_fake:
135                 break;
136         default:
137                 DRM_ERROR("Illegal buffer object type\n");
138                 ret = -EINVAL;
139                 break;
140         }
141
142         return ret;
143 }
144
145 static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
146                                   drm_bo_mem_reg_t * mem,
147                                   int evict, int no_wait)
148 {
149         drm_device_t *dev = bo->dev;
150         drm_buffer_manager_t *bm = &dev->bm;
151         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
152         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
153         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
154         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
155         int ret = 0;
156
157         if (old_is_pci || new_is_pci)
158                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
159         if (ret)
160                 return ret;
161
162         /*
163          * Create and bind a ttm if required.
164          */
165
166         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
167                 ret = drm_bo_add_ttm(bo);
168                 if (ret)
169                         goto out_err;
170
171                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
172                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
173                                            DRM_BO_FLAG_CACHED,
174                                            mem->mm_node->start);
175                         if (ret)
176                                 goto out_err;
177                 }
178         }
179
180         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
181
182                 drm_bo_mem_reg_t *old_mem = &bo->mem;
183                 uint32_t save_flags = old_mem->flags;
184                 uint32_t save_mask = old_mem->mask;
185
186                 *old_mem = *mem;
187                 mem->mm_node = NULL;
188                 old_mem->mask = save_mask;
189                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
190
191         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
192                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
193
194                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
195
196         } else if (dev->driver->bo_driver->move) {
197                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
198
199         } else {
200
201                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
202
203         }
204
205         if (ret)
206                 goto out_err;
207
208         if (old_is_pci || new_is_pci)
209                 drm_bo_vm_post_move(bo);
210
211         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
212                 ret =
213                     dev->driver->bo_driver->invalidate_caches(dev,
214                                                               bo->mem.flags);
215                 if (ret)
216                         DRM_ERROR("Can not flush read caches\n");
217         }
218
219         DRM_FLAG_MASKED(bo->priv_flags,
220                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
221                         _DRM_BO_FLAG_EVICTED);
222
223         if (bo->mem.mm_node)
224                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
225
226         return 0;
227
228       out_err:
229         if (old_is_pci || new_is_pci)
230                 drm_bo_vm_post_move(bo);
231
232         new_man = &bm->man[bo->mem.mem_type];
233         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
234                 drm_ttm_unbind(bo->ttm);
235                 drm_destroy_ttm(bo->ttm);
236                 bo->ttm = NULL;
237         }
238
239         return ret;
240 }
241
242 /*
243  * Call bo->mutex locked.
244  * Wait until the buffer is idle.
245  */
246
247 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
248                 int no_wait)
249 {
250
251         drm_fence_object_t *fence = bo->fence;
252         int ret;
253
254         if (fence) {
255                 drm_device_t *dev = bo->dev;
256                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
257                         drm_fence_usage_deref_unlocked(dev, fence);
258                         bo->fence = NULL;
259                         return 0;
260                 }
261                 if (no_wait) {
262                         return -EBUSY;
263                 }
264                 ret =
265                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
266                                           bo->fence_type);
267                 if (ret)
268                         return ret;
269
270                 drm_fence_usage_deref_unlocked(dev, fence);
271                 bo->fence = NULL;
272
273         }
274         return 0;
275 }
276
277 static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
278 {
279         drm_device_t *dev = bo->dev;
280         drm_buffer_manager_t *bm = &dev->bm;
281
282         if (bo->fence) {
283                 if (bm->nice_mode) {
284                         unsigned long _end = jiffies + 3 * DRM_HZ;
285                         int ret;
286                         do {
287                                 ret = drm_bo_wait(bo, 0, 1, 0);
288                                 if (ret && allow_errors)
289                                         return ret;
290
291                         } while (ret && !time_after_eq(jiffies, _end));
292
293                         if (bo->fence) {
294                                 bm->nice_mode = 0;
295                                 DRM_ERROR("Detected GPU lockup or "
296                                           "fence driver was taken down. "
297                                           "Evicting buffer.\n");
298                         }
299                 }
300                 if (bo->fence) {
301                         drm_fence_usage_deref_unlocked(dev, bo->fence);
302                         bo->fence = NULL;
303                 }
304         }
305         return 0;
306 }
307
308 /*
309  * Call dev->struct_mutex locked.
310  * Attempts to remove all private references to a buffer by expiring its
311  * fence object and removing from lru lists and memory managers.
312  */
313
314 static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
315 {
316         drm_device_t *dev = bo->dev;
317         drm_buffer_manager_t *bm = &dev->bm;
318
319         atomic_inc(&bo->usage);
320         mutex_unlock(&dev->struct_mutex);
321         mutex_lock(&bo->mutex);
322
323         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
324
325         if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
326                 drm_fence_usage_deref_locked(dev, bo->fence);
327                 bo->fence = NULL;
328         }
329
330         if (bo->fence && remove_all)
331                 (void)drm_bo_expire_fence(bo, 0);
332
333         mutex_lock(&dev->struct_mutex);
334
335         if (!atomic_dec_and_test(&bo->usage)) {
336                 goto out;
337         }
338
339         if (!bo->fence) {
340                 list_del_init(&bo->lru);
341                 if (bo->mem.mm_node) {
342                         drm_mm_put_block(bo->mem.mm_node);
343                         if (bo->pinned_node == bo->mem.mm_node)
344                                 bo->pinned_node = NULL;
345                         bo->mem.mm_node = NULL;
346                 }
347                 list_del_init(&bo->pinned_lru);
348                 if (bo->pinned_node) {
349                         drm_mm_put_block(bo->pinned_node);
350                         bo->pinned_node = NULL;
351                 }
352                 list_del_init(&bo->ddestroy);
353                 mutex_unlock(&bo->mutex);
354                 drm_bo_destroy_locked(bo);
355                 return;
356         }
357
358         if (list_empty(&bo->ddestroy)) {
359                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
360                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
361                 schedule_delayed_work(&bm->wq,
362                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
363         }
364
365       out:
366         mutex_unlock(&bo->mutex);
367         return;
368 }
369
370 /*
371  * Verify that refcount is 0 and that there are no internal references
372  * to the buffer object. Then destroy it.
373  */
374
375 static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
376 {
377         drm_device_t *dev = bo->dev;
378         drm_buffer_manager_t *bm = &dev->bm;
379
380         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
381             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
382             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
383                 if (bo->fence != NULL) {
384                         DRM_ERROR("Fence was non-zero.\n");
385                         drm_bo_cleanup_refs(bo, 0);
386                         return;
387                 }
388
389 #ifdef DRM_ODD_MM_COMPAT
390                 BUG_ON(!list_empty(&bo->vma_list));
391                 BUG_ON(!list_empty(&bo->p_mm_list));
392 #endif
393
394                 if (bo->ttm) {
395                         drm_ttm_unbind(bo->ttm);
396                         drm_destroy_ttm(bo->ttm);
397                         bo->ttm = NULL;
398                 }
399
400                 atomic_dec(&bm->count);
401
402                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
403
404                 return;
405         }
406
407         /*
408          * Some stuff is still trying to reference the buffer object.
409          * Get rid of those references.
410          */
411
412         drm_bo_cleanup_refs(bo, 0);
413
414         return;
415 }
416
417 /*
418  * Call dev->struct_mutex locked.
419  */
420
421 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
422 {
423         drm_buffer_manager_t *bm = &dev->bm;
424
425         drm_buffer_object_t *entry, *nentry;
426         struct list_head *list, *next;
427
428         list_for_each_safe(list, next, &bm->ddestroy) {
429                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
430
431                 nentry = NULL;
432                 if (next != &bm->ddestroy) {
433                         nentry = list_entry(next, drm_buffer_object_t,
434                                             ddestroy);
435                         atomic_inc(&nentry->usage);
436                 }
437
438                 drm_bo_cleanup_refs(entry, remove_all);
439
440                 if (nentry) {
441                         atomic_dec(&nentry->usage);
442                 }
443         }
444 }
445
446 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
447 static void drm_bo_delayed_workqueue(void *data)
448 #else
449 static void drm_bo_delayed_workqueue(struct work_struct *work)
450 #endif
451 {
452 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
453         drm_device_t *dev = (drm_device_t *) data;
454         drm_buffer_manager_t *bm = &dev->bm;
455 #else
456         drm_buffer_manager_t *bm =
457             container_of(work, drm_buffer_manager_t, wq.work);
458         drm_device_t *dev = container_of(bm, drm_device_t, bm);
459 #endif
460
461         DRM_DEBUG("Delayed delete Worker\n");
462
463         mutex_lock(&dev->struct_mutex);
464         if (!bm->initialized) {
465                 mutex_unlock(&dev->struct_mutex);
466                 return;
467         }
468         drm_bo_delayed_delete(dev, 0);
469         if (bm->initialized && !list_empty(&bm->ddestroy)) {
470                 schedule_delayed_work(&bm->wq,
471                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
472         }
473         mutex_unlock(&dev->struct_mutex);
474 }
475
476 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
477 {
478         if (atomic_dec_and_test(&bo->usage)) {
479                 drm_bo_destroy_locked(bo);
480         }
481 }
482
483 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
484 {
485         drm_buffer_object_t *bo =
486             drm_user_object_entry(uo, drm_buffer_object_t, base);
487
488         drm_bo_takedown_vm_locked(bo);
489         drm_bo_usage_deref_locked(bo);
490 }
491
492 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
493 {
494         drm_device_t *dev = bo->dev;
495
496         if (atomic_dec_and_test(&bo->usage)) {
497                 mutex_lock(&dev->struct_mutex);
498                 if (atomic_read(&bo->usage) == 0)
499                         drm_bo_destroy_locked(bo);
500                 mutex_unlock(&dev->struct_mutex);
501         }
502 }
503
504 /*
505  * Note: the caller has to register (if applicable) and deregister
506  * fence object usage; see the illustrative sketch below.
507  */
508
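A hedged sketch of the caller-side contract from the note above (illustrative only; not code from this file):

/*
 * Illustrative sketch only. With list == NULL the buffer manager's unfenced
 * list is used, and with fence == NULL a new fence object is created and
 * emitted. The caller is handed a usage reference through used_fence and is
 * expected to drop it when done.
 */
static void example_fence_unfenced(drm_file_t * priv)
{
        drm_device_t *dev = priv->head->dev;
        drm_fence_object_t *fence = NULL;
        int ret;

        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret == 0 && fence != NULL)
                drm_fence_usage_deref_unlocked(dev, fence);   /* drop our usage */
}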
509 int drm_fence_buffer_objects(drm_file_t * priv,
510                              struct list_head *list,
511                              uint32_t fence_flags,
512                              drm_fence_object_t * fence,
513                              drm_fence_object_t ** used_fence)
514 {
515         drm_device_t *dev = priv->head->dev;
516         drm_buffer_manager_t *bm = &dev->bm;
517
518         drm_buffer_object_t *entry;
519         uint32_t fence_type = 0;
520         int count = 0;
521         int ret = 0;
522         struct list_head *l;
523         LIST_HEAD(f_list);
524
525         mutex_lock(&dev->struct_mutex);
526
527         if (!list)
528                 list = &bm->unfenced;
529
530         list_for_each_entry(entry, list, lru) {
531                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
532                 fence_type |= entry->fence_type;
533                 if (entry->fence_class != 0) {
534                         DRM_ERROR("Fence class %d is not implemented yet.\n",
535                                   entry->fence_class);
536                         ret = -EINVAL;
537                         goto out;
538                 }
539                 count++;
540         }
541
542         if (!count) {
543                 ret = -EINVAL;
544                 goto out;
545         }
546
547         /*
548          * Transfer to a local list before we release dev->struct_mutex,
549          * so that we don't get any new unfenced objects while fencing
550          * the ones we already have.
551          */
552
553         list_splice_init(list, &f_list);
554
555         if (fence) {
556                 if ((fence_type & fence->type) != fence_type) {
557                         DRM_ERROR("Given fence doesn't match buffers "
558                                   "on unfenced list.\n");
559                         ret = -EINVAL;
560                         goto out;
561                 }
562         } else {
563                 mutex_unlock(&dev->struct_mutex);
564                 ret = drm_fence_object_create(dev, fence_type,
565                                               fence_flags | DRM_FENCE_FLAG_EMIT,
566                                               &fence);
567                 mutex_lock(&dev->struct_mutex);
568                 if (ret)
569                         goto out;
570         }
571
572         count = 0;
573         l = f_list.next;
574         while (l != &f_list) {
575                 prefetch(l->next);
576                 entry = list_entry(l, drm_buffer_object_t, lru);
577                 atomic_inc(&entry->usage);
578                 mutex_unlock(&dev->struct_mutex);
579                 mutex_lock(&entry->mutex);
580                 mutex_lock(&dev->struct_mutex);
581                 list_del_init(l);
582                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
583                         count++;
584                         if (entry->fence)
585                                 drm_fence_usage_deref_locked(dev, entry->fence);
586                         entry->fence = fence;
587                         DRM_FLAG_MASKED(entry->priv_flags, 0,
588                                         _DRM_BO_FLAG_UNFENCED);
589                         DRM_WAKEUP(&entry->event_queue);
590                         drm_bo_add_to_lru(entry);
591                 }
592                 mutex_unlock(&entry->mutex);
593                 drm_bo_usage_deref_locked(entry);
594                 l = f_list.next;
595         }
596         atomic_add(count, &fence->usage);
597         DRM_DEBUG("Fenced %d buffers\n", count);
598       out:
599         mutex_unlock(&dev->struct_mutex);
600         *used_fence = fence;
601         return ret;
602 }
603
604 EXPORT_SYMBOL(drm_fence_buffer_objects);
605
606 /*
607  * bo->mutex locked 
608  */
609
610 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
611                         int no_wait)
612 {
613         int ret = 0;
614         drm_device_t *dev = bo->dev;
615         drm_bo_mem_reg_t evict_mem;
616
617         /*
618          * Someone might have modified the buffer before we took the buffer mutex.
619          */
620
621         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
622                 goto out;
623         if (bo->mem.mem_type != mem_type)
624                 goto out;
625
626         ret = drm_bo_wait(bo, 0, 0, no_wait);
627
628         if (ret && ret != -EAGAIN) {
629                 DRM_ERROR("Failed to expire fence before "
630                           "buffer eviction.\n");
631                 goto out;
632         }
633
634         evict_mem = bo->mem;
635         evict_mem.mm_node = NULL;
636
637         if (bo->type == drm_bo_type_fake) {
638                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
639                 bo->mem.mm_node = NULL;
640                 goto out1;
641         }
642
643         evict_mem = bo->mem;
644         evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
645         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
646
647         if (ret) {
648                 if (ret != -EAGAIN)
649                         DRM_ERROR("Failed to find memory space for "
650                                   "buffer 0x%p eviction.\n", bo);
651                 goto out;
652         }
653
654         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
655
656         if (ret) {
657                 if (ret != -EAGAIN)
658                         DRM_ERROR("Buffer eviction failed\n");
659                 goto out;
660         }
661
662       out1:
663         mutex_lock(&dev->struct_mutex);
664         if (evict_mem.mm_node) {
665                 if (evict_mem.mm_node != bo->pinned_node)
666                         drm_mm_put_block(evict_mem.mm_node);
667                 evict_mem.mm_node = NULL;
668         }
669         list_del(&bo->lru);
670         drm_bo_add_to_lru(bo);
671         mutex_unlock(&dev->struct_mutex);
672
673         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
674                         _DRM_BO_FLAG_EVICTED);
675
676       out:
677         return ret;
678 }
679
680 static int drm_bo_mem_force_space(drm_device_t * dev,
681                                   drm_bo_mem_reg_t * mem,
682                                   uint32_t mem_type, int no_wait)
683 {
684         drm_mm_node_t *node;
685         drm_buffer_manager_t *bm = &dev->bm;
686         drm_buffer_object_t *entry;
687         drm_mem_type_manager_t *man = &bm->man[mem_type];
688         struct list_head *lru;
689         unsigned long num_pages = mem->num_pages;
690         int ret;
691
692         mutex_lock(&dev->struct_mutex);
693         do {
694                 node = drm_mm_search_free(&man->manager, num_pages,
695                                           mem->page_alignment, 1);
696                 if (node)
697                         break;
698
699                 lru = &man->lru;
700                 if (lru->next == lru)
701                         break;
702
703                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
704                 atomic_inc(&entry->usage);
705                 mutex_unlock(&dev->struct_mutex);
706                 mutex_lock(&entry->mutex);
707                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
708
709                 ret = drm_bo_evict(entry, mem_type, no_wait);
710                 mutex_unlock(&entry->mutex);
711                 drm_bo_usage_deref_unlocked(entry);
712                 if (ret)
713                         return ret;
714                 mutex_lock(&dev->struct_mutex);
715         } while (1);
716
717         if (!node) {
718                 mutex_unlock(&dev->struct_mutex);
719                 return -ENOMEM;
720         }
721
722         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
723         mutex_unlock(&dev->struct_mutex);
724         mem->mm_node = node;
725         mem->mem_type = mem_type;
726         return 0;
727 }
728
729 static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
730                                 uint32_t mem_type,
731                                 uint32_t mask, uint32_t * res_mask)
732 {
733         uint32_t cur_flags = drm_bo_type_flags(mem_type);
734         uint32_t flag_diff;
735
736         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
737                 cur_flags |= DRM_BO_FLAG_CACHED;
738         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
739                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
740         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
741                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
742
743         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
744                 return 0;
745
746         if (mem_type == DRM_BO_MEM_LOCAL) {
747                 *res_mask = cur_flags;
748                 return 1;
749         }
750
751         flag_diff = (mask ^ cur_flags);
752         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
753             (!(mask & DRM_BO_FLAG_CACHED) ||
754              (mask & DRM_BO_FLAG_FORCE_CACHING)))
755                 return 0;
756
757         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
758             ((mask & DRM_BO_FLAG_MAPPABLE) ||
759              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
760                 return 0;
761
762         *res_mask = cur_flags;
763         return 1;
764 }
765
766 int drm_bo_mem_space(drm_buffer_object_t * bo,
767                      drm_bo_mem_reg_t * mem, int no_wait)
768 {
769         drm_device_t *dev = bo->dev;
770         drm_buffer_manager_t *bm = &dev->bm;
771         drm_mem_type_manager_t *man;
772
773         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
774         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
775         uint32_t i;
776         uint32_t mem_type = DRM_BO_MEM_LOCAL;
777         uint32_t cur_flags;
778         int type_found = 0;
779         int type_ok = 0;
780         int has_eagain = 0;
781         drm_mm_node_t *node = NULL;
782         int ret;
783
784         mem->mm_node = NULL;
785         for (i = 0; i < num_prios; ++i) {
786                 mem_type = prios[i];
787                 man = &bm->man[mem_type];
788
789                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
790                                                &cur_flags);
791
792                 if (!type_ok)
793                         continue;
794
795                 if (mem_type == DRM_BO_MEM_LOCAL)
796                         break;
797
798                 if ((mem_type == bo->pinned_mem_type) &&
799                     (bo->pinned_node != NULL)) {
800                         node = bo->pinned_node;
801                         break;
802                 }
803
804                 mutex_lock(&dev->struct_mutex);
805                 if (man->has_type && man->use_type) {
806                         type_found = 1;
807                         node = drm_mm_search_free(&man->manager, mem->num_pages,
808                                                   mem->page_alignment, 1);
809                         if (node)
810                                 node = drm_mm_get_block(node, mem->num_pages,
811                                                         mem->page_alignment);
812                 }
813                 mutex_unlock(&dev->struct_mutex);
814                 if (node)
815                         break;
816         }
817
818         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
819                 mem->mm_node = node;
820                 mem->mem_type = mem_type;
821                 mem->flags = cur_flags;
822                 return 0;
823         }
824
825         if (!type_found)
826                 return -EINVAL;
827
828         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
829         prios = dev->driver->bo_driver->mem_busy_prio;
830
831         for (i = 0; i < num_prios; ++i) {
832                 mem_type = prios[i];
833                 man = &bm->man[mem_type];
834
835                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
836                         continue;
837
838                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
839
840                 if (ret == 0) {
841                         mem->flags = cur_flags;
842                         return 0;
843                 }
844
845                 if (ret == -EAGAIN)
846                         has_eagain = 1;
847         }
848
849         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
850         return ret;
851 }
852
853 EXPORT_SYMBOL(drm_bo_mem_space);
854
855 static int drm_bo_new_mask(drm_buffer_object_t * bo,
856                            uint32_t new_mask, uint32_t hint)
857 {
858         uint32_t new_props;
859
860         if (bo->type == drm_bo_type_user) {
861                 DRM_ERROR("User buffers are not supported yet\n");
862                 return -EINVAL;
863         }
864         if (bo->type == drm_bo_type_fake &&
865             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
866                 DRM_ERROR("Fake buffers must be pinned.\n");
867                 return -EINVAL;
868         }
869
870         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
871                 DRM_ERROR
872                     ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
873                      "processes\n");
874                 return -EPERM;
875         }
876
877         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
878                                 DRM_BO_FLAG_READ);
879
880         if (!new_props) {
881                 DRM_ERROR("Invalid buffer object rwx properties\n");
882                 return -EINVAL;
883         }
884
885         bo->mem.mask = new_mask;
886         return 0;
887 }
888
889 /*
890  * Call dev->struct_mutex locked.
891  */
892
893 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
894                                               uint32_t handle, int check_owner)
895 {
896         drm_user_object_t *uo;
897         drm_buffer_object_t *bo;
898
899         uo = drm_lookup_user_object(priv, handle);
900
901         if (!uo || (uo->type != drm_buffer_type)) {
902                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
903                 return NULL;
904         }
905
906         if (check_owner && priv != uo->owner) {
907                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
908                         return NULL;
909         }
910
911         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
912         atomic_inc(&bo->usage);
913         return bo;
914 }
915
916 /*
917  * Call bo->mutex locked.
918  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
919  * Unlike drm_bo_busy(), this function does not do any fence flushing.
920  */
921
922 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
923 {
924         drm_fence_object_t *fence = bo->fence;
925
926         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
927         if (fence) {
928                 drm_device_t *dev = bo->dev;
929                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
930                         drm_fence_usage_deref_unlocked(dev, fence);
931                         bo->fence = NULL;
932                         return 0;
933                 }
934                 return 1;
935         }
936         return 0;
937 }
938
939 /*
940  * Call bo->mutex locked.
941  * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
942  */
943
944 static int drm_bo_busy(drm_buffer_object_t * bo)
945 {
946         drm_fence_object_t *fence = bo->fence;
947
948         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
949         if (fence) {
950                 drm_device_t *dev = bo->dev;
951                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
952                         drm_fence_usage_deref_unlocked(dev, fence);
953                         bo->fence = NULL;
954                         return 0;
955                 }
956                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
957                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
958                         drm_fence_usage_deref_unlocked(dev, fence);
959                         bo->fence = NULL;
960                         return 0;
961                 }
962                 return 1;
963         }
964         return 0;
965 }
966
967 static int drm_bo_read_cached(drm_buffer_object_t * bo)
968 {
969         int ret = 0;
970
971         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
972         if (bo->mem.mm_node)
973                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
974         return ret;
975 }
976
977 /*
978  * Wait until a buffer is unmapped.
979  */
980
981 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
982 {
983         int ret = 0;
984
985         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
986                 return -EBUSY;
987
988         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
989                     atomic_read(&bo->mapped) == -1);
990
991         if (ret == -EINTR)
992                 ret = -EAGAIN;
993
994         return ret;
995 }
996
997 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
998 {
999         int ret;
1000
1001         mutex_lock(&bo->mutex);
1002         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1003         mutex_unlock(&bo->mutex);
1004         return ret;
1005 }
1006
1007 /*
1008  * Wait until a buffer that is scheduled to be fenced moves off the unfenced list.
1009  * Until then, we cannot really do anything with it except delete it.
1010  * The unfenced list is a PITA, and the operations
1011  * 1) validating
1012  * 2) submitting commands
1013  * 3) fencing
1014  * should really be a single atomic operation.
1015  * We now "solve" this problem by keeping the buffer "unfenced" after
1016  * validating, but before fencing (a sketch of the intended flow follows).
1017  */
1018
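A hedged sketch of the intended validate / submit / fence flow around the unfenced list (illustrative only; the command-submission step is driver specific and only indicated by a comment):

/*
 * Illustrative sketch only -- not from drm_bo.c.
 */
static int example_validate_submit_fence(drm_file_t * priv,
                                         drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = NULL;
        int ret;

        /*
         * 1) Validate: places the buffer and puts it on bm->unfenced with
         *    _DRM_BO_FLAG_UNFENCED set (see drm_buffer_object_validate()).
         */
        mutex_lock(&bo->mutex);
        ret = drm_buffer_object_validate(bo, 1, 0);
        mutex_unlock(&bo->mutex);
        if (ret)
                return ret;

        /* 2) Submit the commands that reference the buffer (driver specific). */

        /*
         * 3) Fence: clears _DRM_BO_FLAG_UNFENCED and moves the buffers from
         *    the unfenced list back onto their LRU lists.
         */
        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
        if (ret == 0 && fence != NULL)
                drm_fence_usage_deref_unlocked(bo->dev, fence);
        return ret;
}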
1019 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
1020                                 int eagain_if_wait)
1021 {
1022         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1023         unsigned long _end = jiffies + 3 * DRM_HZ;
1024
1025         if (ret && no_wait)
1026                 return -EBUSY;
1027         else if (!ret)
1028                 return 0;
1029
1030         do {
1031                 mutex_unlock(&bo->mutex);
1032                 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1033                             !drm_bo_check_unfenced(bo));
1034                 mutex_lock(&bo->mutex);
1035                 if (ret == -EINTR)
1036                         return -EAGAIN;
1037                 if (ret) {
1038                         DRM_ERROR
1039                             ("Error waiting for buffer to become fenced\n");
1040                         return ret;
1041                 }
1042                 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1043         } while (ret && !time_after_eq(jiffies, _end));
1044         if (ret) {
1045                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1046                 return ret;
1047         }
1048         if (eagain_if_wait)
1049                 return -EAGAIN;
1050
1051         return 0;
1052 }
1053
1054 /*
1055  * Fill in the ioctl reply argument with buffer info.
1056  * Bo locked. 
1057  */
1058
1059 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1060                                 drm_bo_arg_reply_t * rep)
1061 {
1062         rep->handle = bo->base.hash.key;
1063         rep->flags = bo->mem.flags;
1064         rep->size = bo->mem.num_pages * PAGE_SIZE;
1065         rep->offset = bo->offset;
1066         rep->arg_handle = bo->map_list.user_token;
1067         rep->mask = bo->mem.mask;
1068         rep->buffer_start = bo->buffer_start;
1069         rep->fence_flags = bo->fence_type;
1070         rep->rep_flags = 0;
1071         rep->page_alignment = bo->mem.page_alignment;
1072
1073         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1074                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1075                                 DRM_BO_REP_BUSY);
1076         }
1077 }
1078
1079 /*
1080  * Wait for buffer idle and register that we've mapped the buffer.
1081  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1, 
1082  * so that if the client dies, the mapping is automatically 
1083  * unregistered.
1084  */
1085
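/*
 * The ref object added in drm_buffer_object_map() is dropped either
 * explicitly through drm_buffer_object_unmap() or implicitly when the file
 * is closed; both paths end up in drm_buffer_user_object_unmap() below,
 * which decrements bo->mapped and wakes anyone sleeping in
 * drm_bo_wait_unmapped().
 */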
1086 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1087                                  uint32_t map_flags, unsigned hint,
1088                                  drm_bo_arg_reply_t * rep)
1089 {
1090         drm_buffer_object_t *bo;
1091         drm_device_t *dev = priv->head->dev;
1092         int ret = 0;
1093         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1094
1095         mutex_lock(&dev->struct_mutex);
1096         bo = drm_lookup_buffer_object(priv, handle, 1);
1097         mutex_unlock(&dev->struct_mutex);
1098
1099         if (!bo)
1100                 return -EINVAL;
1101
1102         mutex_lock(&bo->mutex);
1103         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1104                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1105                 if (ret)
1106                         goto out;
1107         }
1108
1109         /*
1110          * If atomic_inc_and_test() returns true, we are currently unmapped:
1111          * bo->mapped counts mappers and starts at -1 when there are none.
1112          * We need this test because unmapping can be done without bo->mutex held.
1113          */
1114
1115         while (1) {
1116                 if (atomic_inc_and_test(&bo->mapped)) {
1117                         if (no_wait && drm_bo_busy(bo)) {
1118                                 atomic_dec(&bo->mapped);
1119                                 ret = -EBUSY;
1120                                 goto out;
1121                         }
1122                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1123                         if (ret) {
1124                                 atomic_dec(&bo->mapped);
1125                                 goto out;
1126                         }
1127
1128                         if ((map_flags & DRM_BO_FLAG_READ) &&
1129                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1130                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1131                                 drm_bo_read_cached(bo);
1132                         }
1133                         break;
1134                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1135                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1136                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1137
1138                         /*
1139                          * We are already mapped with different flags;
1140                          * we need to wait for the unmap.
1141                          */
1142
1143                         ret = drm_bo_wait_unmapped(bo, no_wait);
1144                         if (ret)
1145                                 goto out;
1146
1147                         continue;
1148                 }
1149                 break;
1150         }
1151
1152         mutex_lock(&dev->struct_mutex);
1153         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1154         mutex_unlock(&dev->struct_mutex);
1155         if (ret) {
1156                 if (atomic_add_negative(-1, &bo->mapped))
1157                         DRM_WAKEUP(&bo->event_queue);
1158
1159         } else
1160                 drm_bo_fill_rep_arg(bo, rep);
1161       out:
1162         mutex_unlock(&bo->mutex);
1163         drm_bo_usage_deref_unlocked(bo);
1164         return ret;
1165 }
1166
1167 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1168 {
1169         drm_device_t *dev = priv->head->dev;
1170         drm_buffer_object_t *bo;
1171         drm_ref_object_t *ro;
1172         int ret = 0;
1173
1174         mutex_lock(&dev->struct_mutex);
1175
1176         bo = drm_lookup_buffer_object(priv, handle, 1);
1177         if (!bo) {
1178                 ret = -EINVAL;
1179                 goto out;
1180         }
1181
1182         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1183         if (!ro) {
1184                 ret = -EINVAL;
1185                 goto out;
1186         }
1187
1188         drm_remove_ref_object(priv, ro);
1189         drm_bo_usage_deref_locked(bo);
1190       out:
1191         mutex_unlock(&dev->struct_mutex);
1192         return ret;
1193 }
1194
1195 /*
1196  * Call dev->struct_mutex locked.
1197  */
1198
1199 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1200                                          drm_user_object_t * uo,
1201                                          drm_ref_t action)
1202 {
1203         drm_buffer_object_t *bo =
1204             drm_user_object_entry(uo, drm_buffer_object_t, base);
1205
1206         /*
1207          * We DON'T want to take the bo->lock here, because we want to
1208          * hold it when we wait for unmapped buffer.
1209          */
1210
1211         BUG_ON(action != _DRM_REF_TYPE1);
1212
1213         if (atomic_add_negative(-1, &bo->mapped))
1214                 DRM_WAKEUP(&bo->event_queue);
1215 }
1216
1217 /*
1218  * bo->mutex locked. 
1219  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1220  */
1221
1222 int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1223                        int no_wait, int move_unfenced)
1224 {
1225         drm_device_t *dev = bo->dev;
1226         drm_buffer_manager_t *bm = &dev->bm;
1227         int ret = 0;
1228         drm_bo_mem_reg_t mem;
1229         /*
1230          * Flush outstanding fences.
1231          */
1232
1233         drm_bo_busy(bo);
1234
1235         /*
1236          * Wait for outstanding fences.
1237          */
1238
1239         ret = drm_bo_wait(bo, 0, 0, no_wait);
1240         if (ret)
1241                 return ret;
1242
1243         mem.num_pages = bo->mem.num_pages;
1244         mem.size = mem.num_pages << PAGE_SHIFT;
1245         mem.mask = new_mem_flags;
1246         mem.page_alignment = bo->mem.page_alignment;
1247
1248         mutex_lock(&bm->evict_mutex);
1249         mutex_lock(&dev->struct_mutex);
1250         list_del(&bo->lru);
1251         list_add_tail(&bo->lru, &bm->unfenced);
1252         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1253                         _DRM_BO_FLAG_UNFENCED);
1254         mutex_unlock(&dev->struct_mutex);
1255
1256         /*
1257          * Determine where to move the buffer.
1258          */
1259         ret = drm_bo_mem_space(bo, &mem, no_wait);
1260         if (ret)
1261                 goto out_unlock;
1262
1263         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1264
1265  out_unlock:
1266         if (ret || !move_unfenced) {
1267                 mutex_lock(&dev->struct_mutex);
1268                 if (mem.mm_node) {
1269                         if (mem.mm_node != bo->pinned_node)
1270                                 drm_mm_put_block(mem.mm_node);
1271                         mem.mm_node = NULL;
1272                 }
1273                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1274                 DRM_WAKEUP(&bo->event_queue);
1275                 list_del(&bo->lru);
1276                 drm_bo_add_to_lru(bo);
1277                 mutex_unlock(&dev->struct_mutex);
1278         }
1279
1280         mutex_unlock(&bm->evict_mutex);
1281         return ret;
1282 }
1283
1284 static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
1285 {
1286         uint32_t flag_diff = (mem->mask ^ mem->flags);
1287
1288         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1289                 return 0;
1290         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1291             (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1292              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1293           return 0;
1294         }
1295         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1296             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1297              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1298                 return 0;
1299         return 1;
1300 }
1301
1302 static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
1303 {
1304         drm_buffer_manager_t *bm = &dev->bm;
1305         drm_mem_type_manager_t *man;
1306         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1307         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1308         uint32_t i;
1309         int type_ok = 0;
1310         uint32_t mem_type = 0;
1311         uint32_t cur_flags;
1312
1313         if (drm_bo_mem_compat(mem))
1314                 return 0;
1315
1316         BUG_ON(mem->mm_node);
1317
1318         for (i = 0; i < num_prios; ++i) {
1319                 mem_type = prios[i];
1320                 man = &bm->man[mem_type];
1321                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1322                                                &cur_flags);
1323                 if (type_ok)
1324                         break;
1325         }
1326
1327         if (type_ok) {
1328                 mem->mm_node = NULL;
1329                 mem->mem_type = mem_type;
1330                 mem->flags = cur_flags;
1331                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1332                 return 0;
1333         }
1334
1335         DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1336         return -EINVAL;
1337 }
1338
1339 /*
1340  * bo locked.
1341  */
1342
1343 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1344                                       int move_unfenced, int no_wait)
1345 {
1346         drm_device_t *dev = bo->dev;
1347         drm_buffer_manager_t *bm = &dev->bm;
1348         drm_bo_driver_t *driver = dev->driver->bo_driver;
1349         int ret;
1350
1351         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
1352                   bo->mem.flags);
1353         ret =
1354             driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type);
1355         if (ret) {
1356                 DRM_ERROR("Driver did not support given buffer permissions\n");
1357                 return ret;
1358         }
1359
1360         ret = drm_bo_wait_unmapped(bo, no_wait);
1361         if (ret)
1362                 return ret;
1363
1364         if (bo->type == drm_bo_type_fake) {
1365                 ret = drm_bo_check_fake(dev, &bo->mem);
1366                 if (ret)
1367                         return ret;
1368         }
1369
1370         /*
1371          * Check whether we need to move buffer.
1372          */
1373
1374         if (!drm_bo_mem_compat(&bo->mem)) {
1375                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1376                                          move_unfenced);
1377                 if (ret) {
1378                         if (ret != -EAGAIN)
1379                                 DRM_ERROR("Failed moving buffer.\n");
1380                         return ret;
1381                 }
1382         }
1383
1384         /*
1385          * Pinned buffers.
1386          */
1387
1388         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1389                 bo->pinned_mem_type = bo->mem.mem_type;
1390                 mutex_lock(&dev->struct_mutex);
1391                 list_del_init(&bo->pinned_lru);
1392                 drm_bo_add_to_pinned_lru(bo);
1393
1394                 if (bo->pinned_node != bo->mem.mm_node) {
1395                         if (bo->pinned_node != NULL)
1396                                 drm_mm_put_block(bo->pinned_node);
1397                         bo->pinned_node = bo->mem.mm_node;
1398                 }
1399
1400                 mutex_unlock(&dev->struct_mutex);
1401
1402         } else if (bo->pinned_node != NULL) {
1403
1404                 mutex_lock(&dev->struct_mutex);
1405                 drm_mm_put_block(bo->pinned_node);
1406                 list_del_init(&bo->pinned_lru);
1407                 bo->pinned_node = NULL;
1408                 mutex_unlock(&dev->struct_mutex);
1409
1410         }
1411
1412         /*
1413          * We might need to add a TTM.
1414          */
1415
1416         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1417                 ret = drm_bo_add_ttm(bo);
1418                 if (ret)
1419                         return ret;
1420         }
1421         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1422
1423         /*
1424          * Finally, adjust lru to be sure.
1425          */
1426
1427         mutex_lock(&dev->struct_mutex);
1428         list_del(&bo->lru);
1429         if (move_unfenced) {
1430                 list_add_tail(&bo->lru, &bm->unfenced);
1431                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1432                                 _DRM_BO_FLAG_UNFENCED);
1433         } else {
1434                 drm_bo_add_to_lru(bo);
1435                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1436                         DRM_WAKEUP(&bo->event_queue);
1437                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1438                                         _DRM_BO_FLAG_UNFENCED);
1439                 }
1440         }
1441         mutex_unlock(&dev->struct_mutex);
1442
1443         return 0;
1444 }
1445
1446 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1447                                   uint32_t flags, uint32_t mask, uint32_t hint,
1448                                   drm_bo_arg_reply_t * rep)
1449 {
1450         drm_buffer_object_t *bo;
1451         int ret;
1452         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1453
1454         bo = drm_lookup_buffer_object(priv, handle, 1);
1455         if (!bo) {
1456                 return -EINVAL;
1457         }
1458
1459         mutex_lock(&bo->mutex);
1460         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1461
1462         if (ret)
1463                 goto out;
1464
1465         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1466         ret = drm_bo_new_mask(bo, flags, hint);
1467         if (ret)
1468                 goto out;
1469
1470         ret =
1471             drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1472                                        no_wait);
1473         drm_bo_fill_rep_arg(bo, rep);
1474
1475       out:
1476
1477         mutex_unlock(&bo->mutex);
1478
1479         drm_bo_usage_deref_unlocked(bo);
1480         return ret;
1481 }
1482
1483 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1484                               drm_bo_arg_reply_t * rep)
1485 {
1486         drm_buffer_object_t *bo;
1487
1488         bo = drm_lookup_buffer_object(priv, handle, 1);
1489         if (!bo) {
1490                 return -EINVAL;
1491         }
1492         mutex_lock(&bo->mutex);
1493         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1494                 (void)drm_bo_busy(bo);
1495         drm_bo_fill_rep_arg(bo, rep);
1496         mutex_unlock(&bo->mutex);
1497         drm_bo_usage_deref_unlocked(bo);
1498         return 0;
1499 }
1500
1501 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1502                               uint32_t hint, drm_bo_arg_reply_t * rep)
1503 {
1504         drm_buffer_object_t *bo;
1505         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1506         int ret;
1507
1508         bo = drm_lookup_buffer_object(priv, handle, 1);
1509         if (!bo) {
1510                 return -EINVAL;
1511         }
1512
1513         mutex_lock(&bo->mutex);
1514         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1515         if (ret)
1516                 goto out;
1517         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1518         if (ret)
1519                 goto out;
1520
1521         drm_bo_fill_rep_arg(bo, rep);
1522
1523       out:
1524         mutex_unlock(&bo->mutex);
1525         drm_bo_usage_deref_unlocked(bo);
1526         return ret;
1527 }
1528
1529 int drm_buffer_object_create(drm_file_t * priv,
1530                              unsigned long size,
1531                              drm_bo_type_t type,
1532                              uint32_t mask,
1533                              uint32_t hint,
1534                              uint32_t page_alignment,
1535                              unsigned long buffer_start,
1536                              drm_buffer_object_t ** buf_obj)
1537 {
1538         drm_device_t *dev = priv->head->dev;
1539         drm_buffer_manager_t *bm = &dev->bm;
1540         drm_buffer_object_t *bo;
1541         int ret = 0;
1542         unsigned long num_pages;
1543
1544         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1545                 DRM_ERROR("Invalid buffer object start.\n");
1546                 return -EINVAL;
1547         }
1548         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1549         if (num_pages == 0) {
1550                 DRM_ERROR("Illegal buffer object size.\n");
1551                 return -EINVAL;
1552         }
1553
1554         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1555
1556         if (!bo)
1557                 return -ENOMEM;
1558
1559         mutex_init(&bo->mutex);
1560         mutex_lock(&bo->mutex);
1561
1562         atomic_set(&bo->usage, 1);
1563         atomic_set(&bo->mapped, -1);
1564         DRM_INIT_WAITQUEUE(&bo->event_queue);
1565         INIT_LIST_HEAD(&bo->lru);
1566         INIT_LIST_HEAD(&bo->pinned_lru);
1567         INIT_LIST_HEAD(&bo->ddestroy);
1568 #ifdef DRM_ODD_MM_COMPAT
1569         INIT_LIST_HEAD(&bo->p_mm_list);
1570         INIT_LIST_HEAD(&bo->vma_list);
1571 #endif
1572         bo->dev = dev;
1573         bo->type = type;
1574         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1575         bo->mem.num_pages = num_pages;
1576         bo->mem.mm_node = NULL;
1577         bo->mem.page_alignment = page_alignment;
1578         if (bo->type == drm_bo_type_fake) {
1579                 bo->offset = buffer_start;
1580                 bo->buffer_start = 0;
1581         } else {
1582                 bo->buffer_start = buffer_start;
1583         }
1584         bo->priv_flags = 0;
1585         bo->mem.flags = 0;
1586         bo->mem.mask = 0;
1587         atomic_inc(&bm->count);
1588         ret = drm_bo_new_mask(bo, mask, hint);
1589
1590         if (ret)
1591                 goto out_err;
1592
1593         if (bo->type == drm_bo_type_dc) {
1594                 mutex_lock(&dev->struct_mutex);
1595                 ret = drm_bo_setup_vm_locked(bo);
1596                 mutex_unlock(&dev->struct_mutex);
1597                 if (ret)
1598                         goto out_err;
1599         }
1600         ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1601         if (ret)
1602                 goto out_err;
1603
1604         mutex_unlock(&bo->mutex);
1605         *buf_obj = bo;
1606         return 0;
1607
1608       out_err:
1609         mutex_unlock(&bo->mutex);
1610
1611         drm_bo_usage_deref_unlocked(bo);
1612         return ret;
1613 }
1614
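/*
 * Wrap the buffer object in a user object so that it can be looked up
 * by handle from user space, and hook up its destruction and unmap
 * callbacks.  Takes and releases dev->struct_mutex.
 */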
1615 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1616                                   int shareable)
1617 {
1618         drm_device_t *dev = priv->head->dev;
1619         int ret;
1620
1621         mutex_lock(&dev->struct_mutex);
1622         ret = drm_add_user_object(priv, &bo->base, shareable);
1623         if (ret)
1624                 goto out;
1625
1626         bo->base.remove = drm_bo_base_deref_locked;
1627         bo->base.type = drm_buffer_type;
1628         bo->base.ref_struct_locked = NULL;
1629         bo->base.unref = drm_buffer_user_object_unmap;
1630
1631       out:
1632         mutex_unlock(&dev->struct_mutex);
1633         return ret;
1634 }
1635
1636 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1637 {
1638         LOCK_TEST_WITH_RETURN(dev, filp);
1639         return 0;
1640 }
1641
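/*
 * The buffer object ioctl processes a user-space chain of drm_bo_arg_t
 * blocks: each block carries one request, and arg.next points to the
 * next block (zero terminates the chain).  The reply is written back
 * into the same block, and arg.handled marks blocks that have already
 * been processed, so that a chain interrupted by a signal (-EAGAIN)
 * can simply be resubmitted.
 */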
1642 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1643 {
1644         DRM_DEVICE;
1645         drm_bo_arg_t arg;
1646         drm_bo_arg_request_t *req = &arg.d.req;
1647         drm_bo_arg_reply_t rep;
1648         unsigned long next;
1649         drm_user_object_t *uo;
1650         drm_buffer_object_t *entry;
1651
1652         if (!dev->bm.initialized) {
1653                 DRM_ERROR("Buffer object manager is not initialized.\n");
1654                 return -EINVAL;
1655         }
1656
1657         do {
1658                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1659
1660                 if (arg.handled) {
1661                         data = arg.next;
1662                         continue;
1663                 }
1664
1665                 rep.ret = 0;
1666                 switch (req->op) {
1667                 case drm_bo_create:
1668                         rep.ret =
1669                             drm_buffer_object_create(priv, req->size,
1670                                                      req->type,
1671                                                      req->mask,
1672                                                      req->hint,
1673                                                      req->page_alignment,
1674                                                      req->buffer_start, &entry);
1675                         if (rep.ret)
1676                                 break;
1677
1678                         rep.ret =
1679                             drm_bo_add_user_object(priv, entry,
1680                                                    req->mask &
1681                                                    DRM_BO_FLAG_SHAREABLE);
1683                         if (rep.ret) {
1684                                 drm_bo_usage_deref_unlocked(entry);
1685                                 break;
1686                         }
1688
1689                         mutex_lock(&entry->mutex);
1690                         drm_bo_fill_rep_arg(entry, &rep);
1691                         mutex_unlock(&entry->mutex);
1692                         break;
1693                 case drm_bo_unmap:
1694                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1695                         break;
1696                 case drm_bo_map:
1697                         rep.ret = drm_buffer_object_map(priv, req->handle,
1698                                                         req->mask,
1699                                                         req->hint, &rep);
1700                         break;
1701                 case drm_bo_destroy:
1702                         mutex_lock(&dev->struct_mutex);
1703                         uo = drm_lookup_user_object(priv, req->handle);
1704                         if (!uo || (uo->type != drm_buffer_type)
1705                             || uo->owner != priv) {
1706                                 mutex_unlock(&dev->struct_mutex);
1707                                 rep.ret = -EINVAL;
1708                                 break;
1709                         }
1710                         rep.ret = drm_remove_user_object(priv, uo);
1711                         mutex_unlock(&dev->struct_mutex);
1712                         break;
1713                 case drm_bo_reference:
1714                         rep.ret = drm_user_object_ref(priv, req->handle,
1715                                                       drm_buffer_type, &uo);
1716                         if (rep.ret)
1717                                 break;
1718                         mutex_lock(&dev->struct_mutex);
1719                         uo = drm_lookup_user_object(priv, req->handle);
1720                         entry =
1721                             drm_user_object_entry(uo, drm_buffer_object_t,
1722                                                   base);
1723                         atomic_dec(&entry->usage);
1724                         mutex_unlock(&dev->struct_mutex);
1725                         mutex_lock(&entry->mutex);
1726                         drm_bo_fill_rep_arg(entry, &rep);
1727                         mutex_unlock(&entry->mutex);
1728                         break;
1729                 case drm_bo_unreference:
1730                         rep.ret = drm_user_object_unref(priv, req->handle,
1731                                                         drm_buffer_type);
1732                         break;
1733                 case drm_bo_validate:
1734                         rep.ret = drm_bo_lock_test(dev, filp);
1735
1736                         if (rep.ret)
1737                                 break;
1738                         rep.ret =
1739                             drm_bo_handle_validate(priv, req->handle, req->mask,
1740                                                    req->arg_handle, req->hint,
1741                                                    &rep);
1742                         break;
1743                 case drm_bo_fence:
1744                         rep.ret = drm_bo_lock_test(dev, filp);
1745                         if (rep.ret)
1746                                 break;
1747                         /* Not implemented yet. */
                        break;
1748                 case drm_bo_info:
1749                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1750                         break;
1751                 case drm_bo_wait_idle:
1752                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1753                                                      req->hint, &rep);
1754                         break;
1755                 case drm_bo_ref_fence:
1756                         rep.ret = -EINVAL;
1757                         DRM_ERROR("Function is not implemented yet.\n");
                        break;
1758                 default:
1759                         rep.ret = -EINVAL;
1760                 }
1761                 next = arg.next;
1762
1763                 /*
1764                  * A signal interrupted us. Make sure the ioctl is restartable.
1765                  */
1766
1767                 if (rep.ret == -EAGAIN)
1768                         return -EAGAIN;
1769
1770                 arg.handled = 1;
1771                 arg.d.rep = rep;
1772                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1773                 data = next;
1774         } while (data);
1775         return 0;
1776 }
1777
1778 /**
1779  * Clean the unfenced list and put its buffers back on the regular LRU.
1780  * This is part of the memory manager cleanup and should only be
1781  * called with the DRI lock held.
1782  * Call with dev->struct_mutex locked.
1783  */
1784
1785 static void drm_bo_clean_unfenced(drm_device_t *dev)
1786 {
1787         drm_buffer_manager_t *bm  = &dev->bm;
1788         struct list_head *head, *list;
1789         drm_buffer_object_t *entry;
1790
1791         head = &bm->unfenced;
1792
1793         list = head->next;
1794         while(list != head) {
1795                 prefetch(list->next);
1796                 entry = list_entry(list, drm_buffer_object_t, lru);
1797
1798                 atomic_inc(&entry->usage);
1799                 mutex_unlock(&dev->struct_mutex);
1800                 mutex_lock(&entry->mutex);
1801                 mutex_lock(&dev->struct_mutex);
1802
1803                 list_del(&entry->lru);
1804                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1805                 drm_bo_add_to_lru(entry);
1806                 mutex_unlock(&entry->mutex);
                /* Balance the usage reference taken above. */
                drm_bo_usage_deref_locked(entry);
1807                 list = head->next;
1808         }
1809 }
1810
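/*
 * Remove a buffer from memory type @mem_type during manager takedown:
 * wait for or expire its fence, optionally give up its pinned region,
 * clear a lingering NO_EVICT flag, and finally evict the buffer.
 * Errors are only propagated when @allow_errors is set.
 */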
1811 static int drm_bo_leave_list(drm_buffer_object_t * bo,
1812                              uint32_t mem_type,
1813                              int free_pinned, int allow_errors)
1814 {
1815         drm_device_t *dev = bo->dev;
1816         int ret = 0;
1817
1818         mutex_lock(&bo->mutex);
1819
1820         ret = drm_bo_expire_fence(bo, allow_errors);
1821         if (ret)
1822                 goto out;
1823
1824         if (free_pinned) {
1825                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1826                 mutex_lock(&dev->struct_mutex);
1827                 list_del_init(&bo->pinned_lru);
1828                 if (bo->pinned_node == bo->mem.mm_node)
1829                         bo->pinned_node = NULL;
1830                 if (bo->pinned_node != NULL) {
1831                         drm_mm_put_block(bo->pinned_node);
1832                         bo->pinned_node = NULL;
1833                 }
1834                 mutex_unlock(&dev->struct_mutex);
1835         }
1836
1837         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1838                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present at "
1839                           "cleanup. Removing the flag and evicting.\n");
1840                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1841                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1842         }
1843
1844         if (bo->mem.mem_type == mem_type)
1845                 ret = drm_bo_evict(bo, mem_type, 0);
1846
1847         if (ret) {
1848                 if (allow_errors) {
1849                         goto out;
1850                 } else {
1851                         ret = 0;
1852                         DRM_ERROR("Cleanup eviction failed\n");
1853                 }
1854         }
1855
1856       out:
1857         mutex_unlock(&bo->mutex);
1858         return ret;
1859 }
1860
1861
1862 static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
1863                                          int pinned_list)
1864 {
1865         if (pinned_list)
1866                 return list_entry(list, drm_buffer_object_t, pinned_lru);
1867         else
1868                 return list_entry(list, drm_buffer_object_t, lru);
1869 }
1870
1871 /*
1872  * Evict all buffers on @head away from @mem_type. Call with
1873  * dev->struct_mutex locked.
 */
1874
1875 static int drm_bo_force_list_clean(drm_device_t * dev,
1876                                    struct list_head *head,
1877                                    unsigned mem_type,
1878                                    int free_pinned,
1879                                    int allow_errors,
1880                                    int pinned_list)
1881 {
1882         struct list_head *list, *next, *prev;
1883         drm_buffer_object_t *entry, *nentry;
1884         int ret;
1885         int do_restart;
1886
1887         /*
1888          * The list traversal is a bit odd here, because an item may
1889          * disappear from the list when we release the struct_mutex or
1890          * when we decrease the usage count. Also we're not guaranteed
1891          * to drain pinned lists, so we can't always restart.
1892          */
1893
1894 restart:
1895         nentry = NULL;
1896         list_for_each_safe(list, next, head) {
1897                 prev = list->prev;
1898
1899                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
1900                 atomic_inc(&entry->usage);
1901                 if (nentry) {
1902                         atomic_dec(&nentry->usage);
1903                         nentry = NULL;
1904                 }
1905
1906                 /*
1907                  * Protect the next item from destruction, so we can check
1908                  * its list pointers later on.
1909                  */
1910
1911                 if (next != head) {
1912                         nentry = drm_bo_entry(next, pinned_list);
1913                         atomic_inc(&nentry->usage);
1914                 }
1915                 mutex_unlock(&dev->struct_mutex);
1916
1917                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1918                                         allow_errors);
1919                 mutex_lock(&dev->struct_mutex);
1920
1921                 drm_bo_usage_deref_locked(entry);
1922                 if (ret)
1923                         return ret;
1924
1925                 /*
1926                  * Has the next item disappeared from the list?
1927                  */
1928
1929                 do_restart = ((next->prev != list) && (next->prev != prev));
1930
1931                 if (nentry != NULL && do_restart) {
1932                         drm_bo_usage_deref_locked(nentry);
1933                         nentry = NULL;
1934                 }
1935
1936                 if (do_restart)
1937                         goto restart;
1938         }
1939         return 0;
1940 }
1941
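/*
 * Take down the memory manager for memory type @mem_type: clean the
 * unfenced list, evict all buffers of that type and release the
 * underlying drm_mm range manager.  Returns -EBUSY if pinned (NO_MOVE)
 * regions keep the manager from being cleaned.  Call with
 * dev->struct_mutex held.
 */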
1942 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1943 {
1944         drm_buffer_manager_t *bm = &dev->bm;
1945         drm_mem_type_manager_t *man = &bm->man[mem_type];
1946         int ret = -EINVAL;
1947
1948         if (mem_type >= DRM_BO_MEM_TYPES) {
1949                 DRM_ERROR("Illegal memory type %d\n", mem_type);
1950                 return ret;
1951         }
1952
1953         if (!man->has_type) {
1954                 DRM_ERROR("Trying to take down uninitialized "
1955                           "memory manager type\n");
1956                 return ret;
1957         }
1958         man->use_type = 0;
1959         man->has_type = 0;
1960
1961         ret = 0;
1962         if (mem_type > 0) {
1963
1964                 drm_bo_clean_unfenced(dev);
1965                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
1966                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
1967
1968                 if (drm_mm_clean(&man->manager)) {
1969                         drm_mm_takedown(&man->manager);
1970                 } else {
1971                         ret = -EBUSY;
1972                 }
1973         }
1974
1975         return ret;
1976 }
1977
1978 /**
1979  * Evict all buffers of a particular mem_type, but leave memory manager
1980  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
1981  * point since we have the hardware lock.
1982  */
1983
1984 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1985 {
1986         int ret;
1987         drm_buffer_manager_t *bm = &dev->bm;
1988         drm_mem_type_manager_t *man = &bm->man[mem_type];
1989
1990         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1991                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1992                 return -EINVAL;
1993         }
1994
1995         drm_bo_clean_unfenced(dev);
1996         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
1997         if (ret)
1998                 return ret;
1999         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2000
2001         return ret;
2002 }
2003
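/*
 * Initialize the memory manager for memory type @type.  @p_offset and
 * @p_size are given in pages and describe the managed aperture; they
 * are ignored for DRM_BO_MEM_LOCAL, which has no aperture.  Call with
 * dev->struct_mutex held.
 *
 * Illustrative only: for a hypothetical TT aperture of tt_pages pages
 * starting at page 0, the mm_init ioctl below ends up doing
 *
 *	drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
 */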
2004 static int drm_bo_init_mm(drm_device_t * dev,
2005                           unsigned type,
2006                           unsigned long p_offset, unsigned long p_size)
2007 {
2008         drm_buffer_manager_t *bm = &dev->bm;
2009         int ret = -EINVAL;
2010         drm_mem_type_manager_t *man;
2011
2012         if (type >= DRM_BO_MEM_TYPES) {
2013                 DRM_ERROR("Illegal memory type %d\n", type);
2014                 return ret;
2015         }
2016
2017         man = &bm->man[type];
2018         if (man->has_type) {
2019                 DRM_ERROR("Memory manager already initialized for type %d\n",
2020                           type);
2021                 return ret;
2022         }
2023
2024         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2025         if (ret)
2026                 return ret;
2027
2028         ret = 0;
2029         if (type != DRM_BO_MEM_LOCAL) {
2030                 if (!p_size) {
2031                         DRM_ERROR("Zero size memory manager type %u\n", type);
2032                         return -EINVAL;
2033                 }
2034                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2035                 if (ret)
2036                         return ret;
2037         }
2038         man->has_type = 1;
2039         man->use_type = 1;
2040
2041         INIT_LIST_HEAD(&man->lru);
2042         INIT_LIST_HEAD(&man->pinned);
2043
2044         return 0;
2045 }
2046
2047 /*
2048  * This is called from lastclose, so we don't need to bother about
2049  * any clients still running when we set the initialized flag to zero.
2050  */
2051
2052 int drm_bo_driver_finish(drm_device_t * dev)
2053 {
2054         drm_buffer_manager_t *bm = &dev->bm;
2055         int ret = 0;
2056         unsigned i = DRM_BO_MEM_TYPES;
2057         drm_mem_type_manager_t *man;
2058
2059         mutex_lock(&dev->bm.init_mutex);
2060         mutex_lock(&dev->struct_mutex);
2061
2062         if (!bm->initialized)
2063                 goto out;
2064         bm->initialized = 0;
2065
2066         while (i--) {
2067                 man = &bm->man[i];
2068                 if (man->has_type) {
2069                         man->use_type = 0;
2070                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2071                                 ret = -EBUSY;
2072                                 DRM_ERROR("DRM memory manager type %d "
2073                                           "is not clean.\n", i);
2074                         }
2075                         man->has_type = 0;
2076                 }
2077         }
2078         mutex_unlock(&dev->struct_mutex);
2079
2080         if (!cancel_delayed_work(&bm->wq)) {
2081                 flush_scheduled_work();
2082         }
2083         mutex_lock(&dev->struct_mutex);
2084         drm_bo_delayed_delete(dev, 1);
2085         if (list_empty(&bm->ddestroy)) {
2086                 DRM_DEBUG("Delayed destroy list was clean\n");
2087         }
2088         if (list_empty(&bm->man[0].lru)) {
2089                 DRM_DEBUG("Swap list was clean\n");
2090         }
2091         if (list_empty(&bm->man[0].pinned)) {
2092                 DRM_DEBUG("NO_MOVE list was clean\n");
2093         }
2094         if (list_empty(&bm->unfenced)) {
2095                 DRM_DEBUG("Unfenced list was clean\n");
2096         }
2097       out:
2098         mutex_unlock(&dev->struct_mutex);
2099         mutex_unlock(&dev->bm.init_mutex);
2100         return ret;
2101 }
2102
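/*
 * One-time setup of the buffer object manager: brings up the system
 * (local) memory type and the delayed-destroy work queue.  A driver
 * would typically call this once from its load or firstopen hook
 * (illustrative call, not taken from a specific driver):
 *
 *	ret = drm_bo_driver_init(dev);
 *	if (ret)
 *		return ret;
 */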
2103 int drm_bo_driver_init(drm_device_t * dev)
2104 {
2105         drm_bo_driver_t *driver = dev->driver->bo_driver;
2106         drm_buffer_manager_t *bm = &dev->bm;
2107         int ret = -EINVAL;
2108
2109         mutex_lock(&dev->bm.init_mutex);
2110         mutex_lock(&dev->struct_mutex);
2111         if (!driver)
2112                 goto out_unlock;
2113
2114         /*
2115          * Initialize the system memory buffer type.
2116          * Other types need to be driver / IOCTL initialized.
2117          */
2118
2119         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2120         if (ret)
2121                 goto out_unlock;
2122
2123 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2124         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2125 #else
2126         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2127 #endif
2128         bm->initialized = 1;
2129         bm->nice_mode = 1;
2130         atomic_set(&bm->count, 0);
2131         bm->cur_pages = 0;
2132         INIT_LIST_HEAD(&bm->unfenced);
2133         INIT_LIST_HEAD(&bm->ddestroy);
2134       out_unlock:
2135         mutex_unlock(&dev->struct_mutex);
2136         mutex_unlock(&dev->bm.init_mutex);
2137         return ret;
2138 }
2139
2140 EXPORT_SYMBOL(drm_bo_driver_init);
2141
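/*
 * Ioctl for bringing up, taking down, locking and unlocking the
 * managers of the driver-specific memory types (mm_init, mm_takedown,
 * mm_lock, mm_unlock).  The system memory type (0) is managed by the
 * core and cannot be touched from here.
 */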
2142 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2143 {
2144         DRM_DEVICE;
2145
2146         int ret = 0;
2147         drm_mm_init_arg_t arg;
2148         drm_buffer_manager_t *bm = &dev->bm;
2149         drm_bo_driver_t *driver = dev->driver->bo_driver;
2150
2151         if (!driver) {
2152                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2153                 return -EINVAL;
2154         }
2155
2156         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2157
2158         switch (arg.req.op) {
2159         case mm_init:
2160                 ret = -EINVAL;
2161                 mutex_lock(&dev->bm.init_mutex);
2162                 mutex_lock(&dev->struct_mutex);
2163                 if (!bm->initialized) {
2164                         DRM_ERROR("DRM memory manager was not initialized.\n");
2165                         break;
2166                 }
2167                 if (arg.req.mem_type == 0) {
2168                         DRM_ERROR
2169                             ("System memory buffers already initialized.\n");
2170                         break;
2171                 }
2172                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2173                                      arg.req.p_offset, arg.req.p_size);
2174                 break;
2175         case mm_takedown:
2176                 LOCK_TEST_WITH_RETURN(dev, filp);
2177                 mutex_lock(&dev->bm.init_mutex);
2178                 mutex_lock(&dev->struct_mutex);
2179                 ret = -EINVAL;
2180                 if (!bm->initialized) {
2181                         DRM_ERROR("DRM memory manager was not initialized\n");
2182                         break;
2183                 }
2184                 if (arg.req.mem_type == 0) {
2185                         DRM_ERROR("No takedown for System memory buffers.\n");
2186                         break;
2187                 }
2188                 ret = 0;
2189                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2190                         DRM_ERROR("Memory manager type %d not clean. "
2191                                   "Delaying takedown\n", arg.req.mem_type);
2192                 }
2193                 break;
2194         case mm_lock:
2195                 LOCK_TEST_WITH_RETURN(dev, filp);
2196                 mutex_lock(&dev->bm.init_mutex);
2197                 mutex_lock(&dev->struct_mutex);
2198                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2199                 break;
2200         case mm_unlock:
2201                 LOCK_TEST_WITH_RETURN(dev, filp);
2202                 mutex_lock(&dev->bm.init_mutex);
2203                 mutex_lock(&dev->struct_mutex);
2204                 ret = 0;
2205                 break;
2206         default:
2207                 DRM_ERROR("Function not implemented yet\n");
2208                 return -EINVAL;
2209         }
2210
2211         mutex_unlock(&dev->struct_mutex);
2212         mutex_unlock(&dev->bm.init_mutex);
2213         if (ret)
2214                 return ret;
2215
2216         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2217         return 0;
2218 }
2219
2220 /*
2221  * buffer object vm functions.
2222  */
2223
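/*
 * Return whether the memory region is accessed through a fixed PCI
 * aperture rather than through its backing system pages.  Local system
 * memory, and cached or CMA mappings of non-fixed memory types, are
 * not PCI accessible.
 */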
2224 int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
2225 {
2226         drm_buffer_manager_t *bm = &dev->bm;
2227         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2228
2229         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2230                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2231                         return 0;
2232
2233                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2234                         return 0;
2235
2236                 if (mem->flags & DRM_BO_FLAG_CACHED)
2237                         return 0;
2238         }
2239         return 1;
2240 }
2241
2242 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2243
2244 /**
2245  * Get the PCI offset for the buffer object memory.
2246  *
2247  * \param dev The drm device.
2248  * \param mem The memory region of the buffer object.
2249  * \param bus_base On return the base of the PCI region.
2250  * \param bus_offset On return the byte offset into the PCI region.
2251  * \param bus_size On return the byte size of the buffer object or zero if
2252  *     the buffer object memory is not accessible through a PCI region.
2253  * \return Failure indication.
2254  *
2255  * Returns -EINVAL if the memory is currently not mappable, otherwise zero.
2256  */
2257
2258 int drm_bo_pci_offset(drm_device_t * dev,
2259                       drm_bo_mem_reg_t * mem,
2260                       unsigned long *bus_base,
2261                       unsigned long *bus_offset, unsigned long *bus_size)
2262 {
2263         drm_buffer_manager_t *bm = &dev->bm;
2264         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2265
2266         *bus_size = 0;
2267         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2268                 return -EINVAL;
2269
2270         if (drm_mem_reg_is_pci(dev, mem)) {
2271                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2272                 *bus_size = mem->num_pages << PAGE_SHIFT;
2273                 *bus_base = man->io_offset;
2274         }
2275
2276         return 0;
2277 }
2278
2279 /**
2280  * Kill all user-space virtual mappings of this buffer object.
2281  *
2282  * \param bo The buffer object.
2283  *
2284  * Call bo->mutex locked.
2285  */
2286
2287 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
2288 {
2289         drm_device_t *dev = bo->dev;
2290         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2291         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2292
2293         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2294 }
2295
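/*
 * Tear down the fake mmap offset set up by drm_bo_setup_vm_locked:
 * remove the map hash entry, return the file offset node, free the
 * map and drop the usage reference the map held on the buffer.
 * Call with dev->struct_mutex held.
 */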
2296 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
2297 {
2298         drm_map_list_t *list = &bo->map_list;
2299         drm_local_map_t *map;
2300         drm_device_t *dev = bo->dev;
2301
2302         if (list->user_token) {
2303                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2304                 list->user_token = 0;
2305         }
2306         if (list->file_offset_node) {
2307                 drm_mm_put_block(list->file_offset_node);
2308                 list->file_offset_node = NULL;
2309         }
2310
2311         map = list->map;
2312         if (!map)
2313                 return;
2314
2315         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2316         list->map = NULL;
2317         list->user_token = 0ULL;
2318         drm_bo_usage_deref_locked(bo);
2319 }
2320
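/*
 * Set up the fake mmap offset for the buffer: allocate a _DRM_TTM map,
 * reserve a range in the file offset manager and hash it, so that the
 * resulting user_token can be handed to user space as the mmap offset.
 * Takes a usage reference on the buffer that is dropped again in
 * drm_bo_takedown_vm_locked.  Call with dev->struct_mutex held.
 */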
2321 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
2322 {
2323         drm_map_list_t *list = &bo->map_list;
2324         drm_local_map_t *map;
2325         drm_device_t *dev = bo->dev;
2326
2327         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2328         if (!list->map)
2329                 return -ENOMEM;
2330
2331         map = list->map;
2332         map->offset = 0;
2333         map->type = _DRM_TTM;
2334         map->flags = _DRM_REMOVABLE;
2335         map->size = bo->mem.num_pages * PAGE_SIZE;
2336         atomic_inc(&bo->usage);
2337         map->handle = (void *)bo;
2338
2339         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2340                                                     bo->mem.num_pages, 0, 0);
2341
2342         if (!list->file_offset_node) {
2343                 drm_bo_takedown_vm_locked(bo);
2344                 return -ENOMEM;
2345         }
2346
2347         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2348                                                   bo->mem.num_pages, 0);
2349
2350         list->hash.key = list->file_offset_node->start;
2351         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2352                 drm_bo_takedown_vm_locked(bo);
2353                 return -ENOMEM;
2354         }
2355
2356         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2357
2358         return 0;
2359 }