[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as
40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those we
44  * need both bo->mutex and dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
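/*
 * A typical traversal therefore looks like the sketch below. This is only an
 * illustration of the pattern, not code that is called anywhere; compare
 * drm_bo_mem_force_space() and drm_fence_buffer_objects() further down in
 * this file:
 *
 *     mutex_lock(&dev->struct_mutex);
 *     while (!list_empty(head)) {
 *             entry = list_entry(head->next, drm_buffer_object_t, lru);
 *             atomic_inc(&entry->usage);          (keep the entry alive)
 *             mutex_unlock(&dev->struct_mutex);   (can't take bo->mutex while
 *                                                  holding struct_mutex)
 *             mutex_lock(&entry->mutex);
 *             ... operate on entry ...
 *             mutex_unlock(&entry->mutex);
 *             drm_bo_usage_deref_unlocked(entry);
 *             mutex_lock(&dev->struct_mutex);     (list may have changed;
 *                                                  restart the traversal)
 *     }
 *     mutex_unlock(&dev->struct_mutex);
 */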
51
52 static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
53 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
54 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
55 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
56
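/*
 * Translate a memory type index into its corresponding memory type flag bit.
 * The per-memory-type flag bits start at bit 24 of the flag mask
 * (cf. DRM_BO_MASK_MEM).
 */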
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
67 {
68         drm_mem_type_manager_t *man;
69
70         man = &bo->dev->bm.man[bo->pinned_mem_type];
71         list_add_tail(&bo->pinned_lru, &man->pinned);
72 }
73
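/*
 * Put the buffer on the LRU list of its current memory type. Buffers that
 * are pinned (NO_MOVE or NO_EVICT) and already reside in their pinned memory
 * type are kept off the eviction LRU entirely.
 */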
74 void drm_bo_add_to_lru(drm_buffer_object_t * bo)
75 {
76         drm_mem_type_manager_t *man;
77
78         if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
79             || bo->mem.mem_type != bo->pinned_mem_type) {
80                 man = &bo->dev->bm.man[bo->mem.mem_type];
81                 list_add_tail(&bo->lru, &man->lru);
82         } else {
83                 INIT_LIST_HEAD(&bo->lru);
84         }
85 }
86
87 static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
88 {
89 #ifdef DRM_ODD_MM_COMPAT
90         int ret;
91
92         ret = drm_bo_lock_kmm(bo);
93         if (ret)
94                 return ret;
95         drm_bo_unmap_virtual(bo);
96         if (old_is_pci)
97                 drm_bo_finish_unmap(bo);
98 #else
99         drm_bo_unmap_virtual(bo);
100 #endif
101         return 0;
102 }
103
104 static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
105 {
106 #ifdef DRM_ODD_MM_COMPAT
107         int ret;
108
109         ret = drm_bo_remap_bound(bo);
110         if (ret) {
111                 DRM_ERROR("Failed to remap a bound buffer object.\n"
112                           "\tThis might cause a sigbus later.\n");
113         }
114         drm_bo_unlock_kmm(bo);
115 #endif
116 }
117
118 /*
119  * Call bo->mutex locked.
120  */
121
122 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
123 {
124         drm_device_t *dev = bo->dev;
125         int ret = 0;
126         bo->ttm = NULL;
127
128         switch (bo->type) {
129         case drm_bo_type_dc:
130                 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
131                 if (!bo->ttm)
132                         ret = -ENOMEM;
133                 break;
134         case drm_bo_type_user:
135         case drm_bo_type_fake:
136                 break;
137         default:
138                 DRM_ERROR("Illegal buffer object type\n");
139                 ret = -EINVAL;
140                 break;
141         }
142
143         return ret;
144 }
145
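/*
 * Move the buffer to the region described by mem. A ttm is created and bound
 * first if the new memory type needs one. Depending on the old and new
 * placements the move is done by simply adopting the new region, by
 * drm_bo_move_ttm(), by the driver's move hook if present, or by a memcpy
 * fallback; on success bo->offset and the eviction flag are updated.
 */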
146 static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
147                                   drm_bo_mem_reg_t * mem,
148                                   int evict, int no_wait)
149 {
150         drm_device_t *dev = bo->dev;
151         drm_buffer_manager_t *bm = &dev->bm;
152         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
153         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
154         drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
155         drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
156         int ret = 0;
157
158         if (old_is_pci || new_is_pci)
159                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
160         if (ret)
161                 return ret;
162
163         /*
164          * Create and bind a ttm if required.
165          */
166
167         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
168                 ret = drm_bo_add_ttm(bo);
169                 if (ret)
170                         goto out_err;
171
172                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
173                         ret = drm_bind_ttm(bo->ttm, new_man->flags &
174                                            DRM_BO_FLAG_CACHED,
175                                            mem->mm_node->start);
176                         if (ret)
177                                 goto out_err;
178                 }
179         }
180
181         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
182
183                 drm_bo_mem_reg_t *old_mem = &bo->mem;
184                 uint32_t save_flags = old_mem->flags;
185                 uint32_t save_mask = old_mem->mask;
186
187                 *old_mem = *mem;
188                 mem->mm_node = NULL;
189                 old_mem->mask = save_mask;
190                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
191
192         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
193                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
194
195                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
196
197         } else if (dev->driver->bo_driver->move) {
198                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
199
200         } else {
201
202                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
203
204         }
205
206         if (ret)
207                 goto out_err;
208
209         if (old_is_pci || new_is_pci)
210                 drm_bo_vm_post_move(bo);
211
212         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
213                 ret =
214                     dev->driver->bo_driver->invalidate_caches(dev,
215                                                               bo->mem.flags);
216                 if (ret)
217                         DRM_ERROR("Cannot flush read caches\n");
218         }
219
220         DRM_FLAG_MASKED(bo->priv_flags,
221                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
222                         _DRM_BO_FLAG_EVICTED);
223
224         if (bo->mem.mm_node)
225                 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
226
227         return 0;
228
229       out_err:
230         if (old_is_pci || new_is_pci)
231                 drm_bo_vm_post_move(bo);
232
233         new_man = &bm->man[bo->mem.mem_type];
234         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
235                 drm_ttm_unbind(bo->ttm);
236                 drm_destroy_ttm(bo->ttm);
237                 bo->ttm = NULL;
238         }
239
240         return ret;
241 }
242
243 /*
244  * Call bo->mutex locked.
245  * Wait until the buffer is idle.
246  */
247
248 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
249                 int no_wait)
250 {
251
252         drm_fence_object_t *fence = bo->fence;
253         int ret;
254
255         if (fence) {
256                 drm_device_t *dev = bo->dev;
257                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
258                         drm_fence_usage_deref_unlocked(dev, fence);
259                         bo->fence = NULL;
260                         return 0;
261                 }
262                 if (no_wait) {
263                         return -EBUSY;
264                 }
265                 ret =
266                     drm_fence_object_wait(dev, fence, lazy, ignore_signals,
267                                           bo->fence_type);
268                 if (ret)
269                         return ret;
270
271                 drm_fence_usage_deref_unlocked(dev, fence);
272                 bo->fence = NULL;
273
274         }
275         return 0;
276 }
277
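/*
 * Wait for the buffer's fence to expire, retrying for up to three seconds in
 * nice mode. If the fence still hasn't signaled, assume a GPU lockup (or
 * that the fence driver was taken down), turn off nice mode and drop the
 * fence reference.
 */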
278 static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
279 {
280         drm_device_t *dev = bo->dev;
281         drm_buffer_manager_t *bm = &dev->bm;
282
283         if (bo->fence) {
284                 if (bm->nice_mode) {
285                         unsigned long _end = jiffies + 3 * DRM_HZ;
286                         int ret;
287                         do {
288                                 ret = drm_bo_wait(bo, 0, 1, 0);
289                                 if (ret && allow_errors)
290                                         return ret;
291
292                         } while (ret && !time_after_eq(jiffies, _end));
293
294                         if (bo->fence) {
295                                 bm->nice_mode = 0;
296                                 DRM_ERROR("Detected GPU lockup or "
297                                           "fence driver was taken down. "
298                                           "Evicting buffer.\n");
299                         }
300                 }
301                 if (bo->fence) {
302                         drm_fence_usage_deref_unlocked(dev, bo->fence);
303                         bo->fence = NULL;
304                 }
305         }
306         return 0;
307 }
308
309 /*
310  * Call dev->struct_mutex locked.
311  * Attempts to remove all private references to a buffer by expiring its
312  * fence object and removing it from lru lists and memory managers.
313  */
314
315 static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
316 {
317         drm_device_t *dev = bo->dev;
318         drm_buffer_manager_t *bm = &dev->bm;
319
320         atomic_inc(&bo->usage);
321         mutex_unlock(&dev->struct_mutex);
322         mutex_lock(&bo->mutex);
323
324         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
325
326         if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
327                 drm_fence_usage_deref_locked(dev, bo->fence);
328                 bo->fence = NULL;
329         }
330
331         if (bo->fence && remove_all)
332                 (void)drm_bo_expire_fence(bo, 0);
333
334         mutex_lock(&dev->struct_mutex);
335
336         if (!atomic_dec_and_test(&bo->usage)) {
337                 goto out;
338         }
339
340         if (!bo->fence) {
341                 list_del_init(&bo->lru);
342                 if (bo->mem.mm_node) {
343                         drm_mm_put_block(bo->mem.mm_node);
344                         if (bo->pinned_node == bo->mem.mm_node)
345                                 bo->pinned_node = NULL;
346                         bo->mem.mm_node = NULL;
347                 }
348                 list_del_init(&bo->pinned_lru);
349                 if (bo->pinned_node) {
350                         drm_mm_put_block(bo->pinned_node);
351                         bo->pinned_node = NULL;
352                 }
353                 list_del_init(&bo->ddestroy);
354                 mutex_unlock(&bo->mutex);
355                 drm_bo_destroy_locked(bo);
356                 return;
357         }
358
359         if (list_empty(&bo->ddestroy)) {
360                 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
361                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
362                 schedule_delayed_work(&bm->wq,
363                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
364         }
365
366       out:
367         mutex_unlock(&bo->mutex);
368         return;
369 }
370
371 /*
372  * Verify that refcount is 0 and that there are no internal references
373  * to the buffer object. Then destroy it.
374  */
375
376 static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
377 {
378         drm_device_t *dev = bo->dev;
379         drm_buffer_manager_t *bm = &dev->bm;
380
381         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
382             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
383             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
384                 if (bo->fence != NULL) {
385                         DRM_ERROR("Fence was non-zero.\n");
386                         drm_bo_cleanup_refs(bo, 0);
387                         return;
388                 }
389
390 #ifdef DRM_ODD_MM_COMPAT
391                 BUG_ON(!list_empty(&bo->vma_list));
392                 BUG_ON(!list_empty(&bo->p_mm_list));
393 #endif
394
395                 if (bo->ttm) {
396                         drm_ttm_unbind(bo->ttm);
397                         drm_destroy_ttm(bo->ttm);
398                         bo->ttm = NULL;
399                 }
400
401                 atomic_dec(&bm->count);
402
403                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
404
405                 return;
406         }
407
408         /*
409          * Some stuff is still trying to reference the buffer object.
410          * Get rid of those references.
411          */
412
413         drm_bo_cleanup_refs(bo, 0);
414
415         return;
416 }
417
418 /*
419  * Call dev->struct_mutex locked.
420  */
421
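/*
 * Walk the delayed-destroy list and try to release each entry. A usage
 * reference is taken on the next entry before the current one is cleaned up,
 * since drm_bo_cleanup_refs() temporarily drops dev->struct_mutex and the
 * next entry could otherwise go away under us.
 */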
422 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
423 {
424         drm_buffer_manager_t *bm = &dev->bm;
425
426         drm_buffer_object_t *entry, *nentry;
427         struct list_head *list, *next;
428
429         list_for_each_safe(list, next, &bm->ddestroy) {
430                 entry = list_entry(list, drm_buffer_object_t, ddestroy);
431
432                 nentry = NULL;
433                 if (next != &bm->ddestroy) {
434                         nentry = list_entry(next, drm_buffer_object_t,
435                                             ddestroy);
436                         atomic_inc(&nentry->usage);
437                 }
438
439                 drm_bo_cleanup_refs(entry, remove_all);
440
441                 if (nentry) {
442                         atomic_dec(&nentry->usage);
443                 }
444         }
445 }
446
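/*
 * Delayed-destroy work item. It retries drm_bo_delayed_delete() and
 * re-schedules itself every DRM_HZ / 100 jiffies for as long as buffers
 * remain on the ddestroy list. The #if is there for the workqueue API change
 * in kernel 2.6.20.
 */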
447 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
448 static void drm_bo_delayed_workqueue(void *data)
449 #else
450 static void drm_bo_delayed_workqueue(struct work_struct *work)
451 #endif
452 {
453 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
454         drm_device_t *dev = (drm_device_t *) data;
455         drm_buffer_manager_t *bm = &dev->bm;
456 #else
457         drm_buffer_manager_t *bm =
458             container_of(work, drm_buffer_manager_t, wq.work);
459         drm_device_t *dev = container_of(bm, drm_device_t, bm);
460 #endif
461
462         DRM_DEBUG("Delayed delete Worker\n");
463
464         mutex_lock(&dev->struct_mutex);
465         if (!bm->initialized) {
466                 mutex_unlock(&dev->struct_mutex);
467                 return;
468         }
469         drm_bo_delayed_delete(dev, 0);
470         if (bm->initialized && !list_empty(&bm->ddestroy)) {
471                 schedule_delayed_work(&bm->wq,
472                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
473         }
474         mutex_unlock(&dev->struct_mutex);
475 }
476
477 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
478 {
479         if (atomic_dec_and_test(&bo->usage)) {
480                 drm_bo_destroy_locked(bo);
481         }
482 }
483
484 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
485 {
486         drm_buffer_object_t *bo =
487             drm_user_object_entry(uo, drm_buffer_object_t, base);
488
489         drm_bo_takedown_vm_locked(bo);
490         drm_bo_usage_deref_locked(bo);
491 }
492
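/*
 * Drop a usage reference without dev->struct_mutex held. When the count
 * reaches zero it is re-checked under dev->struct_mutex before destruction,
 * since a handle lookup done under that mutex may have raised it again (see
 * the locking comment at the top of this file).
 */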
493 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
494 {
495         drm_device_t *dev = bo->dev;
496
497         if (atomic_dec_and_test(&bo->usage)) {
498                 mutex_lock(&dev->struct_mutex);
499                 if (atomic_read(&bo->usage) == 0)
500                         drm_bo_destroy_locked(bo);
501                 mutex_unlock(&dev->struct_mutex);
502         }
503 }
504
505 /*
506  * Note. The caller has to register (if applicable)
507  * and deregister fence object usage.
508  */
509
510 int drm_fence_buffer_objects(drm_file_t * priv,
511                              struct list_head *list,
512                              uint32_t fence_flags,
513                              drm_fence_object_t * fence,
514                              drm_fence_object_t ** used_fence)
515 {
516         drm_device_t *dev = priv->head->dev;
517         drm_buffer_manager_t *bm = &dev->bm;
518
519         drm_buffer_object_t *entry;
520         uint32_t fence_type = 0;
521         int count = 0;
522         int ret = 0;
523         struct list_head *l;
524         LIST_HEAD(f_list);
525
526         mutex_lock(&dev->struct_mutex);
527
528         if (!list)
529                 list = &bm->unfenced;
530
531         list_for_each_entry(entry, list, lru) {
532                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
533                 fence_type |= entry->fence_type;
534                 if (entry->fence_class != 0) {
535                         DRM_ERROR("Fence class %d is not implemented yet.\n",
536                                   entry->fence_class);
537                         ret = -EINVAL;
538                         goto out;
539                 }
540                 count++;
541         }
542
543         if (!count) {
544                 ret = -EINVAL;
545                 goto out;
546         }
547
548         /*
549          * Transfer to a local list before we release the dev->struct_mutex;
550          * This is so we don't get any new unfenced objects while fencing
551  * the ones we already have.
552          */
553
554         list_splice_init(list, &f_list);
555
556         if (fence) {
557                 if ((fence_type & fence->type) != fence_type) {
558                         DRM_ERROR("Given fence doesn't match buffers "
559                                   "on unfenced list.\n");
560                         ret = -EINVAL;
561                         goto out;
562                 }
563         } else {
564                 mutex_unlock(&dev->struct_mutex);
565                 ret = drm_fence_object_create(dev, 0, fence_type,
566                                               fence_flags | DRM_FENCE_FLAG_EMIT,
567                                               &fence);
568                 mutex_lock(&dev->struct_mutex);
569                 if (ret)
570                         goto out;
571         }
572
573         count = 0;
574         l = f_list.next;
575         while (l != &f_list) {
576                 prefetch(l->next);
577                 entry = list_entry(l, drm_buffer_object_t, lru);
578                 atomic_inc(&entry->usage);
579                 mutex_unlock(&dev->struct_mutex);
580                 mutex_lock(&entry->mutex);
581                 mutex_lock(&dev->struct_mutex);
582                 list_del_init(l);
583                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
584                         count++;
585                         if (entry->fence)
586                                 drm_fence_usage_deref_locked(dev, entry->fence);
587                         entry->fence = fence;
588                         DRM_FLAG_MASKED(entry->priv_flags, 0,
589                                         _DRM_BO_FLAG_UNFENCED);
590                         DRM_WAKEUP(&entry->event_queue);
591                         drm_bo_add_to_lru(entry);
592                 }
593                 mutex_unlock(&entry->mutex);
594                 drm_bo_usage_deref_locked(entry);
595                 l = f_list.next;
596         }
597         atomic_add(count, &fence->usage);
598         DRM_DEBUG("Fenced %d buffers\n", count);
599       out:
600         mutex_unlock(&dev->struct_mutex);
601         *used_fence = fence;
602         return ret;
603 }
604
605 EXPORT_SYMBOL(drm_fence_buffer_objects);
606
607 /*
608  * bo->mutex locked
609  */
610
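/*
 * Evict the buffer from the given memory type. Fake buffers are simply
 * flipped back to local memory; other buffers are moved to whatever
 * placement the driver's evict_mask allows, after waiting for any fence
 * covering the buffer.
 */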
611 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
612                         int no_wait)
613 {
614         int ret = 0;
615         drm_device_t *dev = bo->dev;
616         drm_bo_mem_reg_t evict_mem;
617
618         /*
619          * Someone might have modified the buffer before we took the buffer mutex.
620          */
621
622         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
623                 goto out;
624         if (bo->mem.mem_type != mem_type)
625                 goto out;
626
627         ret = drm_bo_wait(bo, 0, 0, no_wait);
628
629         if (ret && ret != -EAGAIN) {
630                 DRM_ERROR("Failed to expire fence before "
631                           "buffer eviction.\n");
632                 goto out;
633         }
634
635         evict_mem = bo->mem;
636         evict_mem.mm_node = NULL;
637
638         if (bo->type == drm_bo_type_fake) {
639                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
640                 bo->mem.mm_node = NULL;
641                 goto out1;
642         }
643
644         evict_mem = bo->mem;
645         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
646         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
647
648         if (ret) {
649                 if (ret != -EAGAIN)
650                         DRM_ERROR("Failed to find memory space for "
651                                   "buffer 0x%p eviction.\n", bo);
652                 goto out;
653         }
654
655         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
656
657         if (ret) {
658                 if (ret != -EAGAIN)
659                         DRM_ERROR("Buffer eviction failed\n");
660                 goto out;
661         }
662
663       out1:
664         mutex_lock(&dev->struct_mutex);
665         if (evict_mem.mm_node) {
666                 if (evict_mem.mm_node != bo->pinned_node)
667                         drm_mm_put_block(evict_mem.mm_node);
668                 evict_mem.mm_node = NULL;
669         }
670         list_del(&bo->lru);
671         drm_bo_add_to_lru(bo);
672         mutex_unlock(&dev->struct_mutex);
673
674         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
675                         _DRM_BO_FLAG_EVICTED);
676
677       out:
678         return ret;
679 }
680
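/*
 * Make room in the given memory type by evicting buffers from its LRU one at
 * a time, until drm_mm_search_free() finds a large enough hole or the LRU
 * runs empty.
 */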
681 static int drm_bo_mem_force_space(drm_device_t * dev,
682                                   drm_bo_mem_reg_t * mem,
683                                   uint32_t mem_type, int no_wait)
684 {
685         drm_mm_node_t *node;
686         drm_buffer_manager_t *bm = &dev->bm;
687         drm_buffer_object_t *entry;
688         drm_mem_type_manager_t *man = &bm->man[mem_type];
689         struct list_head *lru;
690         unsigned long num_pages = mem->num_pages;
691         int ret;
692
693         mutex_lock(&dev->struct_mutex);
694         do {
695                 node = drm_mm_search_free(&man->manager, num_pages,
696                                           mem->page_alignment, 1);
697                 if (node)
698                         break;
699
700                 lru = &man->lru;
701                 if (lru->next == lru)
702                         break;
703
704                 entry = list_entry(lru->next, drm_buffer_object_t, lru);
705                 atomic_inc(&entry->usage);
706                 mutex_unlock(&dev->struct_mutex);
707                 mutex_lock(&entry->mutex);
708                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
709
710                 ret = drm_bo_evict(entry, mem_type, no_wait);
711                 mutex_unlock(&entry->mutex);
712                 drm_bo_usage_deref_unlocked(entry);
713                 if (ret)
714                         return ret;
715                 mutex_lock(&dev->struct_mutex);
716         } while (1);
717
718         if (!node) {
719                 mutex_unlock(&dev->struct_mutex);
720                 return -ENOMEM;
721         }
722
723         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
724         mutex_unlock(&dev->struct_mutex);
725         mem->mm_node = node;
726         mem->mem_type = mem_type;
727         return 0;
728 }
729
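/*
 * Check whether a memory type manager is compatible with the requested
 * placement mask. Returns 1 and the resulting placement flags in res_mask if
 * it is, 0 otherwise.
 */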
730 static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
731                                 uint32_t mem_type,
732                                 uint32_t mask, uint32_t * res_mask)
733 {
734         uint32_t cur_flags = drm_bo_type_flags(mem_type);
735         uint32_t flag_diff;
736
737         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
738                 cur_flags |= DRM_BO_FLAG_CACHED;
739         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
740                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
741         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
742                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
743
744         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
745                 return 0;
746
747         if (mem_type == DRM_BO_MEM_LOCAL) {
748                 *res_mask = cur_flags;
749                 return 1;
750         }
751
752         flag_diff = (mask ^ cur_flags);
753         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
754             (!(mask & DRM_BO_FLAG_CACHED) ||
755              (mask & DRM_BO_FLAG_FORCE_CACHING)))
756                 return 0;
757
758         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
759             ((mask & DRM_BO_FLAG_MAPPABLE) ||
760              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
761                 return 0;
762
763         *res_mask = cur_flags;
764         return 1;
765 }
766
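/*
 * Find space for the buffer: first walk the driver's memory type priority
 * list looking for free space, then the busy priority list, forcing space by
 * eviction. Returns -EAGAIN if eviction failed only temporarily, -ENOMEM if
 * no space could be made, or -EINVAL if no usable memory type was found.
 */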
767 int drm_bo_mem_space(drm_buffer_object_t * bo,
768                      drm_bo_mem_reg_t * mem, int no_wait)
769 {
770         drm_device_t *dev = bo->dev;
771         drm_buffer_manager_t *bm = &dev->bm;
772         drm_mem_type_manager_t *man;
773
774         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
775         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
776         uint32_t i;
777         uint32_t mem_type = DRM_BO_MEM_LOCAL;
778         uint32_t cur_flags;
779         int type_found = 0;
780         int type_ok = 0;
781         int has_eagain = 0;
782         drm_mm_node_t *node = NULL;
783         int ret;
784
785         mem->mm_node = NULL;
786         for (i = 0; i < num_prios; ++i) {
787                 mem_type = prios[i];
788                 man = &bm->man[mem_type];
789
790                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
791                                                &cur_flags);
792
793                 if (!type_ok)
794                         continue;
795
796                 if (mem_type == DRM_BO_MEM_LOCAL)
797                         break;
798
799                 if ((mem_type == bo->pinned_mem_type) &&
800                     (bo->pinned_node != NULL)) {
801                         node = bo->pinned_node;
802                         break;
803                 }
804
805                 mutex_lock(&dev->struct_mutex);
806                 if (man->has_type && man->use_type) {
807                         type_found = 1;
808                         node = drm_mm_search_free(&man->manager, mem->num_pages,
809                                                   mem->page_alignment, 1);
810                         if (node)
811                                 node = drm_mm_get_block(node, mem->num_pages,
812                                                         mem->page_alignment);
813                 }
814                 mutex_unlock(&dev->struct_mutex);
815                 if (node)
816                         break;
817         }
818
819         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
820                 mem->mm_node = node;
821                 mem->mem_type = mem_type;
822                 mem->flags = cur_flags;
823                 return 0;
824         }
825
826         if (!type_found)
827                 return -EINVAL;
828
829         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
830         prios = dev->driver->bo_driver->mem_busy_prio;
831
832         for (i = 0; i < num_prios; ++i) {
833                 mem_type = prios[i];
834                 man = &bm->man[mem_type];
835
836                 if (!man->has_type)
837                         continue;
838
839                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
840                         continue;
841
842                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
843
844                 if (ret == 0) {
845                         mem->flags = cur_flags;
846                         return 0;
847                 }
848
849                 if (ret == -EAGAIN)
850                         has_eagain = 1;
851         }
852
853         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
854         return ret;
855 }
856
857 EXPORT_SYMBOL(drm_bo_mem_space);
858
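/*
 * Sanity-check a new flag mask against the buffer type and the caller's
 * privileges (DRM_BO_FLAG_NO_EVICT needs a privileged process) before
 * storing it in bo->mem.mask.
 */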
859 static int drm_bo_new_mask(drm_buffer_object_t * bo,
860                            uint32_t new_mask, uint32_t hint)
861 {
862         uint32_t new_props;
863
864         if (bo->type == drm_bo_type_user) {
865                 DRM_ERROR("User buffers are not supported yet\n");
866                 return -EINVAL;
867         }
868         if (bo->type == drm_bo_type_fake &&
869             !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
870                 DRM_ERROR("Fake buffers must be pinned.\n");
871                 return -EINVAL;
872         }
873
874         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
875                 DRM_ERROR
876                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
877                      "processes\n");
878                 return -EPERM;
879         }
880
881         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
882                                 DRM_BO_FLAG_READ);
883
884         if (!new_props) {
885                 DRM_ERROR("Invalid buffer object rwx properties\n");
886                 return -EINVAL;
887         }
888
889         bo->mem.mask = new_mask;
890         return 0;
891 }
892
893 /*
894  * Call dev->struct_mutex locked.
895  */
896
897 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
898                                               uint32_t handle, int check_owner)
899 {
900         drm_user_object_t *uo;
901         drm_buffer_object_t *bo;
902
903         uo = drm_lookup_user_object(priv, handle);
904
905         if (!uo || (uo->type != drm_buffer_type)) {
906                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
907                 return NULL;
908         }
909
910         if (check_owner && priv != uo->owner) {
911                 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
912                         return NULL;
913         }
914
915         bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
916         atomic_inc(&bo->usage);
917         return bo;
918 }
919
920 /*
921  * Call bo->mutex locked.
922  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
923  * Unlike drm_bo_busy(), this does not do any fence flushing.
924  */
925
926 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
927 {
928         drm_fence_object_t *fence = bo->fence;
929
930         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
931         if (fence) {
932                 drm_device_t *dev = bo->dev;
933                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
934                         drm_fence_usage_deref_unlocked(dev, fence);
935                         bo->fence = NULL;
936                         return 0;
937                 }
938                 return 1;
939         }
940         return 0;
941 }
942
943 /*
944  * Call bo->mutex locked.
945  * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
946  */
947
948 static int drm_bo_busy(drm_buffer_object_t * bo)
949 {
950         drm_fence_object_t *fence = bo->fence;
951
952         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
953         if (fence) {
954                 drm_device_t *dev = bo->dev;
955                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
956                         drm_fence_usage_deref_unlocked(dev, fence);
957                         bo->fence = NULL;
958                         return 0;
959                 }
960                 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
961                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
962                         drm_fence_usage_deref_unlocked(dev, fence);
963                         bo->fence = NULL;
964                         return 0;
965                 }
966                 return 1;
967         }
968         return 0;
969 }
970
971 static int drm_bo_read_cached(drm_buffer_object_t * bo)
972 {
973         int ret = 0;
974
975         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
976         if (bo->mem.mm_node)
977                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
978         return ret;
979 }
980
981 /*
982  * Wait until a buffer is unmapped.
983  */
984
985 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
986 {
987         int ret = 0;
988
989         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
990                 return -EBUSY;
991
992         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
993                     atomic_read(&bo->mapped) == -1);
994
995         if (ret == -EINTR)
996                 ret = -EAGAIN;
997
998         return ret;
999 }
1000
1001 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
1002 {
1003         int ret;
1004
1005         mutex_lock(&bo->mutex);
1006         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1007         mutex_unlock(&bo->mutex);
1008         return ret;
1009 }
1010
1011 /*
1012  * Wait until a buffer scheduled to be fenced moves off the unfenced list.
1013  * Until then, we cannot really do anything with it except delete it.
1014  * The unfenced list is a PITA, and the operations
1015  * 1) validating
1016  * 2) submitting commands
1017  * 3) fencing
1018  * should really be a single atomic operation.
1019  * For now we "solve" this problem by keeping
1020  * the buffer "unfenced" after validating, but before fencing.
1021  */
1022
1023 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
1024                                 int eagain_if_wait)
1025 {
1026         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1027
1028         if (ret && no_wait)
1029                 return -EBUSY;
1030         else if (!ret)
1031                 return 0;
1032
1033         ret = 0;
1034         mutex_unlock(&bo->mutex);
1035         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1036                     !drm_bo_check_unfenced(bo));
1037         mutex_lock(&bo->mutex);
1038         if (ret == -EINTR)
1039                 return -EAGAIN;
1040         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1041         if (ret) {
1042                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1043                 return -EBUSY;
1044         }
1045         if (eagain_if_wait)
1046                 return -EAGAIN;
1047
1048         return 0;
1049 }
1050
1051 /*
1052  * Fill in the ioctl reply argument with buffer info.
1053  * Bo locked.
1054  */
1055
1056 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
1057                                 drm_bo_arg_reply_t * rep)
1058 {
1059         rep->handle = bo->base.hash.key;
1060         rep->flags = bo->mem.flags;
1061         rep->size = bo->mem.num_pages * PAGE_SIZE;
1062         rep->offset = bo->offset;
1063         rep->arg_handle = bo->map_list.user_token;
1064         rep->mask = bo->mem.mask;
1065         rep->buffer_start = bo->buffer_start;
1066         rep->fence_flags = bo->fence_type;
1067         rep->rep_flags = 0;
1068         rep->page_alignment = bo->mem.page_alignment;
1069
1070         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1071                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1072                                 DRM_BO_REP_BUSY);
1073         }
1074 }
1075
1076 /*
1077  * Wait for buffer idle and register that we've mapped the buffer.
1078  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1079  * so that if the client dies, the mapping is automatically
1080  * unregistered.
1081  */
1082
1083 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1084                                  uint32_t map_flags, unsigned hint,
1085                                  drm_bo_arg_reply_t * rep)
1086 {
1087         drm_buffer_object_t *bo;
1088         drm_device_t *dev = priv->head->dev;
1089         int ret = 0;
1090         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1091
1092         mutex_lock(&dev->struct_mutex);
1093         bo = drm_lookup_buffer_object(priv, handle, 1);
1094         mutex_unlock(&dev->struct_mutex);
1095
1096         if (!bo)
1097                 return -EINVAL;
1098
1099         mutex_lock(&bo->mutex);
1100         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1101                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1102                 if (ret)
1103                         goto out;
1104         }
1105
1106         /*
1107          * If this returns true, we are currently unmapped.
1108          * We need to do this test, because unmapping can
1109          * be done without the bo->mutex held.
1110          */
1111
1112         while (1) {
1113                 if (atomic_inc_and_test(&bo->mapped)) {
1114                         if (no_wait && drm_bo_busy(bo)) {
1115                                 atomic_dec(&bo->mapped);
1116                                 ret = -EBUSY;
1117                                 goto out;
1118                         }
1119                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1120                         if (ret) {
1121                                 atomic_dec(&bo->mapped);
1122                                 goto out;
1123                         }
1124
1125                         if ((map_flags & DRM_BO_FLAG_READ) &&
1126                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1127                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1128                                 drm_bo_read_cached(bo);
1129                         }
1130                         break;
1131                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1132                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1133                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1134
1135                         /*
1136                          * We are already mapped with different flags;
1137                          * we need to wait for the buffer to be unmapped.
1138                          */
1139
1140                         ret = drm_bo_wait_unmapped(bo, no_wait);
1141                         if (ret)
1142                                 goto out;
1143
1144                         continue;
1145                 }
1146                 break;
1147         }
1148
1149         mutex_lock(&dev->struct_mutex);
1150         ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1151         mutex_unlock(&dev->struct_mutex);
1152         if (ret) {
1153                 if (atomic_add_negative(-1, &bo->mapped))
1154                         DRM_WAKEUP(&bo->event_queue);
1155
1156         } else
1157                 drm_bo_fill_rep_arg(bo, rep);
1158       out:
1159         mutex_unlock(&bo->mutex);
1160         drm_bo_usage_deref_unlocked(bo);
1161         return ret;
1162 }
1163
1164 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1165 {
1166         drm_device_t *dev = priv->head->dev;
1167         drm_buffer_object_t *bo;
1168         drm_ref_object_t *ro;
1169         int ret = 0;
1170
1171         mutex_lock(&dev->struct_mutex);
1172
1173         bo = drm_lookup_buffer_object(priv, handle, 1);
1174         if (!bo) {
1175                 ret = -EINVAL;
1176                 goto out;
1177         }
1178
1179         ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1180         if (!ro) {
1181                 ret = -EINVAL;
1182                 goto out;
1183         }
1184
1185         drm_remove_ref_object(priv, ro);
1186         drm_bo_usage_deref_locked(bo);
1187       out:
1188         mutex_unlock(&dev->struct_mutex);
1189         return ret;
1190 }
1191
1192 /*
1193  * Call dev->struct_mutex locked.
1194  */
1195
1196 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1197                                          drm_user_object_t * uo,
1198                                          drm_ref_t action)
1199 {
1200         drm_buffer_object_t *bo =
1201             drm_user_object_entry(uo, drm_buffer_object_t, base);
1202
1203         /*
1204  * We DON'T want to take bo->mutex here, because drm_bo_wait_unmapped()
1205  * holds it while waiting for the buffer to become unmapped.
1206          */
1207
1208         BUG_ON(action != _DRM_REF_TYPE1);
1209
1210         if (atomic_add_negative(-1, &bo->mapped))
1211                 DRM_WAKEUP(&bo->event_queue);
1212 }
1213
1214 /*
1215  * bo->mutex locked.
1216  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1217  */
1218
1219 int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1220                        int no_wait, int move_unfenced)
1221 {
1222         drm_device_t *dev = bo->dev;
1223         drm_buffer_manager_t *bm = &dev->bm;
1224         int ret = 0;
1225         drm_bo_mem_reg_t mem;
1226         /*
1227          * Flush outstanding fences.
1228          */
1229
1230         drm_bo_busy(bo);
1231
1232         /*
1233          * Wait for outstanding fences.
1234          */
1235
1236         ret = drm_bo_wait(bo, 0, 0, no_wait);
1237         if (ret)
1238                 return ret;
1239
1240         mem.num_pages = bo->mem.num_pages;
1241         mem.size = mem.num_pages << PAGE_SHIFT;
1242         mem.mask = new_mem_flags;
1243         mem.page_alignment = bo->mem.page_alignment;
1244
1245         mutex_lock(&bm->evict_mutex);
1246         mutex_lock(&dev->struct_mutex);
1247         list_del(&bo->lru);
1248         list_add_tail(&bo->lru, &bm->unfenced);
1249         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1250                         _DRM_BO_FLAG_UNFENCED);
1251         mutex_unlock(&dev->struct_mutex);
1252
1253         /*
1254          * Determine where to move the buffer.
1255          */
1256         ret = drm_bo_mem_space(bo, &mem, no_wait);
1257         if (ret)
1258                 goto out_unlock;
1259
1260         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1261
1262  out_unlock:
1263         if (ret || !move_unfenced) {
1264                 mutex_lock(&dev->struct_mutex);
1265                 if (mem.mm_node) {
1266                         if (mem.mm_node != bo->pinned_node)
1267                                 drm_mm_put_block(mem.mm_node);
1268                         mem.mm_node = NULL;
1269                 }
1270                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1271                 DRM_WAKEUP(&bo->event_queue);
1272                 list_del(&bo->lru);
1273                 drm_bo_add_to_lru(bo);
1274                 mutex_unlock(&dev->struct_mutex);
1275         }
1276
1277         mutex_unlock(&bm->evict_mutex);
1278         return ret;
1279 }
1280
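/*
 * Returns 1 if the buffer's current placement already satisfies its mask,
 * taking the caching and mappability requirements into account, and 0 if it
 * needs to be moved.
 */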
1281 static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
1282 {
1283         uint32_t flag_diff = (mem->mask ^ mem->flags);
1284
1285         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1286                 return 0;
1287         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1288             (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1289              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1290           return 0;
1291         }
1292         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1293             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1294              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1295                 return 0;
1296         return 1;
1297 }
1298
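/*
 * For fake buffers: pick a compatible memory type from the driver's priority
 * list and fill in mem->mem_type and mem->flags accordingly, without
 * allocating any space from a memory manager.
 */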
1299 static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
1300 {
1301         drm_buffer_manager_t *bm = &dev->bm;
1302         drm_mem_type_manager_t *man;
1303         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1304         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1305         uint32_t i;
1306         int type_ok = 0;
1307         uint32_t mem_type = 0;
1308         uint32_t cur_flags;
1309
1310         if (drm_bo_mem_compat(mem))
1311                 return 0;
1312
1313         BUG_ON(mem->mm_node);
1314
1315         for (i = 0; i < num_prios; ++i) {
1316                 mem_type = prios[i];
1317                 man = &bm->man[mem_type];
1318                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1319                                                &cur_flags);
1320                 if (type_ok)
1321                         break;
1322         }
1323
1324         if (type_ok) {
1325                 mem->mm_node = NULL;
1326                 mem->mem_type = mem_type;
1327                 mem->flags = cur_flags;
1328                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1329                 return 0;
1330         }
1331
1332         DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1333         return -EINVAL;
1334 }
1335
1336 /*
1337  * bo locked.
1338  */
1339
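/*
 * Make the buffer's placement match bo->mem.mask: look up the fence type,
 * wait for any mapping to go away, move the buffer if its current placement
 * is incompatible, update the pinned-node bookkeeping, add a ttm if needed,
 * and finally put the buffer on the appropriate LRU or the unfenced list.
 */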
1340 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1341                                       int move_unfenced, int no_wait)
1342 {
1343         drm_device_t *dev = bo->dev;
1344         drm_buffer_manager_t *bm = &dev->bm;
1345         drm_bo_driver_t *driver = dev->driver->bo_driver;
1346         int ret;
1347
1348         DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
1349                   bo->mem.flags);
1350         ret =
1351             driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
1352         if (ret) {
1353                 DRM_ERROR("Driver did not support given buffer permissions\n");
1354                 return ret;
1355         }
1356
1357         ret = drm_bo_wait_unmapped(bo, no_wait);
1358         if (ret)
1359                 return ret;
1360
1361         if (bo->type == drm_bo_type_fake) {
1362                 ret = drm_bo_check_fake(dev, &bo->mem);
1363                 if (ret)
1364                         return ret;
1365         }
1366
1367         /*
1368          * Check whether we need to move buffer.
1369          */
1370
1371         if (!drm_bo_mem_compat(&bo->mem)) {
1372                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1373                                          move_unfenced);
1374                 if (ret) {
1375                         if (ret != -EAGAIN)
1376                                 DRM_ERROR("Failed moving buffer.\n");
1377                         return ret;
1378                 }
1379         }
1380
1381         /*
1382          * Pinned buffers.
1383          */
1384
1385         if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1386                 bo->pinned_mem_type = bo->mem.mem_type;
1387                 mutex_lock(&dev->struct_mutex);
1388                 list_del_init(&bo->pinned_lru);
1389                 drm_bo_add_to_pinned_lru(bo);
1390
1391                 if (bo->pinned_node != bo->mem.mm_node) {
1392                         if (bo->pinned_node != NULL)
1393                                 drm_mm_put_block(bo->pinned_node);
1394                         bo->pinned_node = bo->mem.mm_node;
1395                 }
1396
1397                 mutex_unlock(&dev->struct_mutex);
1398
1399         } else if (bo->pinned_node != NULL) {
1400
1401                 mutex_lock(&dev->struct_mutex);
1402                 drm_mm_put_block(bo->pinned_node);
1403                 list_del_init(&bo->pinned_lru);
1404                 bo->pinned_node = NULL;
1405                 mutex_unlock(&dev->struct_mutex);
1406
1407         }
1408
1409         /*
1410          * We might need to add a TTM.
1411          */
1412
1413         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1414                 ret = drm_bo_add_ttm(bo);
1415                 if (ret)
1416                         return ret;
1417         }
1418         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1419
1420         /*
1421          * Finally, adjust lru to be sure.
1422          */
1423
1424         mutex_lock(&dev->struct_mutex);
1425         list_del(&bo->lru);
1426         if (move_unfenced) {
1427                 list_add_tail(&bo->lru, &bm->unfenced);
1428                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1429                                 _DRM_BO_FLAG_UNFENCED);
1430         } else {
1431                 drm_bo_add_to_lru(bo);
1432                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1433                         DRM_WAKEUP(&bo->event_queue);
1434                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1435                                         _DRM_BO_FLAG_UNFENCED);
1436                 }
1437         }
1438         mutex_unlock(&dev->struct_mutex);
1439
1440         return 0;
1441 }
1442
1443 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1444                                   uint32_t flags, uint32_t mask, uint32_t hint,
1445                                   drm_bo_arg_reply_t * rep)
1446 {
1447         drm_buffer_object_t *bo;
1448         int ret;
1449         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1450
1451         bo = drm_lookup_buffer_object(priv, handle, 1);
1452         if (!bo) {
1453                 return -EINVAL;
1454         }
1455
1456         mutex_lock(&bo->mutex);
1457         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1458
1459         if (ret)
1460                 goto out;
1461
1462         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1463         ret = drm_bo_new_mask(bo, flags, hint);
1464         if (ret)
1465                 goto out;
1466
1467         ret =
1468             drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1469                                        no_wait);
1470         drm_bo_fill_rep_arg(bo, rep);
1471
1472       out:
1473
1474         mutex_unlock(&bo->mutex);
1475
1476         drm_bo_usage_deref_unlocked(bo);
1477         return ret;
1478 }
1479
1480 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1481                               drm_bo_arg_reply_t * rep)
1482 {
1483         drm_buffer_object_t *bo;
1484
1485         bo = drm_lookup_buffer_object(priv, handle, 1);
1486         if (!bo) {
1487                 return -EINVAL;
1488         }
1489         mutex_lock(&bo->mutex);
1490         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1491                 (void)drm_bo_busy(bo);
1492         drm_bo_fill_rep_arg(bo, rep);
1493         mutex_unlock(&bo->mutex);
1494         drm_bo_usage_deref_unlocked(bo);
1495         return 0;
1496 }
1497
1498 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1499                               uint32_t hint, drm_bo_arg_reply_t * rep)
1500 {
1501         drm_buffer_object_t *bo;
1502         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1503         int ret;
1504
1505         bo = drm_lookup_buffer_object(priv, handle, 1);
1506         if (!bo) {
1507                 return -EINVAL;
1508         }
1509
1510         mutex_lock(&bo->mutex);
1511         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1512         if (ret)
1513                 goto out;
1514         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1515         if (ret)
1516                 goto out;
1517
1518         drm_bo_fill_rep_arg(bo, rep);
1519
1520       out:
1521         mutex_unlock(&bo->mutex);
1522         drm_bo_usage_deref_unlocked(bo);
1523         return ret;
1524 }
1525
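/*
 * Allocate and initialize a new buffer object, set up its virtual mapping
 * (for drm_bo_type_dc buffers) and validate it into an initial placement
 * matching the requested mask.
 */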
1526 int drm_buffer_object_create(drm_file_t * priv,
1527                              unsigned long size,
1528                              drm_bo_type_t type,
1529                              uint32_t mask,
1530                              uint32_t hint,
1531                              uint32_t page_alignment,
1532                              unsigned long buffer_start,
1533                              drm_buffer_object_t ** buf_obj)
1534 {
1535         drm_device_t *dev = priv->head->dev;
1536         drm_buffer_manager_t *bm = &dev->bm;
1537         drm_buffer_object_t *bo;
1538         int ret = 0;
1539         unsigned long num_pages;
1540
1541         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1542                 DRM_ERROR("Invalid buffer object start.\n");
1543                 return -EINVAL;
1544         }
1545         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1546         if (num_pages == 0) {
1547                 DRM_ERROR("Illegal buffer object size.\n");
1548                 return -EINVAL;
1549         }
1550
1551         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1552
1553         if (!bo)
1554                 return -ENOMEM;
1555
1556         mutex_init(&bo->mutex);
1557         mutex_lock(&bo->mutex);
1558
1559         atomic_set(&bo->usage, 1);
1560         atomic_set(&bo->mapped, -1);
1561         DRM_INIT_WAITQUEUE(&bo->event_queue);
1562         INIT_LIST_HEAD(&bo->lru);
1563         INIT_LIST_HEAD(&bo->pinned_lru);
1564         INIT_LIST_HEAD(&bo->ddestroy);
1565 #ifdef DRM_ODD_MM_COMPAT
1566         INIT_LIST_HEAD(&bo->p_mm_list);
1567         INIT_LIST_HEAD(&bo->vma_list);
1568 #endif
1569         bo->dev = dev;
1570         bo->type = type;
1571         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1572         bo->mem.num_pages = num_pages;
1573         bo->mem.mm_node = NULL;
1574         bo->mem.page_alignment = page_alignment;
1575         if (bo->type == drm_bo_type_fake) {
1576                 bo->offset = buffer_start;
1577                 bo->buffer_start = 0;
1578         } else {
1579                 bo->buffer_start = buffer_start;
1580         }
1581         bo->priv_flags = 0;
1582         bo->mem.flags = 0;
1583         bo->mem.mask = 0;
1584         atomic_inc(&bm->count);
1585         ret = drm_bo_new_mask(bo, mask, hint);
1586
1587         if (ret)
1588                 goto out_err;
1589
1590         if (bo->type == drm_bo_type_dc) {
1591                 mutex_lock(&dev->struct_mutex);
1592                 ret = drm_bo_setup_vm_locked(bo);
1593                 mutex_unlock(&dev->struct_mutex);
1594                 if (ret)
1595                         goto out_err;
1596         }
1597         ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1598         if (ret)
1599                 goto out_err;
1600
1601         mutex_unlock(&bo->mutex);
1602         *buf_obj = bo;
1603         return 0;
1604
1605       out_err:
1606         mutex_unlock(&bo->mutex);
1607
1608         drm_bo_usage_deref_unlocked(bo);
1609         return ret;
1610 }
1611
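     /*
      * Register the buffer object as a user object so that user space can
      * refer to it by handle. Takes dev->struct_mutex internally.
      */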
1612 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1613                                   int shareable)
1614 {
1615         drm_device_t *dev = priv->head->dev;
1616         int ret;
1617
1618         mutex_lock(&dev->struct_mutex);
1619         ret = drm_add_user_object(priv, &bo->base, shareable);
1620         if (ret)
1621                 goto out;
1622
1623         bo->base.remove = drm_bo_base_deref_locked;
1624         bo->base.type = drm_buffer_type;
1625         bo->base.ref_struct_locked = NULL;
1626         bo->base.unref = drm_buffer_user_object_unmap;
1627
1628       out:
1629         mutex_unlock(&dev->struct_mutex);
1630         return ret;
1631 }
1632
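     /*
      * Verify that the caller holds the DRM hardware lock before operations
      * that may move or fence buffers.
      */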
1633 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1634 {
1635         LOCK_TEST_WITH_RETURN(dev, filp);
1636         return 0;
1637 }
1638
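     /*
      * The generic buffer object ioctl: walks the user-space chain of
      * drm_bo_arg_t requests and dispatches each operation.
      */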
1639 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1640 {
1641         DRM_DEVICE;
1642         drm_bo_arg_t arg;
1643         drm_bo_arg_request_t *req = &arg.d.req;
1644         drm_bo_arg_reply_t rep;
1645         unsigned long next;
1646         drm_user_object_t *uo;
1647         drm_buffer_object_t *entry;
1648
1649         if (!dev->bm.initialized) {
1650                 DRM_ERROR("Buffer object manager is not initialized.\n");
1651                 return -EINVAL;
1652         }
1653
1654         do {
1655                 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1656
1657                 if (arg.handled) {
1658                         data = arg.next;
1659                         continue;
1660                 }
1661
1662                 rep.ret = 0;
1663                 switch (req->op) {
1664                 case drm_bo_create:
1665                         rep.ret = drm_bo_lock_test(dev, filp);
1666                         if (rep.ret)
1667                                 break;
1668                         rep.ret =
1669                             drm_buffer_object_create(priv, req->size,
1670                                                      req->type,
1671                                                      req->mask,
1672                                                      req->hint,
1673                                                      req->page_alignment,
1674                                                      req->buffer_start, &entry);
1675                         if (rep.ret)
1676                                 break;
1677
1678                         rep.ret =
1679                             drm_bo_add_user_object(priv, entry,
1680                                                    req->mask &
1681                                                    DRM_BO_FLAG_SHAREABLE);
1682                         if (rep.ret) {
1683                                 drm_bo_usage_deref_unlocked(entry);
1684                                 break;
1685                         }
1688
1689                         mutex_lock(&entry->mutex);
1690                         drm_bo_fill_rep_arg(entry, &rep);
1691                         mutex_unlock(&entry->mutex);
1692                         break;
1693                 case drm_bo_unmap:
1694                         rep.ret = drm_buffer_object_unmap(priv, req->handle);
1695                         break;
1696                 case drm_bo_map:
1697                         rep.ret = drm_buffer_object_map(priv, req->handle,
1698                                                         req->mask,
1699                                                         req->hint, &rep);
1700                         break;
1701                 case drm_bo_destroy:
1702                         mutex_lock(&dev->struct_mutex);
1703                         uo = drm_lookup_user_object(priv, req->handle);
1704                         if (!uo || (uo->type != drm_buffer_type)
1705                             || uo->owner != priv) {
1706                                 mutex_unlock(&dev->struct_mutex);
1707                                 rep.ret = -EINVAL;
1708                                 break;
1709                         }
1710                         rep.ret = drm_remove_user_object(priv, uo);
1711                         mutex_unlock(&dev->struct_mutex);
1712                         break;
1713                 case drm_bo_reference:
1714                         rep.ret = drm_user_object_ref(priv, req->handle,
1715                                                       drm_buffer_type, &uo);
1716                         if (rep.ret)
1717                                 break;
1718
1719                         /*
1720                          * Note: The following code only fills in the rep argument.
1721                          * drm_lookup_user_object ups the refcount, which is
1722                          * decreased again when we're done with the bo.
1723                          */
1724
1725                         mutex_lock(&dev->struct_mutex);
1726                         uo = drm_lookup_user_object(priv, req->handle);
1727                         entry =
1728                             drm_user_object_entry(uo, drm_buffer_object_t,
1729                                                   base);
1730                         atomic_dec(&entry->usage);
1731                         mutex_unlock(&dev->struct_mutex);
1732                         mutex_lock(&entry->mutex);
1733                         drm_bo_fill_rep_arg(entry, &rep);
1734                         mutex_unlock(&entry->mutex);
1735                         break;
1736                 case drm_bo_unreference:
1737                         rep.ret = drm_user_object_unref(priv, req->handle,
1738                                                         drm_buffer_type);
1739                         break;
1740                 case drm_bo_validate:
1741                         rep.ret = drm_bo_lock_test(dev, filp);
1742
1743                         if (rep.ret)
1744                                 break;
1745                         rep.ret =
1746                             drm_bo_handle_validate(priv, req->handle, req->mask,
1747                                                    req->arg_handle, req->hint,
1748                                                    &rep);
1749                         break;
1750                 case drm_bo_fence:
1751                         rep.ret = drm_bo_lock_test(dev, filp);
1752                         if (rep.ret)
1753                                 break;
1754                         break;  /* Not implemented yet. */
1755                 case drm_bo_info:
1756                         rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1757                         break;
1758                 case drm_bo_wait_idle:
1759                         rep.ret = drm_bo_handle_wait(priv, req->handle,
1760                                                      req->hint, &rep);
1761                         break;
1762                 case drm_bo_ref_fence:
1763                         rep.ret = -EINVAL;
1764                         DRM_ERROR("Function is not implemented yet.\n");
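                             /* Fall through to the default -EINVAL. */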
1765                 default:
1766                         rep.ret = -EINVAL;
1767                 }
1768                 next = arg.next;
1769
1770                 /*
1771                  * A signal interrupted us. Make sure the ioctl is restartable.
1772                  */
1773
1774                 if (rep.ret == -EAGAIN)
1775                         return -EAGAIN;
1776
1777                 arg.handled = 1;
1778                 arg.d.rep = rep;
1779                 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1780                 data = next;
1781         } while (data);
1782         return 0;
1783 }
1784
1785 /**
1786  * Clean the unfenced list and put its buffers back on the regular LRU.
1787  * This is part of the memory manager cleanup and should only be
1788  * called with the DRI lock held.
1789  * Call dev->struct_mutex locked.
1790  */
1791
1792 static void drm_bo_clean_unfenced(drm_device_t *dev)
1793 {
1794         drm_buffer_manager_t *bm  = &dev->bm;
1795         struct list_head *head, *list;
1796         drm_buffer_object_t *entry;
1797
1798         head = &bm->unfenced;
1799
1800         list = head->next;
1801         while(list != head) {
1802                 prefetch(list->next);
1803                 entry = list_entry(list, drm_buffer_object_t, lru);
1804
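                     /*
                      * Take a usage reference so the entry survives while
                      * struct_mutex is dropped to honour the bo->mutex before
                      * dev->struct_mutex locking order.
                      */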
1805                 atomic_inc(&entry->usage);
1806                 mutex_unlock(&dev->struct_mutex);
1807                 mutex_lock(&entry->mutex);
1808                 mutex_lock(&dev->struct_mutex);
1809
1810                 list_del(&entry->lru);
1811                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1812                 drm_bo_add_to_lru(entry);
1813                 mutex_unlock(&entry->mutex);
                     /* Drop the usage reference taken above; struct_mutex is held. */
                     drm_bo_usage_deref_locked(entry);
1814                 list = head->next;
1815         }
1816 }
1817
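     /*
      * Prepare a single buffer for memory manager takedown: wait for or
      * expire its fence, optionally release its pinned region, and evict it
      * from the memory type being cleaned.
      */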
1818 static int drm_bo_leave_list(drm_buffer_object_t * bo,
1819                              uint32_t mem_type,
1820                              int free_pinned, int allow_errors)
1821 {
1822         drm_device_t *dev = bo->dev;
1823         int ret = 0;
1824
1825         mutex_lock(&bo->mutex);
1826
1827         ret = drm_bo_expire_fence(bo, allow_errors);
1828         if (ret)
1829                 goto out;
1830
1831         if (free_pinned) {
1832                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1833                 mutex_lock(&dev->struct_mutex);
1834                 list_del_init(&bo->pinned_lru);
1835                 if (bo->pinned_node == bo->mem.mm_node)
1836                         bo->pinned_node = NULL;
1837                 if (bo->pinned_node != NULL) {
1838                         drm_mm_put_block(bo->pinned_node);
1839                         bo->pinned_node = NULL;
1840                 }
1841                 mutex_unlock(&dev->struct_mutex);
1842         }
1843
1844         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1845                 DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
1846                           "cleanup. Removing flag and evicting.\n");
1847                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1848                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1849         }
1850
1851         if (bo->mem.mem_type == mem_type)
1852                 ret = drm_bo_evict(bo, mem_type, 0);
1853
1854         if (ret) {
1855                 if (allow_errors) {
1856                         goto out;
1857                 } else {
1858                         ret = 0;
1859                         DRM_ERROR("Cleanup eviction failed\n");
1860                 }
1861         }
1862
1863       out:
1864         mutex_unlock(&bo->mutex);
1865         return ret;
1866 }
1867
1868
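     /*
      * Return the buffer object embedding the given list head, which sits on
      * either an lru list or a pinned_lru list.
      */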
1869 static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
1870                                          int pinned_list)
1871 {
1872         if (pinned_list)
1873                 return list_entry(list, drm_buffer_object_t, pinned_lru);
1874         else
1875                 return list_entry(list, drm_buffer_object_t, lru);
1876 }
1877
1878 /*
1879  * dev->struct_mutex locked.
1880  */
1881
1882 static int drm_bo_force_list_clean(drm_device_t * dev,
1883                                    struct list_head *head,
1884                                    unsigned mem_type,
1885                                    int free_pinned,
1886                                    int allow_errors,
1887                                    int pinned_list)
1888 {
1889         struct list_head *list, *next, *prev;
1890         drm_buffer_object_t *entry, *nentry;
1891         int ret;
1892         int do_restart;
1893
1894         /*
1895          * The list traversal is a bit odd here, because an item may
1896          * disappear from the list when we release the struct_mutex or
1897          * when we decrease the usage count. Also we're not guaranteed
1898          * to drain pinned lists, so we can't always restart.
1899          */
1900
1901 restart:
1902         nentry = NULL;
1903         list_for_each_safe(list, next, head) {
1904                 prev = list->prev;
1905
1906                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
1907                 atomic_inc(&entry->usage);
1908                 if (nentry) {
1909                         atomic_dec(&nentry->usage);
1910                         nentry = NULL;
1911                 }
1912
1913                 /*
1914                  * Protect the next item from destruction, so we can check
1915                  * its list pointers later on.
1916                  */
1917
1918                 if (next != head) {
1919                         nentry = drm_bo_entry(next, pinned_list);
1920                         atomic_inc(&nentry->usage);
1921                 }
1922                 mutex_unlock(&dev->struct_mutex);
1923
1924                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1925                                         allow_errors);
1926                 mutex_lock(&dev->struct_mutex);
1927
1928                 drm_bo_usage_deref_locked(entry);
1929                 if (ret)
1930                         return ret;
1931
1932                 /*
1933                  * Has the next item disappeared from the list?
1934                  */
1935
1936                 do_restart = ((next->prev != list) && (next->prev != prev));
1937
1938                 if (nentry != NULL && do_restart) {
1939                         drm_bo_usage_deref_locked(nentry);
1940                         nentry = NULL;
1941                 }
1942
1943                 if (do_restart)
1944                         goto restart;
1945         }
1946         return 0;
1947 }
1948
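     /*
      * Evict all buffers from the given memory type and, if its mm allocator
      * is clean, take the allocator down. Called with dev->struct_mutex held.
      */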
1949 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1950 {
1951         drm_buffer_manager_t *bm = &dev->bm;
1952         drm_mem_type_manager_t *man = &bm->man[mem_type];
1953         int ret = -EINVAL;
1954
1955         if (mem_type >= DRM_BO_MEM_TYPES) {
1956                 DRM_ERROR("Illegal memory type %d\n", mem_type);
1957                 return ret;
1958         }
1959
1960         if (!man->has_type) {
1961                 DRM_ERROR("Trying to take down uninitialized "
1962                           "memory manager type\n");
1963                 return ret;
1964         }
1965         man->use_type = 0;
1966         man->has_type = 0;
1967
1968         ret = 0;
1969         if (mem_type > 0) {
1970
1971                 drm_bo_clean_unfenced(dev);
1972                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
1973                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
1974
1975                 if (drm_mm_clean(&man->manager)) {
1976                         drm_mm_takedown(&man->manager);
1977                 } else {
1978                         ret = -EBUSY;
1979                 }
1980         }
1981
1982         return ret;
1983 }
1984
1985 /**
1986  * Evict all buffers of a particular mem_type, but leave memory manager
1987  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
1988  * point since we have the hardware lock.
1989  */
1990
1991 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1992 {
1993         int ret;
1994         drm_buffer_manager_t *bm = &dev->bm;
1995         drm_mem_type_manager_t *man = &bm->man[mem_type];
1996
1997         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1998                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1999                 return -EINVAL;
2000         }
2001
2002         if (!man->has_type) {
2003                 DRM_ERROR("Memory type %u has not been initialized.\n",
2004                           mem_type);
2005                 return 0;
2006         }
2007
2008         drm_bo_clean_unfenced(dev);
2009         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2010         if (ret)
2011                 return ret;
2012         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2013
2014         return ret;
2015 }
2016
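     /*
      * Initialize a memory type manager. For types other than system memory
      * an mm range allocator is set up from the given offset and size.
      */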
2017 int drm_bo_init_mm(drm_device_t * dev,
2018                    unsigned type,
2019                    unsigned long p_offset, unsigned long p_size)
2020 {
2021         drm_buffer_manager_t *bm = &dev->bm;
2022         int ret = -EINVAL;
2023         drm_mem_type_manager_t *man;
2024
2025         if (type >= DRM_BO_MEM_TYPES) {
2026                 DRM_ERROR("Illegal memory type %d\n", type);
2027                 return ret;
2028         }
2029
2030         man = &bm->man[type];
2031         if (man->has_type) {
2032                 DRM_ERROR("Memory manager already initialized for type %d\n",
2033                           type);
2034                 return ret;
2035         }
2036
2037         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2038         if (ret)
2039                 return ret;
2040
2041         ret = 0;
2042         if (type != DRM_BO_MEM_LOCAL) {
2043                 if (!p_size) {
2044                         DRM_ERROR("Zero size memory manager type %d\n", type);
2045                         return -EINVAL;
2046                 }
2047                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2048                 if (ret)
2049                         return ret;
2050         }
2051         man->has_type = 1;
2052         man->use_type = 1;
2053
2054         INIT_LIST_HEAD(&man->lru);
2055         INIT_LIST_HEAD(&man->pinned);
2056
2057         return 0;
2058 }
2059 EXPORT_SYMBOL(drm_bo_init_mm);
2060
2061 /*
2062  * This is called from lastclose, so we don't need to bother about
2063  * any clients still running when we set the initialized flag to zero.
2064  */
2065
2066 int drm_bo_driver_finish(drm_device_t * dev)
2067 {
2068         drm_buffer_manager_t *bm = &dev->bm;
2069         int ret = 0;
2070         unsigned i = DRM_BO_MEM_TYPES;
2071         drm_mem_type_manager_t *man;
2072
2073         mutex_lock(&dev->bm.init_mutex);
2074         mutex_lock(&dev->struct_mutex);
2075
2076         if (!bm->initialized)
2077                 goto out;
2078         bm->initialized = 0;
2079
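             /*
              * Take down all initialized memory types. System memory (type 0)
              * has no mm allocator to clean, so it is only flagged as gone.
              */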
2080         while (i--) {
2081                 man = &bm->man[i];
2082                 if (man->has_type) {
2083                         man->use_type = 0;
2084                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2085                                 ret = -EBUSY;
2086                                 DRM_ERROR("DRM memory manager type %d "
2087                                           "is not clean.\n", i);
2088                         }
2089                         man->has_type = 0;
2090                 }
2091         }
2092         mutex_unlock(&dev->struct_mutex);
2093
2094         if (!cancel_delayed_work(&bm->wq)) {
2095                 flush_scheduled_work();
2096         }
2097         mutex_lock(&dev->struct_mutex);
2098         drm_bo_delayed_delete(dev, 1);
2099         if (list_empty(&bm->ddestroy)) {
2100                 DRM_DEBUG("Delayed destroy list was clean\n");
2101         }
2102         if (list_empty(&bm->man[0].lru)) {
2103                 DRM_DEBUG("Swap list was clean\n");
2104         }
2105         if (list_empty(&bm->man[0].pinned)) {
2106                 DRM_DEBUG("NO_MOVE list was clean\n");
2107         }
2108         if (list_empty(&bm->unfenced)) {
2109                 DRM_DEBUG("Unfenced list was clean\n");
2110         }
2111       out:
2112         mutex_unlock(&dev->struct_mutex);
2113         mutex_unlock(&dev->bm.init_mutex);
2114         return ret;
2115 }
2116
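     /*
      * One-time buffer manager setup at driver load: initialize the system
      * memory type and the delayed-destroy work queue.
      */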
2117 int drm_bo_driver_init(drm_device_t * dev)
2118 {
2119         drm_bo_driver_t *driver = dev->driver->bo_driver;
2120         drm_buffer_manager_t *bm = &dev->bm;
2121         int ret = -EINVAL;
2122
2123         mutex_lock(&dev->bm.init_mutex);
2124         mutex_lock(&dev->struct_mutex);
2125         if (!driver)
2126                 goto out_unlock;
2127
2128         /*
2129          * Initialize the system memory buffer type.
2130          * Other types need to be driver / IOCTL initialized.
2131          */
2132
2133         ret = drm_bo_init_mm(dev, 0, 0, 0);
2134         if (ret)
2135                 goto out_unlock;
2136
2137 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2138         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2139 #else
2140         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2141 #endif
2142         bm->initialized = 1;
2143         bm->nice_mode = 1;
2144         atomic_set(&bm->count, 0);
2145         bm->cur_pages = 0;
2146         INIT_LIST_HEAD(&bm->unfenced);
2147         INIT_LIST_HEAD(&bm->ddestroy);
2148       out_unlock:
2149         mutex_unlock(&dev->struct_mutex);
2150         mutex_unlock(&dev->bm.init_mutex);
2151         return ret;
2152 }
2153
2154 EXPORT_SYMBOL(drm_bo_driver_init);
2155
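     /*
      * Ioctl used by user space to initialize, take down, lock or unlock a
      * driver-managed memory type.
      */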
2156 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
2157 {
2158         DRM_DEVICE;
2159
2160         int ret = 0;
2161         drm_mm_init_arg_t arg;
2162         drm_buffer_manager_t *bm = &dev->bm;
2163         drm_bo_driver_t *driver = dev->driver->bo_driver;
2164
2165         if (!driver) {
2166                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2167                 return -EINVAL;
2168         }
2169
2170         DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
2171
2172         switch (arg.req.op) {
2173         case mm_init:
2174                 ret = -EINVAL;
2175                 mutex_lock(&dev->bm.init_mutex);
2176                 mutex_lock(&dev->struct_mutex);
2177                 if (!bm->initialized) {
2178                         DRM_ERROR("DRM memory manager was not initialized.\n");
2179                         break;
2180                 }
2181                 if (arg.req.mem_type == 0) {
2182                         DRM_ERROR
2183                             ("System memory buffers already initialized.\n");
2184                         break;
2185                 }
2186                 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2187                                      arg.req.p_offset, arg.req.p_size);
2188                 break;
2189         case mm_takedown:
2190                 LOCK_TEST_WITH_RETURN(dev, filp);
2191                 mutex_lock(&dev->bm.init_mutex);
2192                 mutex_lock(&dev->struct_mutex);
2193                 ret = -EINVAL;
2194                 if (!bm->initialized) {
2195                         DRM_ERROR("DRM memory manager was not initialized\n");
2196                         break;
2197                 }
2198                 if (arg.req.mem_type == 0) {
2199                         DRM_ERROR("No takedown for System memory buffers.\n");
2200                         break;
2201                 }
2202                 ret = 0;
2203                 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2204                         DRM_ERROR("Memory manager type %d not clean. "
2205                                   "Delaying takedown\n", arg.req.mem_type);
2206                 }
2207                 break;
2208         case mm_lock:
2209                 LOCK_TEST_WITH_RETURN(dev, filp);
2210                 mutex_lock(&dev->bm.init_mutex);
2211                 mutex_lock(&dev->struct_mutex);
2212                 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2213                 break;
2214         case mm_unlock:
2215                 LOCK_TEST_WITH_RETURN(dev, filp);
2216                 mutex_lock(&dev->bm.init_mutex);
2217                 mutex_lock(&dev->struct_mutex);
2218                 ret = 0;
2219                 break;
2220         default:
2221                 DRM_ERROR("Function not implemented yet\n");
2222                 return -EINVAL;
2223         }
2224
2225         mutex_unlock(&dev->struct_mutex);
2226         mutex_unlock(&dev->bm.init_mutex);
2227         if (ret)
2228                 return ret;
2229
2230         DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2231         return 0;
2232 }
2233
2234 /*
2235  * buffer object vm functions.
2236  */
2237
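     /*
      * Return 1 if the memory region must be accessed through its PCI/IO
      * aperture, 0 if it can be reached directly through its backing pages.
      */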
2238 int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
2239 {
2240         drm_buffer_manager_t *bm = &dev->bm;
2241         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2242
2243         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2244                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2245                         return 0;
2246
2247                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2248                         return 0;
2249
2250                 if (mem->flags & DRM_BO_FLAG_CACHED)
2251                         return 0;
2252         }
2253         return 1;
2254 }
2255
2256 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2257
2258 /**
2259  * Get the PCI offset for the buffer object memory.
2260  *
2261  * \param dev The drm device.
2262  * \param mem The buffer object memory region.
2263  * \param bus_base On return the base of the PCI region.
2264  * \param bus_offset On return the byte offset into the PCI region.
2265  * \param bus_size On return the byte size of the buffer object or zero if
2266  *     the buffer object memory is not accessible through a PCI region.
2267  * \return Failure indication.
2268  *
2269  * Returns -EINVAL if the memory is currently not mappable, otherwise zero.
2270  */
2271
2272 int drm_bo_pci_offset(drm_device_t * dev,
2273                       drm_bo_mem_reg_t * mem,
2274                       unsigned long *bus_base,
2275                       unsigned long *bus_offset, unsigned long *bus_size)
2276 {
2277         drm_buffer_manager_t *bm = &dev->bm;
2278         drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2279
2280         *bus_size = 0;
2281         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2282                 return -EINVAL;
2283
2284         if (drm_mem_reg_is_pci(dev, mem)) {
2285                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2286                 *bus_size = mem->num_pages << PAGE_SHIFT;
2287                 *bus_base = man->io_offset;
2288         }
2289
2290         return 0;
2291 }
2292
2293 /**
2294  * Kill all user-space virtual mappings of this buffer object.
2295  *
2296  * \param bo The buffer object.
2297  *
2298  * Call bo->mutex locked.
2299  */
2300
2301 static void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
2302 {
2303         drm_device_t *dev = bo->dev;
2304         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2305         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2306
2307         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2308 }
2309
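     /*
      * Release the mmap offset node, hash entry and fake map of the buffer
      * object, dropping the usage reference that drm_bo_setup_vm_locked took.
      * Called with dev->struct_mutex held.
      */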
2310 static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
2311 {
2312         drm_map_list_t *list = &bo->map_list;
2313         drm_local_map_t *map;
2314         drm_device_t *dev = bo->dev;
2315
2316         if (list->user_token) {
2317                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2318                 list->user_token = 0;
2319         }
2320         if (list->file_offset_node) {
2321                 drm_mm_put_block(list->file_offset_node);
2322                 list->file_offset_node = NULL;
2323         }
2324
2325         map = list->map;
2326         if (!map)
2327                 return;
2328
2329         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2330         list->map = NULL;
2331         list->user_token = 0ULL;
2332         drm_bo_usage_deref_locked(bo);
2333 }
2334
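     /*
      * Set up the fake map, file offset and hash entry that let user space
      * mmap the buffer object. Called with dev->struct_mutex held.
      */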
2335 static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
2336 {
2337         drm_map_list_t *list = &bo->map_list;
2338         drm_local_map_t *map;
2339         drm_device_t *dev = bo->dev;
2340
2341         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2342         if (!list->map)
2343                 return -ENOMEM;
2344
2345         map = list->map;
2346         map->offset = 0;
2347         map->type = _DRM_TTM;
2348         map->flags = _DRM_REMOVABLE;
2349         map->size = bo->mem.num_pages * PAGE_SIZE;
2350         atomic_inc(&bo->usage);
2351         map->handle = (void *)bo;
2352
2353         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2354                                                     bo->mem.num_pages, 0, 0);
2355
2356         if (!list->file_offset_node) {
2357                 drm_bo_takedown_vm_locked(bo);
2358                 return -ENOMEM;
2359         }
2360
2361         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2362                                                   bo->mem.num_pages, 0);
2363
2364         list->hash.key = list->file_offset_node->start;
2365         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2366                 drm_bo_takedown_vm_locked(bo);
2367                 return -ENOMEM;
2368         }
2369
2370         list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
2371
2372         return 0;
2373 }