drm/ttm: add ioctl to get back the size of the managed memory area
linux-core/drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads,
40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex does also protect the buffer list heads, so to manipulate those,
44  * we need both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47  * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48  * the list traversal will, in general, need to be restarted.
49  *
50  */
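/*
 * A minimal sketch of the traversal pattern these rules imply, modeled on
 * the eviction and fencing loops later in this file: a usage reference is
 * taken under dev->struct_mutex before that mutex is dropped, so the
 * object cannot disappear while bo->mutex is acquired, and the walk is
 * restarted from the list head afterwards.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	while (!list_empty(head)) {
 *		entry = list_entry(head->next, struct drm_buffer_object, lru);
 *		atomic_inc(&entry->usage);
 *		mutex_unlock(&dev->struct_mutex);
 *
 *		mutex_lock(&entry->mutex);
 *		mutex_lock(&dev->struct_mutex);
 *		... operate on entry with both locks held ...
 *		mutex_unlock(&entry->mutex);
 *
 *		drm_bo_usage_deref_locked(&entry);
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */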
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
56
57 static inline uint64_t drm_bo_type_flags(unsigned type)
58 {
59         return (1ULL << (24 + type));
60 }
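/*
 * For example, drm_bo_type_flags(DRM_BO_MEM_TT) yields the
 * DRM_BO_FLAG_MEM_TT bit, assuming the DRM_BO_FLAG_MEM_* flags start at
 * bit 24 in the interface header, which is what the "24 +" above relies on.
 */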
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
137 {
138         struct drm_device *dev = bo->dev;
139         int ret = 0;
140         uint32_t page_flags = 0;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143         bo->ttm = NULL;
144
145         if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146                 page_flags |= DRM_TTM_PAGE_WRITE;
147
148         switch (bo->type) {
149         case drm_bo_type_device:
150         case drm_bo_type_kernel:
151                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
152                                          page_flags, dev->bm.dummy_read_page);
153                 if (!bo->ttm)
154                         ret = -ENOMEM;
155                 break;
156         case drm_bo_type_user:
157                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158                                          page_flags | DRM_TTM_PAGE_USER,
159                                          dev->bm.dummy_read_page);
160                 if (!bo->ttm)
161                         return -ENOMEM;
162
163                 ret = drm_ttm_set_user(bo->ttm, current,
164                                        bo->buffer_start,
165                                        bo->num_pages);
166                 if (ret)
167                         return ret;
168
169                 break;
170         default:
171                 DRM_ERROR("Illegal buffer object type\n");
172                 ret = -EINVAL;
173                 break;
174         }
175
176         return ret;
177 }
178
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180                                   struct drm_bo_mem_reg *mem,
181                                   int evict, int no_wait)
182 {
183         struct drm_device *dev = bo->dev;
184         struct drm_buffer_manager *bm = &dev->bm;
185         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
189         int ret = 0;
190
191         if (old_is_pci || new_is_pci ||
192             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
194         if (ret)
195                 return ret;
196
197         /*
198          * Create and bind a ttm if required.
199          */
200
201         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202                 ret = drm_bo_add_ttm(bo);
203                 if (ret)
204                         goto out_err;
205
206                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207                         ret = drm_ttm_bind(bo->ttm, mem);
208                         if (ret)
209                                 goto out_err;
210                 }
211         }
212
213         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
214
215                 struct drm_bo_mem_reg *old_mem = &bo->mem;
216                 uint64_t save_flags = old_mem->flags;
217                 uint64_t save_proposed_flags = old_mem->proposed_flags;
218
219                 *old_mem = *mem;
220                 mem->mm_node = NULL;
221                 old_mem->proposed_flags = save_proposed_flags;
222                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
223
224         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
225                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
226
227                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
228
229         } else if (dev->driver->bo_driver->move) {
230                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
231
232         } else {
233
234                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
235
236         }
237
238         if (ret)
239                 goto out_err;
240
241         if (old_is_pci || new_is_pci)
242                 drm_bo_vm_post_move(bo);
243
244         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
245                 ret =
246                     dev->driver->bo_driver->invalidate_caches(dev,
247                                                               bo->mem.flags);
248                 if (ret)
249                         DRM_ERROR("Cannot flush read caches\n");
250         }
251
252         DRM_FLAG_MASKED(bo->priv_flags,
253                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
254                         _DRM_BO_FLAG_EVICTED);
255
256         if (bo->mem.mm_node)
257                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
258                         bm->man[bo->mem.mem_type].gpu_offset;
259
260
261         return 0;
262
263 out_err:
264         if (old_is_pci || new_is_pci)
265                 drm_bo_vm_post_move(bo);
266
267         new_man = &bm->man[bo->mem.mem_type];
268         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
269                 drm_ttm_unbind(bo->ttm);
270                 drm_ttm_destroy(bo->ttm);
271                 bo->ttm = NULL;
272         }
273
274         return ret;
275 }
276
277 /*
278  * Call bo->mutex locked.
279  * Wait until the buffer is idle.
280  */
281
282 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
283                 int no_wait)
284 {
285         int ret;
286
287         DRM_ASSERT_LOCKED(&bo->mutex);
288
289         if (bo->fence) {
290                 if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
291                         drm_fence_usage_deref_unlocked(&bo->fence);
292                         return 0;
293                 }
294                 if (no_wait)
295                         return -EBUSY;
296
297                 ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
298                                           bo->fence_type);
299                 if (ret)
300                         return ret;
301
302                 drm_fence_usage_deref_unlocked(&bo->fence);
303         }
304         return 0;
305 }
306 EXPORT_SYMBOL(drm_bo_wait);
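/*
 * A minimal usage sketch, assuming the caller already holds a reference on
 * the buffer: drm_bo_wait() is called with bo->mutex held, and with @no_wait
 * set it returns -EBUSY instead of sleeping on the fence.
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 0, no_wait);
 *	mutex_unlock(&bo->mutex);
 *	if (ret)
 *		return ret;
 */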
307
308 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
309 {
310         struct drm_device *dev = bo->dev;
311         struct drm_buffer_manager *bm = &dev->bm;
312
313         if (bo->fence) {
314                 if (bm->nice_mode) {
315                         unsigned long _end = jiffies + 3 * DRM_HZ;
316                         int ret;
317                         do {
318                                 ret = drm_bo_wait(bo, 0, 1, 0);
319                                 if (ret && allow_errors)
320                                         return ret;
321
322                         } while (ret && !time_after_eq(jiffies, _end));
323
324                         if (bo->fence) {
325                                 bm->nice_mode = 0;
326                                 DRM_ERROR("Detected GPU lockup or "
327                                           "fence driver was taken down. "
328                                           "Evicting buffer.\n");
329                         }
330                 }
331                 if (bo->fence)
332                         drm_fence_usage_deref_unlocked(&bo->fence);
333         }
334         return 0;
335 }
336
337 /*
338  * Call dev->struct_mutex locked.
339  * Attempts to remove all private references to a buffer by expiring its
340  * fence object and removing from lru lists and memory managers.
341  */
342
343 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
344 {
345         struct drm_device *dev = bo->dev;
346         struct drm_buffer_manager *bm = &dev->bm;
347
348         DRM_ASSERT_LOCKED(&dev->struct_mutex);
349
350         atomic_inc(&bo->usage);
351         mutex_unlock(&dev->struct_mutex);
352         mutex_lock(&bo->mutex);
353
354         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
355
356         if (bo->fence && drm_fence_object_signaled(bo->fence,
357                                                    bo->fence_type))
358                 drm_fence_usage_deref_unlocked(&bo->fence);
359
360         if (bo->fence && remove_all)
361                 (void)drm_bo_expire_fence(bo, 0);
362
363         mutex_lock(&dev->struct_mutex);
364
365         if (!atomic_dec_and_test(&bo->usage))
366                 goto out;
367
368         if (!bo->fence) {
369                 list_del_init(&bo->lru);
370                 if (bo->mem.mm_node) {
371                         drm_mm_put_block(bo->mem.mm_node);
372                         if (bo->pinned_node == bo->mem.mm_node)
373                                 bo->pinned_node = NULL;
374                         bo->mem.mm_node = NULL;
375                 }
376                 list_del_init(&bo->pinned_lru);
377                 if (bo->pinned_node) {
378                         drm_mm_put_block(bo->pinned_node);
379                         bo->pinned_node = NULL;
380                 }
381                 list_del_init(&bo->ddestroy);
382                 mutex_unlock(&bo->mutex);
383                 drm_bo_destroy_locked(bo);
384                 return;
385         }
386
387         if (list_empty(&bo->ddestroy)) {
388                 drm_fence_object_flush(bo->fence, bo->fence_type);
389                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
390                 schedule_delayed_work(&bm->wq,
391                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
392         }
393
394 out:
395         mutex_unlock(&bo->mutex);
396         return;
397 }
398
399 /*
400  * Verify that refcount is 0 and that there are no internal references
401  * to the buffer object. Then destroy it.
402  */
403
404 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
405 {
406         struct drm_device *dev = bo->dev;
407         struct drm_buffer_manager *bm = &dev->bm;
408
409         DRM_ASSERT_LOCKED(&dev->struct_mutex);
410
411         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
412             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
413             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
414                 if (bo->fence != NULL) {
415                         DRM_ERROR("Fence was non-zero.\n");
416                         drm_bo_cleanup_refs(bo, 0);
417                         return;
418                 }
419
420 #ifdef DRM_ODD_MM_COMPAT
421                 BUG_ON(!list_empty(&bo->vma_list));
422                 BUG_ON(!list_empty(&bo->p_mm_list));
423 #endif
424
425                 if (bo->ttm) {
426                         drm_ttm_unbind(bo->ttm);
427                         drm_ttm_destroy(bo->ttm);
428                         bo->ttm = NULL;
429                 }
430
431                 atomic_dec(&bm->count);
432
433                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
434
435                 return;
436         }
437
438         /*
439          * Some stuff is still trying to reference the buffer object.
440          * Get rid of those references.
441          */
442
443         drm_bo_cleanup_refs(bo, 0);
444
445         return;
446 }
447
448 /*
449  * Call dev->struct_mutex locked.
450  */
451
452 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
453 {
454         struct drm_buffer_manager *bm = &dev->bm;
455
456         struct drm_buffer_object *entry, *nentry;
457         struct list_head *list, *next;
458
459         list_for_each_safe(list, next, &bm->ddestroy) {
460                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
461
462                 nentry = NULL;
463                 if (next != &bm->ddestroy) {
464                         nentry = list_entry(next, struct drm_buffer_object,
465                                             ddestroy);
466                         atomic_inc(&nentry->usage);
467                 }
468
469                 drm_bo_cleanup_refs(entry, remove_all);
470
471                 if (nentry)
472                         atomic_dec(&nentry->usage);
473         }
474 }
475
476 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
477 static void drm_bo_delayed_workqueue(void *data)
478 #else
479 static void drm_bo_delayed_workqueue(struct work_struct *work)
480 #endif
481 {
482 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
483         struct drm_device *dev = (struct drm_device *) data;
484         struct drm_buffer_manager *bm = &dev->bm;
485 #else
486         struct drm_buffer_manager *bm =
487             container_of(work, struct drm_buffer_manager, wq.work);
488         struct drm_device *dev = container_of(bm, struct drm_device, bm);
489 #endif
490
491         DRM_DEBUG("Delayed delete Worker\n");
492
493         mutex_lock(&dev->struct_mutex);
494         if (!bm->initialized) {
495                 mutex_unlock(&dev->struct_mutex);
496                 return;
497         }
498         drm_bo_delayed_delete(dev, 0);
499         if (bm->initialized && !list_empty(&bm->ddestroy)) {
500                 schedule_delayed_work(&bm->wq,
501                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
502         }
503         mutex_unlock(&dev->struct_mutex);
504 }
505
506 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
507 {
508         struct drm_buffer_object *tmp_bo = *bo;
509         *bo = NULL;
510
511         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
512
513         if (atomic_dec_and_test(&tmp_bo->usage))
514                 drm_bo_destroy_locked(tmp_bo);
515 }
516 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
517
518 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
519                                      struct drm_user_object *uo)
520 {
521         struct drm_buffer_object *bo =
522             drm_user_object_entry(uo, struct drm_buffer_object, base);
523
524         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
525
526         drm_bo_takedown_vm_locked(bo);
527         drm_bo_usage_deref_locked(&bo);
528 }
529
530 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
531 {
532         struct drm_buffer_object *tmp_bo = *bo;
533         struct drm_device *dev = tmp_bo->dev;
534
535         *bo = NULL;
536         if (atomic_dec_and_test(&tmp_bo->usage)) {
537                 mutex_lock(&dev->struct_mutex);
538                 if (atomic_read(&tmp_bo->usage) == 0)
539                         drm_bo_destroy_locked(tmp_bo);
540                 mutex_unlock(&dev->struct_mutex);
541         }
542 }
543 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
544
545 void drm_putback_buffer_objects(struct drm_device *dev)
546 {
547         struct drm_buffer_manager *bm = &dev->bm;
548         struct list_head *list = &bm->unfenced;
549         struct drm_buffer_object *entry, *next;
550
551         mutex_lock(&dev->struct_mutex);
552         list_for_each_entry_safe(entry, next, list, lru) {
553                 atomic_inc(&entry->usage);
554                 mutex_unlock(&dev->struct_mutex);
555
556                 mutex_lock(&entry->mutex);
557                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
558                 mutex_lock(&dev->struct_mutex);
559
560                 list_del_init(&entry->lru);
561                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
562                 wake_up_all(&entry->event_queue);
563
564                 /*
565                  * FIXME: Might want to put back on head of list
566                  * instead of tail here.
567                  */
568
569                 drm_bo_add_to_lru(entry);
570                 mutex_unlock(&entry->mutex);
571                 drm_bo_usage_deref_locked(&entry);
572         }
573         mutex_unlock(&dev->struct_mutex);
574 }
575 EXPORT_SYMBOL(drm_putback_buffer_objects);
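/*
 * Sketch of a typical caller, with the driver function name purely
 * illustrative: a command-submission error path puts the not-yet-fenced
 * buffers back on their LRU lists, while the success path fences them
 * via drm_fence_buffer_objects() below.
 *
 *	ret = illustrative_driver_emit(dev, ...);
 *	if (ret)
 *		drm_putback_buffer_objects(dev);
 *	else
 *		ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 */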
576
577
578 /*
579  * Note. The caller has to register (if applicable)
580  * and deregister fence object usage.
581  */
582
583 int drm_fence_buffer_objects(struct drm_device *dev,
584                              struct list_head *list,
585                              uint32_t fence_flags,
586                              struct drm_fence_object *fence,
587                              struct drm_fence_object **used_fence)
588 {
589         struct drm_buffer_manager *bm = &dev->bm;
590         struct drm_buffer_object *entry;
591         uint32_t fence_type = 0;
592         uint32_t fence_class = ~0;
593         int count = 0;
594         int ret = 0;
595         struct list_head *l;
596
597         mutex_lock(&dev->struct_mutex);
598
599         if (!list)
600                 list = &bm->unfenced;
601
602         if (fence)
603                 fence_class = fence->fence_class;
604
605         list_for_each_entry(entry, list, lru) {
606                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
607                 fence_type |= entry->new_fence_type;
608                 if (fence_class == ~0)
609                         fence_class = entry->new_fence_class;
610                 else if (entry->new_fence_class != fence_class) {
611                         DRM_ERROR("Mismatched fence classes on unfenced list: "
612                                   "%d and %d.\n",
613                                   fence_class,
614                                   entry->new_fence_class);
615                         ret = -EINVAL;
616                         goto out;
617                 }
618                 count++;
619         }
620
621         if (!count) {
622                 ret = -EINVAL;
623                 goto out;
624         }
625
626         if (fence) {
627                 if ((fence_type & fence->type) != fence_type ||
628                     (fence->fence_class != fence_class)) {
629                         DRM_ERROR("Given fence doesn't match buffers "
630                                   "on unfenced list.\n");
631                         ret = -EINVAL;
632                         goto out;
633                 }
634         } else {
635                 mutex_unlock(&dev->struct_mutex);
636                 ret = drm_fence_object_create(dev, fence_class, fence_type,
637                                               fence_flags | DRM_FENCE_FLAG_EMIT,
638                                               &fence);
639                 mutex_lock(&dev->struct_mutex);
640                 if (ret)
641                         goto out;
642         }
643
644         count = 0;
645         l = list->next;
646         while (l != list) {
647                 prefetch(l->next);
648                 entry = list_entry(l, struct drm_buffer_object, lru);
649                 atomic_inc(&entry->usage);
650                 mutex_unlock(&dev->struct_mutex);
651                 mutex_lock(&entry->mutex);
652                 mutex_lock(&dev->struct_mutex);
653                 list_del_init(l);
654                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
655                         count++;
656                         if (entry->fence)
657                                 drm_fence_usage_deref_locked(&entry->fence);
658                         entry->fence = drm_fence_reference_locked(fence);
659                         entry->fence_class = entry->new_fence_class;
660                         entry->fence_type = entry->new_fence_type;
661                         DRM_FLAG_MASKED(entry->priv_flags, 0,
662                                         _DRM_BO_FLAG_UNFENCED);
663                         wake_up_all(&entry->event_queue);
664                         drm_bo_add_to_lru(entry);
665                 }
666                 mutex_unlock(&entry->mutex);
667                 drm_bo_usage_deref_locked(&entry);
668                 l = list->next;
669         }
670         DRM_DEBUG("Fenced %d buffers\n", count);
671 out:
672         mutex_unlock(&dev->struct_mutex);
673         *used_fence = fence;
674         return ret;
675 }
676 EXPORT_SYMBOL(drm_fence_buffer_objects);
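/*
 * A minimal sketch of fencing the default unfenced list after submission.
 * Passing a NULL fence asks the function to create and emit one; the
 * reference handed back in @used_fence belongs to the caller, per the note
 * above, and is dropped here once it is no longer needed.
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (ret)
 *		return ret;
 *	... hand the fence to user space or wait on it ...
 *	drm_fence_usage_deref_unlocked(&fence);
 */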
677
678 /*
679  * bo->mutex locked
680  */
681
682 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
683                         int no_wait)
684 {
685         int ret = 0;
686         struct drm_device *dev = bo->dev;
687         struct drm_bo_mem_reg evict_mem;
688
689         /*
690          * Someone might have modified the buffer before we took the
691          * buffer mutex.
692          */
693
694         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
695                 goto out;
696         if (bo->mem.mem_type != mem_type)
697                 goto out;
698
699         ret = drm_bo_wait(bo, 0, 0, no_wait);
700
701         if (ret && ret != -EAGAIN) {
702                 DRM_ERROR("Failed to expire fence before "
703                           "buffer eviction.\n");
704                 goto out;
705         }
706
707         evict_mem = bo->mem;
708         evict_mem.mm_node = NULL;
709         evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
712         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
713
714         if (ret) {
715                 if (ret != -EAGAIN)
716                         DRM_ERROR("Failed to find memory space for "
717                                   "buffer 0x%p eviction.\n", bo);
718                 goto out;
719         }
720
721         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
722
723         if (ret) {
724                 if (ret != -EAGAIN)
725                         DRM_ERROR("Buffer eviction failed\n");
726                 goto out;
727         }
728
729         mutex_lock(&dev->struct_mutex);
730         if (evict_mem.mm_node) {
731                 if (evict_mem.mm_node != bo->pinned_node)
732                         drm_mm_put_block(evict_mem.mm_node);
733                 evict_mem.mm_node = NULL;
734         }
735         list_del(&bo->lru);
736         drm_bo_add_to_lru(bo);
737         mutex_unlock(&dev->struct_mutex);
738
739         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
740                         _DRM_BO_FLAG_EVICTED);
741
742 out:
743         return ret;
744 }
745
746 /**
747  * Repeatedly evict memory from the LRU for @mem_type until we create enough
748  * space, or we've evicted everything and there isn't enough space.
749  */
750 static int drm_bo_mem_force_space(struct drm_device *dev,
751                                   struct drm_bo_mem_reg *mem,
752                                   uint32_t mem_type, int no_wait)
753 {
754         struct drm_mm_node *node;
755         struct drm_buffer_manager *bm = &dev->bm;
756         struct drm_buffer_object *entry;
757         struct drm_mem_type_manager *man = &bm->man[mem_type];
758         struct list_head *lru;
759         unsigned long num_pages = mem->num_pages;
760         int ret;
761
762         mutex_lock(&dev->struct_mutex);
763         do {
764                 node = drm_mm_search_free(&man->manager, num_pages,
765                                           mem->page_alignment, 1);
766                 if (node)
767                         break;
768
769                 lru = &man->lru;
770                 if (lru->next == lru)
771                         break;
772
773                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
774                 atomic_inc(&entry->usage);
775                 mutex_unlock(&dev->struct_mutex);
776                 mutex_lock(&entry->mutex);
777                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
778
779                 ret = drm_bo_evict(entry, mem_type, no_wait);
780                 mutex_unlock(&entry->mutex);
781                 drm_bo_usage_deref_unlocked(&entry);
782                 if (ret)
783                         return ret;
784                 mutex_lock(&dev->struct_mutex);
785         } while (1);
786
787         if (!node) {
788                 mutex_unlock(&dev->struct_mutex);
789                 return -ENOMEM;
790         }
791
792         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
793         mutex_unlock(&dev->struct_mutex);
794         mem->mm_node = node;
795         mem->mem_type = mem_type;
796         return 0;
797 }
798
799 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
800                                 int disallow_fixed,
801                                 uint32_t mem_type,
802                                 uint64_t mask, uint32_t *res_mask)
803 {
804         uint64_t cur_flags = drm_bo_type_flags(mem_type);
805         uint64_t flag_diff;
806
807         if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
808                 return 0;
809         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
810                 cur_flags |= DRM_BO_FLAG_CACHED;
811         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
812                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
813         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
814                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
815
816         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
817                 return 0;
818
819         if (mem_type == DRM_BO_MEM_LOCAL) {
820                 *res_mask = cur_flags;
821                 return 1;
822         }
823
824         flag_diff = (mask ^ cur_flags);
825         if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
826                 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
827
828         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
829             (!(mask & DRM_BO_FLAG_CACHED) ||
830              (mask & DRM_BO_FLAG_FORCE_CACHING)))
831                 return 0;
832
833         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
834             ((mask & DRM_BO_FLAG_MAPPABLE) ||
835              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
836                 return 0;
837
838         *res_mask = cur_flags;
839         return 1;
840 }
841
842 /**
843  * Creates space for memory region @mem according to its type.
844  *
845  * This function first searches for free space in compatible memory types in
846  * the priority order defined by the driver.  If free space isn't found, then
847  * drm_bo_mem_force_space is attempted in priority order to evict and find
848  * space.
849  */
850 int drm_bo_mem_space(struct drm_buffer_object *bo,
851                      struct drm_bo_mem_reg *mem, int no_wait)
852 {
853         struct drm_device *dev = bo->dev;
854         struct drm_buffer_manager *bm = &dev->bm;
855         struct drm_mem_type_manager *man;
856
857         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
858         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
859         uint32_t i;
860         uint32_t mem_type = DRM_BO_MEM_LOCAL;
861         uint32_t cur_flags;
862         int type_found = 0;
863         int type_ok = 0;
864         int has_eagain = 0;
865         struct drm_mm_node *node = NULL;
866         int ret;
867
868         mem->mm_node = NULL;
869         for (i = 0; i < num_prios; ++i) {
870                 mem_type = prios[i];
871                 man = &bm->man[mem_type];
872
873                 type_ok = drm_bo_mt_compatible(man,
874                                                bo->type == drm_bo_type_user,
875                                                mem_type, mem->proposed_flags,
876                                                &cur_flags);
877
878                 if (!type_ok)
879                         continue;
880
881                 if (mem_type == DRM_BO_MEM_LOCAL)
882                         break;
883
884                 if ((mem_type == bo->pinned_mem_type) &&
885                     (bo->pinned_node != NULL)) {
886                         node = bo->pinned_node;
887                         break;
888                 }
889
890                 mutex_lock(&dev->struct_mutex);
891                 if (man->has_type && man->use_type) {
892                         type_found = 1;
893                         node = drm_mm_search_free(&man->manager, mem->num_pages,
894                                                   mem->page_alignment, 1);
895                         if (node)
896                                 node = drm_mm_get_block(node, mem->num_pages,
897                                                         mem->page_alignment);
898                 }
899                 mutex_unlock(&dev->struct_mutex);
900                 if (node)
901                         break;
902         }
903
904         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
905                 mem->mm_node = node;
906                 mem->mem_type = mem_type;
907                 mem->flags = cur_flags;
908                 return 0;
909         }
910
911         if (!type_found)
912                 return -EINVAL;
913
914         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
915         prios = dev->driver->bo_driver->mem_busy_prio;
916
917         for (i = 0; i < num_prios; ++i) {
918                 mem_type = prios[i];
919                 man = &bm->man[mem_type];
920
921                 if (!man->has_type)
922                         continue;
923
924                 if (!drm_bo_mt_compatible(man,
925                                           bo->type == drm_bo_type_user,
926                                           mem_type,
927                                           mem->proposed_flags,
928                                           &cur_flags))
929                         continue;
930
931                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
932
933                 if (ret == 0 && mem->mm_node) {
934                         mem->flags = cur_flags;
935                         return 0;
936                 }
937
938                 if (ret == -EAGAIN)
939                         has_eagain = 1;
940         }
941
942         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
943         return ret;
944 }
945 EXPORT_SYMBOL(drm_bo_mem_space);
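/*
 * The search above is driven entirely by the driver's priority tables.
 * A hypothetical driver (names illustrative) could publish
 *
 *	static uint32_t illustrative_mem_prios[]  =
 *		{ DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
 *	static uint32_t illustrative_busy_prios[] =
 *		{ DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
 *
 * through the mem_type_prio / mem_busy_prio pointers (and the matching
 * num_* counts) in its struct drm_bo_driver, so free space is looked for
 * in VRAM first and eviction is only attempted in TT and local memory.
 */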
946
947 /*
948  * drm_bo_modify_proposed_flags:
949  *
950  * @bo: the buffer object getting new flags
951  *
952  * @new_flags: the new set of proposed flag bits
953  *
954  * @new_mask: the mask of bits changed in new_flags
955  *
956  * Modify the proposed_flags bits in @bo
957  */
958 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
959                                          uint64_t new_flags, uint64_t new_mask)
960 {
961         uint32_t new_access;
962
963         /* Copy unchanging bits from existing proposed_flags */
964         DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
965          
966         if (bo->type == drm_bo_type_user &&
967             ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
968              (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
969                 DRM_ERROR("User buffers require cache-coherent memory.\n");
970                 return -EINVAL;
971         }
972
973         if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
974                 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
975                 return -EPERM;
976         }
977
978         if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
979                 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
980                 return -EPERM;
981         }
982
983         new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
984                                   DRM_BO_FLAG_READ);
985
986         if (new_access == 0) {
987                 DRM_ERROR("Invalid buffer object rwx properties\n");
988                 return -EINVAL;
989         }
990
991         bo->mem.proposed_flags = new_flags;
992         return 0;
993 }
994
995 /*
996  * Call dev->struct_mutex locked.
997  */
998
999 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1000                                               uint32_t handle, int check_owner)
1001 {
1002         struct drm_user_object *uo;
1003         struct drm_buffer_object *bo;
1004
1005         uo = drm_lookup_user_object(file_priv, handle);
1006
1007         if (!uo || (uo->type != drm_buffer_type)) {
1008                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1009                 return NULL;
1010         }
1011
1012         if (check_owner && file_priv != uo->owner) {
1013                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1014                         return NULL;
1015         }
1016
1017         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1018         atomic_inc(&bo->usage);
1019         return bo;
1020 }
1021 EXPORT_SYMBOL(drm_lookup_buffer_object);
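/*
 * Usage sketch, mirroring the ioctl helpers later in this file: the lookup
 * is done with dev->struct_mutex held and returns with bo->usage elevated,
 * so the caller drops that reference when it is done with the object.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!bo)
 *		return -EINVAL;
 *
 *	... use bo ...
 *
 *	drm_bo_usage_deref_unlocked(&bo);
 */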
1022
1023 /*
1024  * Call bo->mutex locked.
1025  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1026  * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
1027  */
1028
1029 static int drm_bo_quick_busy(struct drm_buffer_object *bo)
1030 {
1031         struct drm_fence_object *fence = bo->fence;
1032
1033         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1034         if (fence) {
1035                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1036                         drm_fence_usage_deref_unlocked(&bo->fence);
1037                         return 0;
1038                 }
1039                 return 1;
1040         }
1041         return 0;
1042 }
1043
1044 /*
1045  * Call bo->mutex locked.
1046  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1047  */
1048
1049 static int drm_bo_busy(struct drm_buffer_object *bo)
1050 {
1051         struct drm_fence_object *fence = bo->fence;
1052
1053         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1054         if (fence) {
1055                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1056                         drm_fence_usage_deref_unlocked(&bo->fence);
1057                         return 0;
1058                 }
1059                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1060                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1061                         drm_fence_usage_deref_unlocked(&bo->fence);
1062                         return 0;
1063                 }
1064                 return 1;
1065         }
1066         return 0;
1067 }
1068
1069 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1070 {
1071         int ret = 0;
1072
1073         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1074         if (bo->mem.mm_node)
1075                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1076         return ret;
1077 }
1078
1079 EXPORT_SYMBOL(drm_bo_evict_cached);
1080 /*
1081  * Wait until a buffer is unmapped.
1082  */
1083
1084 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1085 {
1086         int ret = 0;
1087
1088         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1089                 return -EBUSY;
1090
1091         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1092                     atomic_read(&bo->mapped) == -1);
1093
1094         if (ret == -EINTR)
1095                 ret = -EAGAIN;
1096
1097         return ret;
1098 }
1099
1100 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
1101 {
1102         int ret;
1103
1104         mutex_lock(&bo->mutex);
1105         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1106         mutex_unlock(&bo->mutex);
1107         return ret;
1108 }
1109
1110 /*
1111  * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
1112  * Until then, we cannot really do anything with it except delete it.
1113  */
1114
1115 static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
1116                                 int eagain_if_wait)
1117 {
1118         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1119
1120         if (ret && no_wait)
1121                 return -EBUSY;
1122         else if (!ret)
1123                 return 0;
1124
1125         ret = 0;
1126         mutex_unlock(&bo->mutex);
1127         DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
1128                      !drm_bo_check_unfenced(bo));
1129         mutex_lock(&bo->mutex);
1130         if (ret == -EINTR)
1131                 return -EAGAIN;
1132         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1133         if (ret) {
1134                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1135                 return -EBUSY;
1136         }
1137         if (eagain_if_wait)
1138                 return -EAGAIN;
1139
1140         return 0;
1141 }
1142
1143 /*
1144  * Fill in the ioctl reply argument with buffer info.
1145  * Bo locked.
1146  */
1147
1148 static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1149                                 struct drm_bo_info_rep *rep)
1150 {
1151         if (!rep)
1152                 return;
1153
1154         rep->handle = bo->base.hash.key;
1155         rep->flags = bo->mem.flags;
1156         rep->size = bo->num_pages * PAGE_SIZE;
1157         rep->offset = bo->offset;
1158
1159         /*
1160          * drm_bo_type_device buffers have user-visible
1161          * handles which can be used to share across
1162          * processes. Hand that back to the application
1163          */
1164         if (bo->type == drm_bo_type_device)
1165                 rep->arg_handle = bo->map_list.user_token;
1166         else
1167                 rep->arg_handle = 0;
1168
1169         rep->proposed_flags = bo->mem.proposed_flags;
1170         rep->buffer_start = bo->buffer_start;
1171         rep->fence_flags = bo->fence_type;
1172         rep->rep_flags = 0;
1173         rep->page_alignment = bo->mem.page_alignment;
1174
1175         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1176                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1177                                 DRM_BO_REP_BUSY);
1178         }
1179 }
1180
1181 /*
1182  * Wait for buffer idle and register that we've mapped the buffer.
1183  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1184  * so that if the client dies, the mapping is automatically
1185  * unregistered.
1186  */
1187
1188 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1189                                  uint32_t map_flags, unsigned hint,
1190                                  struct drm_bo_info_rep *rep)
1191 {
1192         struct drm_buffer_object *bo;
1193         struct drm_device *dev = file_priv->minor->dev;
1194         int ret = 0;
1195         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1196
1197         mutex_lock(&dev->struct_mutex);
1198         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1199         mutex_unlock(&dev->struct_mutex);
1200
1201         if (!bo)
1202                 return -EINVAL;
1203
1204         mutex_lock(&bo->mutex);
1205         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1206         if (ret)
1207                 goto out;
1208
1209         /*
1210          * If this returns true, we are currently unmapped.
1211          * We need to do this test, because unmapping can
1212          * be done without the bo->mutex held.
1213          */
1214
1215         while (1) {
1216                 if (atomic_inc_and_test(&bo->mapped)) {
1217                         if (no_wait && drm_bo_busy(bo)) {
1218                                 atomic_dec(&bo->mapped);
1219                                 ret = -EBUSY;
1220                                 goto out;
1221                         }
1222                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1223                         if (ret) {
1224                                 atomic_dec(&bo->mapped);
1225                                 goto out;
1226                         }
1227
1228                         if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1229                                 drm_bo_evict_cached(bo);
1230
1231                         break;
1232                 } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
1233
1234                         /*
1235                          * We are already mapped with different flags.
1236                          * We need to wait for the unmap.
1237                          */
1238
1239                         ret = drm_bo_wait_unmapped(bo, no_wait);
1240                         if (ret)
1241                                 goto out;
1242
1243                         continue;
1244                 }
1245                 break;
1246         }
1247
1248         mutex_lock(&dev->struct_mutex);
1249         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1250         mutex_unlock(&dev->struct_mutex);
1251         if (ret) {
1252                 if (atomic_add_negative(-1, &bo->mapped))
1253                         wake_up_all(&bo->event_queue);
1254
1255         } else
1256                 drm_bo_fill_rep_arg(bo, rep);
1257 out:
1258         mutex_unlock(&bo->mutex);
1259         drm_bo_usage_deref_unlocked(&bo);
1260         return ret;
1261 }
1262
1263 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1264 {
1265         struct drm_device *dev = file_priv->minor->dev;
1266         struct drm_buffer_object *bo;
1267         struct drm_ref_object *ro;
1268         int ret = 0;
1269
1270         mutex_lock(&dev->struct_mutex);
1271
1272         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1273         if (!bo) {
1274                 ret = -EINVAL;
1275                 goto out;
1276         }
1277
1278         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1279         if (!ro) {
1280                 ret = -EINVAL;
1281                 goto out;
1282         }
1283
1284         drm_remove_ref_object(file_priv, ro);
1285         drm_bo_usage_deref_locked(&bo);
1286 out:
1287         mutex_unlock(&dev->struct_mutex);
1288         return ret;
1289 }
1290
1291 /*
1292  * Call struct-sem locked.
1293  */
1294
1295 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1296                                          struct drm_user_object *uo,
1297                                          enum drm_ref_type action)
1298 {
1299         struct drm_buffer_object *bo =
1300             drm_user_object_entry(uo, struct drm_buffer_object, base);
1301
1302         /*
1303          * We DON'T want to take the bo->lock here, because we want to
1304          * hold it when we wait for unmapped buffer.
1305          */
1306
1307         BUG_ON(action != _DRM_REF_TYPE1);
1308
1309         if (atomic_add_negative(-1, &bo->mapped))
1310                 wake_up_all(&bo->event_queue);
1311 }
1312
1313 /*
1314  * bo->mutex locked.
1315  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1316  */
1317
1318 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1319                        int no_wait, int move_unfenced)
1320 {
1321         struct drm_device *dev = bo->dev;
1322         struct drm_buffer_manager *bm = &dev->bm;
1323         int ret = 0;
1324         struct drm_bo_mem_reg mem;
1325         /*
1326          * Flush outstanding fences.
1327          */
1328
1329         drm_bo_busy(bo);
1330
1331         /*
1332          * Wait for outstanding fences.
1333          */
1334
1335         ret = drm_bo_wait(bo, 0, 0, no_wait);
1336         if (ret)
1337                 return ret;
1338
1339         mem.num_pages = bo->num_pages;
1340         mem.size = mem.num_pages << PAGE_SHIFT;
1341         mem.proposed_flags = new_mem_flags;
1342         mem.page_alignment = bo->mem.page_alignment;
1343
1344         mutex_lock(&bm->evict_mutex);
1345         mutex_lock(&dev->struct_mutex);
1346         list_del_init(&bo->lru);
1347         mutex_unlock(&dev->struct_mutex);
1348
1349         /*
1350          * Determine where to move the buffer.
1351          */
1352         ret = drm_bo_mem_space(bo, &mem, no_wait);
1353         if (ret)
1354                 goto out_unlock;
1355
1356         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1357
1358 out_unlock:
1359         mutex_lock(&dev->struct_mutex);
1360         if (ret || !move_unfenced) {
1361                 if (mem.mm_node) {
1362                         if (mem.mm_node != bo->pinned_node)
1363                                 drm_mm_put_block(mem.mm_node);
1364                         mem.mm_node = NULL;
1365                 }
1366                 drm_bo_add_to_lru(bo);
1367                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1368                         wake_up_all(&bo->event_queue);
1369                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1370                                         _DRM_BO_FLAG_UNFENCED);
1371                 }
1372         } else {
1373                 list_add_tail(&bo->lru, &bm->unfenced);
1374                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1375                                 _DRM_BO_FLAG_UNFENCED);
1376         }
1377         mutex_unlock(&dev->struct_mutex);
1378         mutex_unlock(&bm->evict_mutex);
1379         return ret;
1380 }
1381
1382 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1383 {
1384         uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1385
1386         if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1387                 return 0;
1388         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1389             (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1390              (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1391                 return 0;
1392
1393         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1394             ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1395              (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1396                 return 0;
1397         return 1;
1398 }
1399
1400 /**
1401  * drm_buffer_object_validate:
1402  *
1403  * @bo: the buffer object to modify
1404  *
1405  * @fence_class: the new fence class covering this buffer
1406  *
1407  * @move_unfenced: a boolean indicating whether switching the
1408  * memory space of this buffer should cause the buffer to
1409  * be placed on the unfenced list.
1410  *
1411  * @no_wait: whether this function should return -EBUSY instead
1412  * of waiting.
1413  *
1414  * Change buffer access parameters. This can involve moving
1415  * the buffer to the correct memory type, pinning the buffer
1416  * or changing the class/type of fence covering this buffer
1417  *
1418  * Must be called with bo locked.
1419  */
1420
1421 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1422                                       uint32_t fence_class,
1423                                       int move_unfenced, int no_wait)
1424 {
1425         struct drm_device *dev = bo->dev;
1426         struct drm_buffer_manager *bm = &dev->bm;
1427         struct drm_bo_driver *driver = dev->driver->bo_driver;
1428         uint32_t ftype;
1429         int ret;
1430
1431         DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1432                   (unsigned long long) bo->mem.proposed_flags,
1433                   (unsigned long long) bo->mem.flags);
1434
1435         ret = driver->fence_type(bo, &fence_class, &ftype);
1436
1437         if (ret) {
1438                 DRM_ERROR("Driver did not support given buffer permissions\n");
1439                 return ret;
1440         }
1441
1442         /*
1443          * We're switching command submission mechanism,
1444          * or cannot simply rely on the hardware serializing for us.
1445          *
1446          * Insert a driver-dependent barrier or wait for buffer idle.
1447          */
1448
1449         if ((fence_class != bo->fence_class) ||
1450             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1451
1452                 ret = -EINVAL;
1453                 if (driver->command_stream_barrier) {
1454                         ret = driver->command_stream_barrier(bo,
1455                                                              fence_class,
1456                                                              ftype,
1457                                                              no_wait);
1458                 }
1459                 if (ret)
1460                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1461
1462                 if (ret)
1463                         return ret;
1464
1465         }
1466
1467         bo->new_fence_class = fence_class;
1468         bo->new_fence_type = ftype;
1469
1470         ret = drm_bo_wait_unmapped(bo, no_wait);
1471         if (ret) {
1472                 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1473                 return ret;
1474         }
1475
1476         /*
1477          * Check whether we need to move buffer.
1478          */
1479
1480         if (!drm_bo_mem_compat(&bo->mem)) {
1481                 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1482                                          move_unfenced);
1483                 if (ret) {
1484                         if (ret != -EAGAIN)
1485                                 DRM_ERROR("Failed moving buffer.\n");
1486                         return ret;
1487                 }
1488         }
1489
1490         /*
1491          * Pinned buffers.
1492          */
1493
1494         if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1495                 bo->pinned_mem_type = bo->mem.mem_type;
1496                 mutex_lock(&dev->struct_mutex);
1497                 list_del_init(&bo->pinned_lru);
1498                 drm_bo_add_to_pinned_lru(bo);
1499
1500                 if (bo->pinned_node != bo->mem.mm_node) {
1501                         if (bo->pinned_node != NULL)
1502                                 drm_mm_put_block(bo->pinned_node);
1503                         bo->pinned_node = bo->mem.mm_node;
1504                 }
1505
1506                 mutex_unlock(&dev->struct_mutex);
1507
1508         } else if (bo->pinned_node != NULL) {
1509
1510                 mutex_lock(&dev->struct_mutex);
1511
1512                 if (bo->pinned_node != bo->mem.mm_node)
1513                         drm_mm_put_block(bo->pinned_node);
1514
1515                 list_del_init(&bo->pinned_lru);
1516                 bo->pinned_node = NULL;
1517                 mutex_unlock(&dev->struct_mutex);
1518
1519         }
1520
1521         /*
1522          * We might need to add a TTM.
1523          */
1524
1525         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1526                 ret = drm_bo_add_ttm(bo);
1527                 if (ret)
1528                         return ret;
1529         }
1530         /*
1531          * Validation has succeeded, move the access and other
1532          * non-mapping-related flag bits from the proposed flags to
1533          * the active flags
1534          */
1535
1536         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1537
1538         /*
1539          * Finally, adjust lru to be sure.
1540          */
1541
1542         mutex_lock(&dev->struct_mutex);
1543         list_del(&bo->lru);
1544         if (move_unfenced) {
1545                 list_add_tail(&bo->lru, &bm->unfenced);
1546                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1547                                 _DRM_BO_FLAG_UNFENCED);
1548         } else {
1549                 drm_bo_add_to_lru(bo);
1550                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1551                         wake_up_all(&bo->event_queue);
1552                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1553                                         _DRM_BO_FLAG_UNFENCED);
1554                 }
1555         }
1556         mutex_unlock(&dev->struct_mutex);
1557
1558         return 0;
1559 }
1560
1561 /**
1562  * drm_bo_do_validate:
1563  *
1564  * @bo: the buffer object
1565  *
1566  * @flags: access rights, mapping parameters and cacheability. See
1567  * the DRM_BO_FLAG_* values in drm.h
1568  *
1569  * @mask: Which flag values to change; this allows callers to modify
1570  * things without knowing the current state of other flags.
1571  *
1572  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1573  * values in drm.h.
1574  *
1575  * @fence_class: a driver-specific way of doing fences. Presumably,
1576  * this would be used if the driver had more than one submission and
1577  * fencing mechanism. At this point, there isn't any use of this
1578  * from the user mode code.
1579  *
1580  * @rep: To be stuffed with the reply from validation
1581  * 
1582  * 'validate' a buffer object. This changes where the buffer is
1583  * located, along with changing access modes.
1584  */
1585
1586 int drm_bo_do_validate(struct drm_buffer_object *bo,
1587                        uint64_t flags, uint64_t mask, uint32_t hint,
1588                        uint32_t fence_class,
1589                        struct drm_bo_info_rep *rep)
1590 {
1591         int ret;
1592         int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1593
1594         mutex_lock(&bo->mutex);
1595         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1596
1597         if (ret)
1598                 goto out;
1599
1600         ret = drm_bo_modify_proposed_flags(bo, flags, mask);
1601         if (ret)
1602                 goto out;
1603
1604         ret = drm_buffer_object_validate(bo,
1605                                          fence_class,
1606                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1607                                          no_wait);
1608 out:
1609         if (rep)
1610                 drm_bo_fill_rep_arg(bo, rep);
1611
1612         mutex_unlock(&bo->mutex);
1613         return ret;
1614 }
1615 EXPORT_SYMBOL(drm_bo_do_validate);
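
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * driver might pin an already-created buffer object into VRAM with
 * drm_bo_do_validate().  The wrapper name and the choice of
 * DRM_BO_FLAG_MEM_VRAM are assumptions made purely for the example.
 *
 *        static int foo_pin_in_vram(struct drm_buffer_object *bo)
 *        {
 *                uint64_t flags = DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT;
 *
 *                return drm_bo_do_validate(bo, flags, flags,
 *                                          DRM_BO_HINT_DONT_FENCE, 0, NULL);
 *        }
 */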
1616
1617 /**
1618  * drm_bo_handle_validate
1619  *
1620  * @file_priv: the drm file private, used to get a handle to the user context
1621  *
1622  * @handle: the buffer object handle
1623  *
1624  * @flags: access rights, mapping parameters and cacheability. See
1625  * the DRM_BO_FLAG_* values in drm.h
1626  *
1627  * @mask: Which flag values to change; this allows callers to modify
1628  * things without knowing the current state of other flags.
1629  *
1630  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1631  * values in drm.h.
1632  *
1633  * @fence_class: a driver-specific way of doing fences. Presumably,
1634  * this would be used if the driver had more than one submission and
1635  * fencing mechanism. At this point, there isn't any use of this
1636  * from the user mode code.
1637  *
1638  * @use_old_fence_class: don't change fence class, pull it from the buffer object
1639  *
1640  * @rep: To be stuffed with the reply from validation
1641  * 
1642  * @bo_rep: To be stuffed with the buffer object pointer
1643  *
1644  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
1645  * Some permission checking is done on the parameters; otherwise this
1646  * is a thin wrapper around drm_bo_do_validate().
1647  */
1648
1649 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1650                            uint64_t flags, uint64_t mask,
1651                            uint32_t hint,
1652                            uint32_t fence_class,
1653                            int use_old_fence_class,
1654                            struct drm_bo_info_rep *rep,
1655                            struct drm_buffer_object **bo_rep)
1656 {
1657         struct drm_device *dev = file_priv->minor->dev;
1658         struct drm_buffer_object *bo;
1659         int ret;
1660
1661         mutex_lock(&dev->struct_mutex);
1662         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1663         mutex_unlock(&dev->struct_mutex);
1664
1665         if (!bo)
1666                 return -EINVAL;
1667
1668         if (use_old_fence_class)
1669                 fence_class = bo->fence_class;
1670
1671         /*
1672          * Only allow creator to change shared buffer mask.
1673          */
1674
1675         if (bo->base.owner != file_priv)
1676                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1677
1678
1679         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1680
1681         if (!ret && bo_rep)
1682                 *bo_rep = bo;
1683         else
1684                 drm_bo_usage_deref_unlocked(&bo);
1685
1686         return ret;
1687 }
1688 EXPORT_SYMBOL(drm_bo_handle_validate);
1689
1690 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1691                               struct drm_bo_info_rep *rep)
1692 {
1693         struct drm_device *dev = file_priv->minor->dev;
1694         struct drm_buffer_object *bo;
1695
1696         mutex_lock(&dev->struct_mutex);
1697         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1698         mutex_unlock(&dev->struct_mutex);
1699
1700         if (!bo)
1701                 return -EINVAL;
1702
1703         mutex_lock(&bo->mutex);
1704         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1705                 (void)drm_bo_busy(bo);
1706         drm_bo_fill_rep_arg(bo, rep);
1707         mutex_unlock(&bo->mutex);
1708         drm_bo_usage_deref_unlocked(&bo);
1709         return 0;
1710 }
1711
1712 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1713                               uint32_t hint,
1714                               struct drm_bo_info_rep *rep)
1715 {
1716         struct drm_device *dev = file_priv->minor->dev;
1717         struct drm_buffer_object *bo;
1718         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1719         int ret;
1720
1721         mutex_lock(&dev->struct_mutex);
1722         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1723         mutex_unlock(&dev->struct_mutex);
1724
1725         if (!bo)
1726                 return -EINVAL;
1727
1728         mutex_lock(&bo->mutex);
1729         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1730         if (ret)
1731                 goto out;
1732         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1733         if (ret)
1734                 goto out;
1735
1736         drm_bo_fill_rep_arg(bo, rep);
1737
1738 out:
1739         mutex_unlock(&bo->mutex);
1740         drm_bo_usage_deref_unlocked(&bo);
1741         return ret;
1742 }
1743
1744 int drm_buffer_object_create(struct drm_device *dev,
1745                              unsigned long size,
1746                              enum drm_bo_type type,
1747                              uint64_t flags,
1748                              uint32_t hint,
1749                              uint32_t page_alignment,
1750                              unsigned long buffer_start,
1751                              struct drm_buffer_object **buf_obj)
1752 {
1753         struct drm_buffer_manager *bm = &dev->bm;
1754         struct drm_buffer_object *bo;
1755         int ret = 0;
1756         unsigned long num_pages;
1757
1758         size += buffer_start & ~PAGE_MASK;
1759         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1760         if (num_pages == 0) {
1761                 DRM_ERROR("Illegal buffer object size.\n");
1762                 return -EINVAL;
1763         }
1764
1765         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1766
1767         if (!bo)
1768                 return -ENOMEM;
1769
1770         mutex_init(&bo->mutex);
1771         mutex_lock(&bo->mutex);
1772
1773         atomic_set(&bo->usage, 1);
1774         atomic_set(&bo->mapped, -1);
1775         DRM_INIT_WAITQUEUE(&bo->event_queue);
1776         INIT_LIST_HEAD(&bo->lru);
1777         INIT_LIST_HEAD(&bo->pinned_lru);
1778         INIT_LIST_HEAD(&bo->ddestroy);
1779 #ifdef DRM_ODD_MM_COMPAT
1780         INIT_LIST_HEAD(&bo->p_mm_list);
1781         INIT_LIST_HEAD(&bo->vma_list);
1782 #endif
1783         bo->dev = dev;
1784         bo->type = type;
1785         bo->num_pages = num_pages;
1786         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1787         bo->mem.num_pages = bo->num_pages;
1788         bo->mem.mm_node = NULL;
1789         bo->mem.page_alignment = page_alignment;
1790         bo->buffer_start = buffer_start & PAGE_MASK;
1791         bo->priv_flags = 0;
1792         bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1793                          DRM_BO_FLAG_MAPPABLE);
1794         bo->mem.proposed_flags = 0;
1795         atomic_inc(&bm->count);
1796         /*
1797          * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1798          */
1799         ret = drm_bo_modify_proposed_flags(bo, flags, flags);
1800         if (ret)
1801                 goto out_err;
1802
1803         /*
1804          * For drm_bo_type_device buffers, allocate
1805          * address space from the device so that applications
1806          * can mmap the buffer from there
1807          */
1808         if (bo->type == drm_bo_type_device) {
1809                 mutex_lock(&dev->struct_mutex);
1810                 ret = drm_bo_setup_vm_locked(bo);
1811                 mutex_unlock(&dev->struct_mutex);
1812                 if (ret)
1813                         goto out_err;
1814         }
1815
1816         ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1817         if (ret)
1818                 goto out_err;
1819
1820         mutex_unlock(&bo->mutex);
1821         *buf_obj = bo;
1822         return 0;
1823
1824 out_err:
1825         mutex_unlock(&bo->mutex);
1826
1827         drm_bo_usage_deref_unlocked(&bo);
1828         return ret;
1829 }
1830 EXPORT_SYMBOL(drm_buffer_object_create);
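
/*
 * Usage sketch (illustrative only): creating a kernel-side buffer object
 * with this helper.  The size and flag choices are assumptions;
 * drm_bo_type_device is used so the object also gets an mmap offset.
 *
 *        struct drm_buffer_object *bo;
 *        int ret;
 *
 *        ret = drm_buffer_object_create(dev, PAGE_SIZE, drm_bo_type_device,
 *                                       DRM_BO_FLAG_MEM_LOCAL |
 *                                       DRM_BO_FLAG_CACHED |
 *                                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
 *                                       0, 0, 0, &bo);
 *        if (ret)
 *                return ret;
 *
 * The creation reference is dropped with drm_bo_usage_deref_unlocked(&bo)
 * when the driver is done with the object.
 */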
1831
1832
1833 static int drm_bo_add_user_object(struct drm_file *file_priv,
1834                                   struct drm_buffer_object *bo, int shareable)
1835 {
1836         struct drm_device *dev = file_priv->minor->dev;
1837         int ret;
1838
1839         mutex_lock(&dev->struct_mutex);
1840         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1841         if (ret)
1842                 goto out;
1843
1844         bo->base.remove = drm_bo_base_deref_locked;
1845         bo->base.type = drm_buffer_type;
1846         bo->base.ref_struct_locked = NULL;
1847         bo->base.unref = drm_buffer_user_object_unmap;
1848
1849 out:
1850         mutex_unlock(&dev->struct_mutex);
1851         return ret;
1852 }
1853
1854 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1855 {
1856         struct drm_bo_create_arg *arg = data;
1857         struct drm_bo_create_req *req = &arg->d.req;
1858         struct drm_bo_info_rep *rep = &arg->d.rep;
1859         struct drm_buffer_object *entry;
1860         enum drm_bo_type bo_type;
1861         int ret = 0;
1862
1863         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1864             (int)(req->size / 1024), req->page_alignment * 4);
1865
1866         if (!dev->bm.initialized) {
1867                 DRM_ERROR("Buffer object manager is not initialized.\n");
1868                 return -EINVAL;
1869         }
1870
1871         /*
1872          * If the request comes in with a starting address, it points at user
1873          * pages to map, so create a drm_bo_type_user buffer. Otherwise, create a
1874          * drm_bo_type_device buffer, which uses pages allocated from the kernel.
1875          */
1876         bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1877
1878         /*
1879          * User buffers cannot be shared
1880          */
1881         if (bo_type == drm_bo_type_user)
1882                 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1883
1884         ret = drm_buffer_object_create(file_priv->minor->dev,
1885                                        req->size, bo_type, req->flags,
1886                                        req->hint, req->page_alignment,
1887                                        req->buffer_start, &entry);
1888         if (ret)
1889                 goto out;
1890
1891         ret = drm_bo_add_user_object(file_priv, entry,
1892                                      req->flags & DRM_BO_FLAG_SHAREABLE);
1893         if (ret) {
1894                 drm_bo_usage_deref_unlocked(&entry);
1895                 goto out;
1896         }
1897
1898         mutex_lock(&entry->mutex);
1899         drm_bo_fill_rep_arg(entry, rep);
1900         mutex_unlock(&entry->mutex);
1901
1902 out:
1903         return ret;
1904 }
1905
1906 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1907                            void *data, struct drm_file *file_priv)
1908 {
1909         struct drm_bo_map_wait_idle_arg *arg = data;
1910         struct drm_bo_info_req *req = &arg->d.req;
1911         struct drm_bo_info_rep *rep = &arg->d.rep;
1912         int ret;
1913
1914         if (!dev->bm.initialized) {
1915                 DRM_ERROR("Buffer object manager is not initialized.\n");
1916                 return -EINVAL;
1917         }
1918
1919         ret = drm_bo_read_lock(&dev->bm.bm_lock);
1920         if (ret)
1921                 return ret;
1922
1923         /*
1924          * Validate the buffer. Note that 'fence_class' will be unused
1925          * because we pass use_old_fence_class=1 here. Note also that
1926          * the libdrm API doesn't pass fence_class to the kernel,
1927          * so it's just as well that it isn't used here.
1928          */
1929         ret = drm_bo_handle_validate(file_priv, req->handle,
1930                                      req->flags,
1931                                      req->mask,
1932                                      req->hint | DRM_BO_HINT_DONT_FENCE,
1933                                      req->fence_class, 1,
1934                                      rep, NULL);
1935
1936         (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1937         if (ret)
1938                 return ret;
1939
1940         return 0;
1941 }
1942
1943 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1944 {
1945         struct drm_bo_map_wait_idle_arg *arg = data;
1946         struct drm_bo_info_req *req = &arg->d.req;
1947         struct drm_bo_info_rep *rep = &arg->d.rep;
1948         int ret;
1949         if (!dev->bm.initialized) {
1950                 DRM_ERROR("Buffer object manager is not initialized.\n");
1951                 return -EINVAL;
1952         }
1953
1954         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1955                                     req->hint, rep);
1956         if (ret)
1957                 return ret;
1958
1959         return 0;
1960 }
1961
1962 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1963 {
1964         struct drm_bo_handle_arg *arg = data;
1965         int ret;
1966         if (!dev->bm.initialized) {
1967                 DRM_ERROR("Buffer object manager is not initialized.\n");
1968                 return -EINVAL;
1969         }
1970
1971         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1972         return ret;
1973 }
1974
1975
1976 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1977 {
1978         struct drm_bo_reference_info_arg *arg = data;
1979         struct drm_bo_handle_arg *req = &arg->d.req;
1980         struct drm_bo_info_rep *rep = &arg->d.rep;
1981         struct drm_user_object *uo;
1982         int ret;
1983
1984         if (!dev->bm.initialized) {
1985                 DRM_ERROR("Buffer object manager is not initialized.\n");
1986                 return -EINVAL;
1987         }
1988
1989         ret = drm_user_object_ref(file_priv, req->handle,
1990                                   drm_buffer_type, &uo);
1991         if (ret)
1992                 return ret;
1993
1994         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1995         if (ret)
1996                 return ret;
1997
1998         return 0;
1999 }
2000
2001 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2002 {
2003         struct drm_bo_handle_arg *arg = data;
2004         int ret = 0;
2005
2006         if (!dev->bm.initialized) {
2007                 DRM_ERROR("Buffer object manager is not initialized.\n");
2008                 return -EINVAL;
2009         }
2010
2011         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2012         return ret;
2013 }
2014
2015 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2016 {
2017         struct drm_bo_reference_info_arg *arg = data;
2018         struct drm_bo_handle_arg *req = &arg->d.req;
2019         struct drm_bo_info_rep *rep = &arg->d.rep;
2020         int ret;
2021
2022         if (!dev->bm.initialized) {
2023                 DRM_ERROR("Buffer object manager is not initialized.\n");
2024                 return -EINVAL;
2025         }
2026
2027         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2028         if (ret)
2029                 return ret;
2030
2031         return 0;
2032 }
2033
2034 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2035 {
2036         struct drm_bo_map_wait_idle_arg *arg = data;
2037         struct drm_bo_info_req *req = &arg->d.req;
2038         struct drm_bo_info_rep *rep = &arg->d.rep;
2039         int ret;
2040         if (!dev->bm.initialized) {
2041                 DRM_ERROR("Buffer object manager is not initialized.\n");
2042                 return -EINVAL;
2043         }
2044
2045         ret = drm_bo_handle_wait(file_priv, req->handle,
2046                                  req->hint, rep);
2047         if (ret)
2048                 return ret;
2049
2050         return 0;
2051 }
2052
2053 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2054                              uint32_t mem_type,
2055                              int free_pinned,
2056                              int allow_errors)
2057 {
2058         struct drm_device *dev = bo->dev;
2059         int ret = 0;
2060
2061         mutex_lock(&bo->mutex);
2062
2063         ret = drm_bo_expire_fence(bo, allow_errors);
2064         if (ret)
2065                 goto out;
2066
2067         if (free_pinned) {
2068                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2069                 mutex_lock(&dev->struct_mutex);
2070                 list_del_init(&bo->pinned_lru);
2071                 if (bo->pinned_node == bo->mem.mm_node)
2072                         bo->pinned_node = NULL;
2073                 if (bo->pinned_node != NULL) {
2074                         drm_mm_put_block(bo->pinned_node);
2075                         bo->pinned_node = NULL;
2076                 }
2077                 mutex_unlock(&dev->struct_mutex);
2078         }
2079
2080         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2081                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
2082                           "cleanup. Removing flag and evicting.\n");
2083                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2084                 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2085         }
2086
2087         if (bo->mem.mem_type == mem_type)
2088                 ret = drm_bo_evict(bo, mem_type, 0);
2089
2090         if (ret) {
2091                 if (allow_errors) {
2092                         goto out;
2093                 } else {
2094                         ret = 0;
2095                         DRM_ERROR("Cleanup eviction failed\n");
2096                 }
2097         }
2098
2099 out:
2100         mutex_unlock(&bo->mutex);
2101         return ret;
2102 }
2103
2104
2105 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2106                                          int pinned_list)
2107 {
2108         if (pinned_list)
2109                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2110         else
2111                 return list_entry(list, struct drm_buffer_object, lru);
2112 }
2113
2114 /*
2115  * dev->struct_mutex locked.
2116  */
2117
2118 static int drm_bo_force_list_clean(struct drm_device *dev,
2119                                    struct list_head *head,
2120                                    unsigned mem_type,
2121                                    int free_pinned,
2122                                    int allow_errors,
2123                                    int pinned_list)
2124 {
2125         struct list_head *list, *next, *prev;
2126         struct drm_buffer_object *entry, *nentry;
2127         int ret;
2128         int do_restart;
2129
2130         /*
2131          * The list traversal is a bit odd here, because an item may
2132          * disappear from the list when we release the struct_mutex or
2133          * when we decrease the usage count. Also we're not guaranteed
2134          * to drain pinned lists, so we can't always restart.
2135          */
2136
2137 restart:
2138         nentry = NULL;
2139         list_for_each_safe(list, next, head) {
2140                 prev = list->prev;
2141
2142                 entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
2143                 atomic_inc(&entry->usage);
2144                 if (nentry) {
2145                         atomic_dec(&nentry->usage);
2146                         nentry = NULL;
2147                 }
2148
2149                 /*
2150                  * Protect the next item from destruction, so we can check
2151                  * its list pointers later on.
2152                  */
2153
2154                 if (next != head) {
2155                         nentry = drm_bo_entry(next, pinned_list);
2156                         atomic_inc(&nentry->usage);
2157                 }
2158                 mutex_unlock(&dev->struct_mutex);
2159
2160                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2161                                         allow_errors);
2162                 mutex_lock(&dev->struct_mutex);
2163
2164                 drm_bo_usage_deref_locked(&entry);
2165                 if (ret)
2166                         return ret;
2167
2168                 /*
2169                  * Has the next item disappeared from the list?
2170                  */
2171
2172                 do_restart = ((next->prev != list) && (next->prev != prev));
2173
2174                 if (nentry != NULL && do_restart)
2175                         drm_bo_usage_deref_locked(&nentry);
2176
2177                 if (do_restart)
2178                         goto restart;
2179         }
2180         return 0;
2181 }
2182
2183 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
2184 {
2185         struct drm_buffer_manager *bm = &dev->bm;
2186         struct drm_mem_type_manager *man = &bm->man[mem_type];
2187         int ret = -EINVAL;
2188
2189         if (mem_type >= DRM_BO_MEM_TYPES) {
2190                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2191                 return ret;
2192         }
2193
2194         if (!man->has_type) {
2195                 DRM_ERROR("Trying to take down uninitialized "
2196                           "memory manager type %u\n", mem_type);
2197                 return ret;
2198         }
2199         man->use_type = 0;
2200         man->has_type = 0;
2201
2202         ret = 0;
2203         if (mem_type > 0) {
2204                 BUG_ON(!list_empty(&bm->unfenced));
2205                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2206                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2207
2208                 if (drm_mm_clean(&man->manager)) {
2209                         drm_mm_takedown(&man->manager);
2210                 } else {
2211                         ret = -EBUSY;
2212                 }
2213         }
2214
2215         return ret;
2216 }
2217 EXPORT_SYMBOL(drm_bo_clean_mm);
2218
2219 /**
2220  * Evict all buffers of a particular mem_type, but leave memory manager
2221  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2222  * point since we have the hardware lock.
2223  */
2224
2225 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2226 {
2227         int ret;
2228         struct drm_buffer_manager *bm = &dev->bm;
2229         struct drm_mem_type_manager *man = &bm->man[mem_type];
2230
2231         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2232                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2233                 return -EINVAL;
2234         }
2235
2236         if (!man->has_type) {
2237                 DRM_ERROR("Memory type %u has not been initialized.\n",
2238                           mem_type);
2239                 return 0;
2240         }
2241
2242         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2243         if (ret)
2244                 return ret;
2245         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2246
2247         return ret;
2248 }
2249
2250 int drm_bo_init_mm(struct drm_device *dev,
2251                    unsigned type,
2252                    unsigned long p_offset, unsigned long p_size)
2253 {
2254         struct drm_buffer_manager *bm = &dev->bm;
2255         int ret = -EINVAL;
2256         struct drm_mem_type_manager *man;
2257
2258         if (type >= DRM_BO_MEM_TYPES) {
2259                 DRM_ERROR("Illegal memory type %d\n", type);
2260                 return ret;
2261         }
2262
2263         man = &bm->man[type];
2264         if (man->has_type) {
2265                 DRM_ERROR("Memory manager already initialized for type %d\n",
2266                           type);
2267                 return ret;
2268         }
2269
2270         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2271         if (ret)
2272                 return ret;
2273
2274         ret = 0;
2275         if (type != DRM_BO_MEM_LOCAL) {
2276                 if (!p_size) {
2277                         DRM_ERROR("Zero size memory manager type %d\n", type);
2278                         return -EINVAL;
2279                 }
2280                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2281                 if (ret)
2282                         return ret;
2283         }
2284         man->has_type = 1;
2285         man->use_type = 1;
2286         man->size = p_size;
2287
2288         INIT_LIST_HEAD(&man->lru);
2289         INIT_LIST_HEAD(&man->pinned);
2290
2291         return 0;
2292 }
2293 EXPORT_SYMBOL(drm_bo_init_mm);
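
/*
 * Usage sketch (illustrative only): a driver's load path initializing an
 * additional memory type after drm_bo_driver_init() has set up the LOCAL
 * type.  DRM_BO_MEM_VRAM and the 256 MB size are assumptions; note that
 * both the offset and the size are given in pages.
 *
 *        ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
 *                             (256 * 1024 * 1024) >> PAGE_SHIFT);
 *        if (ret)
 *                return ret;
 */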
2294
2295 /*
2296  * This function is intended to be called on drm driver unload.
2297  * If you decide to call it from lastclose, you must protect the call
2298  * from a potentially racing drm_bo_driver_init in firstopen.
2299  * (This may happen on X server restart).
2300  */
2301
2302 int drm_bo_driver_finish(struct drm_device *dev)
2303 {
2304         struct drm_buffer_manager *bm = &dev->bm;
2305         int ret = 0;
2306         unsigned i = DRM_BO_MEM_TYPES;
2307         struct drm_mem_type_manager *man;
2308
2309         mutex_lock(&dev->struct_mutex);
2310
2311         if (!bm->initialized)
2312                 goto out;
2313         bm->initialized = 0;
2314
2315         while (i--) {
2316                 man = &bm->man[i];
2317                 if (man->has_type) {
2318                         man->use_type = 0;
2319                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2320                                 ret = -EBUSY;
2321                                 DRM_ERROR("DRM memory manager type %d "
2322                                           "is not clean.\n", i);
2323                         }
2324                         man->has_type = 0;
2325                 }
2326         }
2327         mutex_unlock(&dev->struct_mutex);
2328
2329         if (!cancel_delayed_work(&bm->wq))
2330                 flush_scheduled_work();
2331
2332         mutex_lock(&dev->struct_mutex);
2333         drm_bo_delayed_delete(dev, 1);
2334         if (list_empty(&bm->ddestroy))
2335                 DRM_DEBUG("Delayed destroy list was clean\n");
2336
2337         if (list_empty(&bm->man[0].lru))
2338                 DRM_DEBUG("Swap list was clean\n");
2339
2340         if (list_empty(&bm->man[0].pinned))
2341                 DRM_DEBUG("NO_MOVE list was clean\n");
2342
2343         if (list_empty(&bm->unfenced))
2344                 DRM_DEBUG("Unfenced list was clean\n");
2345
2346 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2347         ClearPageReserved(bm->dummy_read_page);
2348 #endif
2349         __free_page(bm->dummy_read_page);
2350
2351 out:
2352         mutex_unlock(&dev->struct_mutex);
2353         return ret;
2354 }
2355
2356 /*
2357  * This function is intended to be called on drm driver load.
2358  * If you decide to call it from firstopen, you must protect the call
2359  * from a potentially racing drm_bo_driver_finish in lastclose.
2360  * (This may happen on X server restart).
2361  */
2362
2363 int drm_bo_driver_init(struct drm_device *dev)
2364 {
2365         struct drm_bo_driver *driver = dev->driver->bo_driver;
2366         struct drm_buffer_manager *bm = &dev->bm;
2367         int ret = -EINVAL;
2368
2369         bm->dummy_read_page = NULL;
2370         drm_bo_init_lock(&bm->bm_lock);
2371         mutex_lock(&dev->struct_mutex);
2372         if (!driver)
2373                 goto out_unlock;
2374
2375         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2376         if (!bm->dummy_read_page) {
2377                 ret = -ENOMEM;
2378                 goto out_unlock;
2379         }
2380
2381 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2382         SetPageReserved(bm->dummy_read_page);
2383 #endif
2384
2385         /*
2386          * Initialize the system memory buffer type.
2387          * Other types need to be driver / IOCTL initialized.
2388          */
2389         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2390         if (ret)
2391                 goto out_unlock;
2392
2393 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2394         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2395 #else
2396         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2397 #endif
2398         bm->initialized = 1;
2399         bm->nice_mode = 1;
2400         atomic_set(&bm->count, 0);
2401         bm->cur_pages = 0;
2402         INIT_LIST_HEAD(&bm->unfenced);
2403         INIT_LIST_HEAD(&bm->ddestroy);
2404 out_unlock:
2405         mutex_unlock(&dev->struct_mutex);
2406         return ret;
2407 }
2408 EXPORT_SYMBOL(drm_bo_driver_init);
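
/*
 * Usage sketch (illustrative only): as the comments above and before
 * drm_bo_driver_finish() recommend, a driver normally pairs the two calls
 * in its load and unload hooks.  The hook names are assumptions.
 *
 *        static int foo_load(struct drm_device *dev, unsigned long flags)
 *        {
 *                int ret = drm_bo_driver_init(dev);
 *                if (ret)
 *                        return ret;
 *                ...
 *                return 0;
 *        }
 *
 *        static int foo_unload(struct drm_device *dev)
 *        {
 *                drm_bo_driver_finish(dev);
 *                return 0;
 *        }
 */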
2409
2410 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2411 {
2412         struct drm_mm_init_arg *arg = data;
2413         struct drm_buffer_manager *bm = &dev->bm;
2414         struct drm_bo_driver *driver = dev->driver->bo_driver;
2415         int ret;
2416
2417         if (!driver) {
2418                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2419                 return -EINVAL;
2420         }
2421
2422         ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2423         if (ret)
2424                 return ret;
2425
2426         ret = -EINVAL;
2427         if (arg->magic != DRM_BO_INIT_MAGIC) {
2428                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2429                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
                     (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2430                 return -EINVAL;
2431         }
2432         if (arg->major != DRM_BO_INIT_MAJOR) {
2433                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2434                           "\tversion don't match. Got %d, expected %d.\n",
2435                           arg->major, DRM_BO_INIT_MAJOR);
                     (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2436                 return -EINVAL;
2437         }
2438
2439         mutex_lock(&dev->struct_mutex);
2440         if (!bm->initialized) {
2441                 DRM_ERROR("DRM memory manager was not initialized.\n");
2442                 goto out;
2443         }
2444         if (arg->mem_type == 0) {
2445                 DRM_ERROR("System memory buffers already initialized.\n");
2446                 goto out;
2447         }
2448         ret = drm_bo_init_mm(dev, arg->mem_type,
2449                              arg->p_offset, arg->p_size);
2450
2451 out:
2452         mutex_unlock(&dev->struct_mutex);
2453         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2454
2455         if (ret)
2456                 return ret;
2457
2458         return 0;
2459 }
2460
2461 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2462 {
2463         struct drm_mm_type_arg *arg = data;
2464         struct drm_buffer_manager *bm = &dev->bm;
2465         struct drm_bo_driver *driver = dev->driver->bo_driver;
2466         int ret;
2467
2468         if (!driver) {
2469                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2470                 return -EINVAL;
2471         }
2472
2473         ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2474         if (ret)
2475                 return ret;
2476
2477         mutex_lock(&dev->struct_mutex);
2478         ret = -EINVAL;
2479         if (!bm->initialized) {
2480                 DRM_ERROR("DRM memory manager was not initialized\n");
2481                 goto out;
2482         }
2483         if (arg->mem_type == 0) {
2484                 DRM_ERROR("No takedown for System memory buffers.\n");
2485                 goto out;
2486         }
2487         ret = 0;
2488         if (drm_bo_clean_mm(dev, arg->mem_type)) {
2489                 DRM_ERROR("Memory manager type %d not clean. "
2490                           "Delaying takedown\n", arg->mem_type);
2491         }
2492 out:
2493         mutex_unlock(&dev->struct_mutex);
2494         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2495
2496         if (ret)
2497                 return ret;
2498
2499         return 0;
2500 }
2501
2502 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2503 {
2504         struct drm_mm_type_arg *arg = data;
2505         struct drm_bo_driver *driver = dev->driver->bo_driver;
2506         int ret;
2507
2508         if (!driver) {
2509                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2510                 return -EINVAL;
2511         }
2512
2513         if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2514                 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2515                 return -EINVAL;
2516         }
2517
2518         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2519                 ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
2520                 if (ret)
2521                         return ret;
2522         }
2523
2524         mutex_lock(&dev->struct_mutex);
2525         ret = drm_bo_lock_mm(dev, arg->mem_type);
2526         mutex_unlock(&dev->struct_mutex);
2527         if (ret) {
2528                 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2529                 return ret;
2530         }
2531
2532         return 0;
2533 }
2534
2535 int drm_mm_unlock_ioctl(struct drm_device *dev,
2536                         void *data,
2537                         struct drm_file *file_priv)
2538 {
2539         struct drm_mm_type_arg *arg = data;
2540         struct drm_bo_driver *driver = dev->driver->bo_driver;
2541         int ret;
2542
2543         if (!driver) {
2544                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2545                 return -EINVAL;
2546         }
2547
2548         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2549                 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2550                 if (ret)
2551                         return ret;
2552         }
2553
2554         return 0;
2555 }
2556
2557 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2558 {
2559         struct drm_mm_info_arg *arg = data;
2560         struct drm_buffer_manager *bm = &dev->bm;
2561         struct drm_bo_driver *driver = dev->driver->bo_driver;
2562         struct drm_mem_type_manager *man;
2563         int ret = 0;
2564         int mem_type = arg->mem_type;
2565
2566         if (!driver) {
2567                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2568                 return -EINVAL;
2569         }
2570
2571         if (mem_type >= DRM_BO_MEM_TYPES) {
2572                 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2573                 return -EINVAL;
2574         }
2575
2576         mutex_lock(&dev->struct_mutex);
2577         if (!bm->initialized) {
2578                 DRM_ERROR("DRM memory manager was not initialized\n");
2579                 ret = -EINVAL;
2580                 goto out;
2581         }
2582
2583
2584         man = &bm->man[arg->mem_type];
2585
2586         arg->p_size = man->size;
2587
2588 out:
2589         mutex_unlock(&dev->struct_mutex);
2590
2591         return ret;
2592 }
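
/*
 * User-space usage sketch for this new ioctl (illustrative only): query how
 * many pages a managed memory area covers.  The DRM_IOCTL_MM_INFO request
 * name and the DRM_BO_MEM_VRAM memory type are assumptions taken from the
 * matching header change; fd is an open DRM device file descriptor.
 *
 *        struct drm_mm_info_arg arg = {0};
 *
 *        arg.mem_type = DRM_BO_MEM_VRAM;
 *        if (ioctl(fd, DRM_IOCTL_MM_INFO, &arg) == 0)
 *                printf("VRAM manager size: %llu pages\n",
 *                       (unsigned long long) arg.p_size);
 */
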
2593 /*
2594  * buffer object vm functions.
2595  */
2596
2597 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2598 {
2599         struct drm_buffer_manager *bm = &dev->bm;
2600         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2601
2602         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2603                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2604                         return 0;
2605
2606                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2607                         return 0;
2608
2609                 if (mem->flags & DRM_BO_FLAG_CACHED)
2610                         return 0;
2611         }
2612         return 1;
2613 }
2614 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2615
2616 /**
2617  * Get the PCI offset for the buffer object memory.
2618  *
2619  * \param bo The buffer object.
2620  * \param bus_base On return the base of the PCI region
2621  * \param bus_offset On return the byte offset into the PCI region
2622  * \param bus_size On return the byte size of the buffer object or zero if
2623  *     the buffer object memory is not accessible through a PCI region.
2624  * \return Failure indication.
2625  *
2626  * Returns -EINVAL if the buffer object is currently not mappable.
2627  * Otherwise returns zero.
2628  */
2629
2630 int drm_bo_pci_offset(struct drm_device *dev,
2631                       struct drm_bo_mem_reg *mem,
2632                       unsigned long *bus_base,
2633                       unsigned long *bus_offset, unsigned long *bus_size)
2634 {
2635         struct drm_buffer_manager *bm = &dev->bm;
2636         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2637
2638         *bus_size = 0;
2639         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2640                 return -EINVAL;
2641
2642         if (drm_mem_reg_is_pci(dev, mem)) {
2643                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2644                 *bus_size = mem->num_pages << PAGE_SHIFT;
2645                 *bus_base = man->io_offset;
2646         }
2647
2648         return 0;
2649 }
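
/*
 * Usage sketch (illustrative only): a caller mapping a buffer's backing
 * store through the PCI aperture.  The use of ioremap_nocache() and the
 * surrounding error handling are assumptions made for the example.
 *
 *        unsigned long bus_base, bus_offset, bus_size;
 *        void __iomem *virt = NULL;
 *
 *        ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base,
 *                                &bus_offset, &bus_size);
 *        if (ret)
 *                return ret;
 *        if (bus_size != 0)
 *                virt = ioremap_nocache(bus_base + bus_offset, bus_size);
 *
 * A bus_size of zero means the memory is not in a PCI aperture and must be
 * reached through its backing pages instead.
 */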
2650
2651 /**
2652  * Kill all user-space virtual mappings of this buffer object.
2653  *
2654  * \param bo The buffer object.
2655  *
2656  * Call bo->mutex locked.
2657  */
2658
2659 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2660 {
2661         struct drm_device *dev = bo->dev;
2662         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2663         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2664
2665         if (!dev->dev_mapping)
2666                 return;
2667
2668         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2669 }
2670
2671 /**
2672  * drm_bo_takedown_vm_locked:
2673  *
2674  * @bo: the buffer object to remove any drm device mapping
2675  *
2676  * Remove any associated vm mapping on the drm device node that
2677  * would have been created for a drm_bo_type_device buffer
2678  */
2679 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2680 {
2681         struct drm_map_list *list;
2682         drm_local_map_t *map;
2683         struct drm_device *dev = bo->dev;
2684
2685         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2686         if (bo->type != drm_bo_type_device)
2687                 return;
2688
2689         list = &bo->map_list;
2690         if (list->user_token) {
2691                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2692                 list->user_token = 0;
2693         }
2694         if (list->file_offset_node) {
2695                 drm_mm_put_block(list->file_offset_node);
2696                 list->file_offset_node = NULL;
2697         }
2698
2699         map = list->map;
2700         if (!map)
2701                 return;
2702
2703         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2704         list->map = NULL;
2705         list->user_token = 0ULL;
2706         drm_bo_usage_deref_locked(&bo);
2707 }
2708
2709 /**
2710  * drm_bo_setup_vm_locked:
2711  *
2712  * @bo: the buffer to allocate address space for
2713  *
2714  * Allocate address space in the drm device so that applications
2715  * can mmap the buffer and access the contents. This only
2716  * applies to drm_bo_type_device objects as others are not
2717  * placed in the drm device address space.
2718  */
2719 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2720 {
2721         struct drm_map_list *list = &bo->map_list;
2722         drm_local_map_t *map;
2723         struct drm_device *dev = bo->dev;
2724
2725         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2726         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2727         if (!list->map)
2728                 return -ENOMEM;
2729
2730         map = list->map;
2731         map->offset = 0;
2732         map->type = _DRM_TTM;
2733         map->flags = _DRM_REMOVABLE;
2734         map->size = bo->mem.num_pages * PAGE_SIZE;
2735         atomic_inc(&bo->usage);
2736         map->handle = (void *)bo;
2737
2738         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2739                                                     bo->mem.num_pages, 0, 0);
2740
2741         if (!list->file_offset_node) {
2742                 drm_bo_takedown_vm_locked(bo);
2743                 return -ENOMEM;
2744         }
2745
2746         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2747                                                   bo->mem.num_pages, 0);
2748
2749         list->hash.key = list->file_offset_node->start;
2750         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2751                 drm_bo_takedown_vm_locked(bo);
2752                 return -ENOMEM;
2753         }
2754
2755         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2756
2757         return 0;
2758 }
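
/*
 * User-space side sketch (illustrative only): the user_token allocated above
 * becomes the fake mmap offset of a drm_bo_type_device buffer, so an
 * application maps such a buffer by handing that token to mmap() on the DRM
 * device.  The variable names below are assumptions.
 *
 *        void *ptr = mmap(NULL, byte_size, PROT_READ | PROT_WRITE,
 *                         MAP_SHARED, fd, map_token);
 */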
2759
2760 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2761                          struct drm_file *file_priv)
2762 {
2763         struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2764
2765         arg->major = DRM_BO_INIT_MAJOR;
2766         arg->minor = DRM_BO_INIT_MINOR;
2767         arg->patchlevel = DRM_BO_INIT_PATCH;
2768
2769         return 0;
2770 }