1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads,
40  * as well as hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex does also protect the buffer list heads, so to manipulate those,
44  * we need both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47  * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48  * the list traversal will, in general, need to be restarted.
49  *
50  */
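/*
 * A minimal sketch of the locking order described above; when
 * dev->struct_mutex has to be dropped to take bo->mutex during a list
 * walk, the walk is restarted:
 *
 *	mutex_lock(&bo->mutex);
 *	mutex_lock(&dev->struct_mutex);
 *	... manipulate bo->lru, hash entries ...
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_unlock(&bo->mutex);
 */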
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
56
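/*
 * Map a memory type index (DRM_BO_MEM_*) to its placement flag bit;
 * the per-memory-type placement flags occupy the bits from 24 upwards.
 */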
57 static inline uint64_t drm_bo_type_flags(unsigned type)
58 {
59         return (1ULL << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
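/*
 * Tear down user-space virtual mappings of the buffer before it is
 * moved. With DRM_ODD_MM_COMPAT this also locks the mapping mm structs
 * and, when the old placement was PCI-mappable, finishes the unmap.
 */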
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
137 {
138         struct drm_device *dev = bo->dev;
139         int ret = 0;
140         uint32_t page_flags = 0;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143         bo->ttm = NULL;
144
145         if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146                 page_flags |= DRM_TTM_PAGE_WRITE;
147
148         switch (bo->type) {
149         case drm_bo_type_device:
150         case drm_bo_type_kernel:
151                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
152                                          page_flags, dev->bm.dummy_read_page);
153                 if (!bo->ttm)
154                         ret = -ENOMEM;
155                 break;
156         case drm_bo_type_user:
157                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158                                          page_flags | DRM_TTM_PAGE_USER,
159                                          dev->bm.dummy_read_page);
160                 if (!bo->ttm)
161                         return -ENOMEM;
162
163                 ret = drm_ttm_set_user(bo->ttm, current,
164                                        bo->buffer_start,
165                                        bo->num_pages);
166                 if (ret)
167                         return ret;
168
169                 break;
170         default:
171                 DRM_ERROR("Illegal buffer object type\n");
172                 ret = -EINVAL;
173                 break;
174         }
175
176         return ret;
177 }
178
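/*
 * Move the buffer into the placement described by @mem: tear down any
 * virtual mappings if needed, create and bind a TTM for non-fixed memory
 * types, then transfer the contents via drm_bo_move_ttm(), the driver's
 * move hook, or a memcpy fallback, and finally update the evicted flag
 * and bo->offset. Call with bo->mutex held.
 */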
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180                                   struct drm_bo_mem_reg *mem,
181                                   int evict, int no_wait)
182 {
183         struct drm_device *dev = bo->dev;
184         struct drm_buffer_manager *bm = &dev->bm;
185         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
189         int ret = 0;
190
191         if (old_is_pci || new_is_pci ||
192             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
194         if (ret)
195                 return ret;
196
197         /*
198          * Create and bind a ttm if required.
199          */
200
201         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202                 ret = drm_bo_add_ttm(bo);
203                 if (ret)
204                         goto out_err;
205
206                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207                         ret = drm_ttm_bind(bo->ttm, mem);
208                         if (ret)
209                                 goto out_err;
210                 }
211         }
212
213         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
214
215                 struct drm_bo_mem_reg *old_mem = &bo->mem;
216                 uint64_t save_flags = old_mem->flags;
217                 uint64_t save_proposed_flags = old_mem->proposed_flags;
218
219                 *old_mem = *mem;
220                 mem->mm_node = NULL;
221                 old_mem->proposed_flags = save_proposed_flags;
222                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
223
224         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
225                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
226
227                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
228
229         } else if (dev->driver->bo_driver->move) {
230                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
231
232         } else {
233
234                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
235
236         }
237
238         if (ret)
239                 goto out_err;
240
241         if (old_is_pci || new_is_pci)
242                 drm_bo_vm_post_move(bo);
243
244         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
245                 ret =
246                     dev->driver->bo_driver->invalidate_caches(dev,
247                                                               bo->mem.flags);
248                 if (ret)
249                         DRM_ERROR("Can not flush read caches\n");
250         }
251
252         DRM_FLAG_MASKED(bo->priv_flags,
253                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
254                         _DRM_BO_FLAG_EVICTED);
255
256         if (bo->mem.mm_node)
257                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
258                         bm->man[bo->mem.mem_type].gpu_offset;
259
260
261         return 0;
262
263 out_err:
264         if (old_is_pci || new_is_pci)
265                 drm_bo_vm_post_move(bo);
266
267         new_man = &bm->man[bo->mem.mem_type];
268         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
269                 drm_ttm_unbind(bo->ttm);
270                 drm_ttm_destroy(bo->ttm);
271                 bo->ttm = NULL;
272         }
273
274         return ret;
275 }
276
277 /*
278  * Call bo->mutex locked.
279  * Wait until the buffer is idle.
280  */
281
282 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
283                 int no_wait)
284 {
285         int ret;
286
287         DRM_ASSERT_LOCKED(&bo->mutex);
288
289         if (bo->fence) {
290                 if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
291                         drm_fence_usage_deref_unlocked(&bo->fence);
292                         return 0;
293                 }
294                 if (no_wait)
295                         return -EBUSY;
296
297                 ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
298                                           bo->fence_type);
299                 if (ret)
300                         return ret;
301
302                 drm_fence_usage_deref_unlocked(&bo->fence);
303         }
304         return 0;
305 }
306 EXPORT_SYMBOL(drm_bo_wait);
307
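/*
 * Wait up to three seconds for the buffer's fence to signal. If it still
 * hasn't, assume a GPU lockup (or that the fence driver was taken down),
 * turn off nice mode and drop the fence reference so the buffer can be
 * evicted or destroyed anyway.
 */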
308 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
309 {
310         struct drm_device *dev = bo->dev;
311         struct drm_buffer_manager *bm = &dev->bm;
312
313         if (bo->fence) {
314                 if (bm->nice_mode) {
315                         unsigned long _end = jiffies + 3 * DRM_HZ;
316                         int ret;
317                         do {
318                                 ret = drm_bo_wait(bo, 0, 1, 0);
319                                 if (ret && allow_errors)
320                                         return ret;
321
322                         } while (ret && !time_after_eq(jiffies, _end));
323
324                         if (bo->fence) {
325                                 bm->nice_mode = 0;
326                                 DRM_ERROR("Detected GPU lockup or "
327                                           "fence driver was taken down. "
328                                           "Evicting buffer.\n");
329                         }
330                 }
331                 if (bo->fence)
332                         drm_fence_usage_deref_unlocked(&bo->fence);
333         }
334         return 0;
335 }
336
337 /*
338  * Call dev->struct_mutex locked.
339  * Attempts to remove all private references to a buffer by expiring its
340  * fence object and removing from lru lists and memory managers.
341  */
342
343 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
344 {
345         struct drm_device *dev = bo->dev;
346         struct drm_buffer_manager *bm = &dev->bm;
347
348         DRM_ASSERT_LOCKED(&dev->struct_mutex);
349
350         atomic_inc(&bo->usage);
351         mutex_unlock(&dev->struct_mutex);
352         mutex_lock(&bo->mutex);
353
354         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
355
356         if (bo->fence && drm_fence_object_signaled(bo->fence,
357                                                    bo->fence_type))
358                 drm_fence_usage_deref_unlocked(&bo->fence);
359
360         if (bo->fence && remove_all)
361                 (void)drm_bo_expire_fence(bo, 0);
362
363         mutex_lock(&dev->struct_mutex);
364
365         if (!atomic_dec_and_test(&bo->usage))
366                 goto out;
367
368         if (!bo->fence) {
369                 list_del_init(&bo->lru);
370                 if (bo->mem.mm_node) {
371                         drm_mm_put_block(bo->mem.mm_node);
372                         if (bo->pinned_node == bo->mem.mm_node)
373                                 bo->pinned_node = NULL;
374                         bo->mem.mm_node = NULL;
375                 }
376                 list_del_init(&bo->pinned_lru);
377                 if (bo->pinned_node) {
378                         drm_mm_put_block(bo->pinned_node);
379                         bo->pinned_node = NULL;
380                 }
381                 list_del_init(&bo->ddestroy);
382                 mutex_unlock(&bo->mutex);
383                 drm_bo_destroy_locked(bo);
384                 return;
385         }
386
387         if (list_empty(&bo->ddestroy)) {
388                 drm_fence_object_flush(bo->fence, bo->fence_type);
389                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
390                 schedule_delayed_work(&bm->wq,
391                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
392         }
393
394 out:
395         mutex_unlock(&bo->mutex);
396         return;
397 }
398
399 /*
400  * Verify that refcount is 0 and that there are no internal references
401  * to the buffer object. Then destroy it.
402  */
403
404 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
405 {
406         struct drm_device *dev = bo->dev;
407         struct drm_buffer_manager *bm = &dev->bm;
408
409         DRM_ASSERT_LOCKED(&dev->struct_mutex);
410
411         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
412             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
413             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
414                 if (bo->fence != NULL) {
415                         DRM_ERROR("Fence was non-zero.\n");
416                         drm_bo_cleanup_refs(bo, 0);
417                         return;
418                 }
419
420 #ifdef DRM_ODD_MM_COMPAT
421                 BUG_ON(!list_empty(&bo->vma_list));
422                 BUG_ON(!list_empty(&bo->p_mm_list));
423 #endif
424
425                 if (bo->ttm) {
426                         drm_ttm_unbind(bo->ttm);
427                         drm_ttm_destroy(bo->ttm);
428                         bo->ttm = NULL;
429                 }
430
431                 atomic_dec(&bm->count);
432
433                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
434
435                 return;
436         }
437
438         /*
439          * Some stuff is still trying to reference the buffer object.
440          * Get rid of those references.
441          */
442
443         drm_bo_cleanup_refs(bo, 0);
444
445         return;
446 }
447
448 /*
449  * Call dev->struct_mutex locked.
450  */
451
452 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
453 {
454         struct drm_buffer_manager *bm = &dev->bm;
455
456         struct drm_buffer_object *entry, *nentry;
457         struct list_head *list, *next;
458
459         list_for_each_safe(list, next, &bm->ddestroy) {
460                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
461
462                 nentry = NULL;
463                 if (next != &bm->ddestroy) {
464                         nentry = list_entry(next, struct drm_buffer_object,
465                                             ddestroy);
466                         atomic_inc(&nentry->usage);
467                 }
468
469                 drm_bo_cleanup_refs(entry, remove_all);
470
471                 if (nentry)
472                         atomic_dec(&nentry->usage);
473         }
474 }
475
476 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
477 static void drm_bo_delayed_workqueue(void *data)
478 #else
479 static void drm_bo_delayed_workqueue(struct work_struct *work)
480 #endif
481 {
482 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
483         struct drm_device *dev = (struct drm_device *) data;
484         struct drm_buffer_manager *bm = &dev->bm;
485 #else
486         struct drm_buffer_manager *bm =
487             container_of(work, struct drm_buffer_manager, wq.work);
488         struct drm_device *dev = container_of(bm, struct drm_device, bm);
489 #endif
490
491         DRM_DEBUG("Delayed delete Worker\n");
492
493         mutex_lock(&dev->struct_mutex);
494         if (!bm->initialized) {
495                 mutex_unlock(&dev->struct_mutex);
496                 return;
497         }
498         drm_bo_delayed_delete(dev, 0);
499         if (bm->initialized && !list_empty(&bm->ddestroy)) {
500                 schedule_delayed_work(&bm->wq,
501                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
502         }
503         mutex_unlock(&dev->struct_mutex);
504 }
505
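/*
 * Drop one usage reference with dev->struct_mutex held; the buffer
 * object is destroyed when the reference count reaches zero.
 */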
506 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
507 {
508         struct drm_buffer_object *tmp_bo = *bo;
509         *bo = NULL;
510
511         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
512
513         if (atomic_dec_and_test(&tmp_bo->usage))
514                 drm_bo_destroy_locked(tmp_bo);
515 }
516 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
517
518 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
519                                      struct drm_user_object *uo)
520 {
521         struct drm_buffer_object *bo =
522             drm_user_object_entry(uo, struct drm_buffer_object, base);
523
524         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
525
526         drm_bo_takedown_vm_locked(bo);
527         drm_bo_usage_deref_locked(&bo);
528 }
529
530 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
531 {
532         struct drm_buffer_object *tmp_bo = *bo;
533         struct drm_device *dev = tmp_bo->dev;
534
535         *bo = NULL;
536         if (atomic_dec_and_test(&tmp_bo->usage)) {
537                 mutex_lock(&dev->struct_mutex);
538                 if (atomic_read(&tmp_bo->usage) == 0)
539                         drm_bo_destroy_locked(tmp_bo);
540                 mutex_unlock(&dev->struct_mutex);
541         }
542 }
543 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
544
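/*
 * Put every buffer on the unfenced list back on its normal LRU list,
 * clearing _DRM_BO_FLAG_UNFENCED and waking any waiters. Intended for
 * paths where buffers were queued for fencing but no fence was emitted.
 */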
545 void drm_putback_buffer_objects(struct drm_device *dev)
546 {
547         struct drm_buffer_manager *bm = &dev->bm;
548         struct list_head *list = &bm->unfenced;
549         struct drm_buffer_object *entry, *next;
550
551         mutex_lock(&dev->struct_mutex);
552         list_for_each_entry_safe(entry, next, list, lru) {
553                 atomic_inc(&entry->usage);
554                 mutex_unlock(&dev->struct_mutex);
555
556                 mutex_lock(&entry->mutex);
557                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
558                 mutex_lock(&dev->struct_mutex);
559
560                 list_del_init(&entry->lru);
561                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
562                 wake_up_all(&entry->event_queue);
563
564                 /*
565                  * FIXME: Might want to put back on head of list
566                  * instead of tail here.
567                  */
568
569                 drm_bo_add_to_lru(entry);
570                 mutex_unlock(&entry->mutex);
571                 drm_bo_usage_deref_locked(&entry);
572         }
573         mutex_unlock(&dev->struct_mutex);
574 }
575 EXPORT_SYMBOL(drm_putback_buffer_objects);
576
577 /*
578  * Note. The caller has to register (if applicable)
579  * and deregister fence object usage.
580  */
581
582 int drm_fence_buffer_objects(struct drm_device *dev,
583                              struct list_head *list,
584                              uint32_t fence_flags,
585                              struct drm_fence_object *fence,
586                              struct drm_fence_object **used_fence)
587 {
588         struct drm_buffer_manager *bm = &dev->bm;
589         struct drm_buffer_object *entry;
590         uint32_t fence_type = 0;
591         uint32_t fence_class = ~0;
592         int count = 0;
593         int ret = 0;
594         struct list_head *l;
595
596         mutex_lock(&dev->struct_mutex);
597
598         if (!list)
599                 list = &bm->unfenced;
600
601         if (fence)
602                 fence_class = fence->fence_class;
603
604         list_for_each_entry(entry, list, lru) {
605                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
606                 fence_type |= entry->new_fence_type;
607                 if (fence_class == ~0)
608                         fence_class = entry->new_fence_class;
609                 else if (entry->new_fence_class != fence_class) {
610                         DRM_ERROR("Unmatching fence classes on unfenced list: "
611                                   "%d and %d.\n",
612                                   fence_class,
613                                   entry->new_fence_class);
614                         ret = -EINVAL;
615                         goto out;
616                 }
617                 count++;
618         }
619
620         if (!count) {
621                 ret = -EINVAL;
622                 goto out;
623         }
624
625         if (fence) {
626                 if ((fence_type & fence->type) != fence_type ||
627                     (fence->fence_class != fence_class)) {
628                         DRM_ERROR("Given fence doesn't match buffers "
629                                   "on unfenced list.\n");
630                         ret = -EINVAL;
631                         goto out;
632                 }
633         } else {
634                 mutex_unlock(&dev->struct_mutex);
635                 ret = drm_fence_object_create(dev, fence_class, fence_type,
636                                               fence_flags | DRM_FENCE_FLAG_EMIT,
637                                               &fence);
638                 mutex_lock(&dev->struct_mutex);
639                 if (ret)
640                         goto out;
641         }
642
643         count = 0;
644         l = list->next;
645         while (l != list) {
646                 prefetch(l->next);
647                 entry = list_entry(l, struct drm_buffer_object, lru);
648                 atomic_inc(&entry->usage);
649                 mutex_unlock(&dev->struct_mutex);
650                 mutex_lock(&entry->mutex);
651                 mutex_lock(&dev->struct_mutex);
652                 list_del_init(l);
653                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
654                         count++;
655                         if (entry->fence)
656                                 drm_fence_usage_deref_locked(&entry->fence);
657                         entry->fence = drm_fence_reference_locked(fence);
658                         entry->fence_class = entry->new_fence_class;
659                         entry->fence_type = entry->new_fence_type;
660                         DRM_FLAG_MASKED(entry->priv_flags, 0,
661                                         _DRM_BO_FLAG_UNFENCED);
662                         wake_up_all(&entry->event_queue);
663                         drm_bo_add_to_lru(entry);
664                 }
665                 mutex_unlock(&entry->mutex);
666                 drm_bo_usage_deref_locked(&entry);
667                 l = list->next;
668         }
669         DRM_DEBUG("Fenced %d buffers\n", count);
670 out:
671         mutex_unlock(&dev->struct_mutex);
672         *used_fence = fence;
673         return ret;
674 }
675 EXPORT_SYMBOL(drm_fence_buffer_objects);
676
677 /*
678  * bo->mutex locked
679  */
680
681 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
682                         int no_wait)
683 {
684         int ret = 0;
685         struct drm_device *dev = bo->dev;
686         struct drm_bo_mem_reg evict_mem;
687
688         /*
689          * Someone might have modified the buffer before we took the
690          * buffer mutex.
691          */
692
693         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
694                 goto out;
695         if (bo->mem.mem_type != mem_type)
696                 goto out;
697
698         ret = drm_bo_wait(bo, 0, 0, no_wait);
699
700         if (ret && ret != -EAGAIN) {
701                 DRM_ERROR("Failed to expire fence before "
702                           "buffer eviction.\n");
703                 goto out;
704         }
705
706         evict_mem = bo->mem;
707         evict_mem.mm_node = NULL;
708
710         evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
711         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
712
713         if (ret) {
714                 if (ret != -EAGAIN)
715                         DRM_ERROR("Failed to find memory space for "
716                                   "buffer 0x%p eviction.\n", bo);
717                 goto out;
718         }
719
720         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
721
722         if (ret) {
723                 if (ret != -EAGAIN)
724                         DRM_ERROR("Buffer eviction failed\n");
725                 goto out;
726         }
727
728         mutex_lock(&dev->struct_mutex);
729         if (evict_mem.mm_node) {
730                 if (evict_mem.mm_node != bo->pinned_node)
731                         drm_mm_put_block(evict_mem.mm_node);
732                 evict_mem.mm_node = NULL;
733         }
734         list_del(&bo->lru);
735         drm_bo_add_to_lru(bo);
736         mutex_unlock(&dev->struct_mutex);
737
738         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
739                         _DRM_BO_FLAG_EVICTED);
740
741 out:
742         return ret;
743 }
744
745 /**
746  * Repeatedly evict memory from the LRU for @mem_type until we create enough
747  * space, or we've evicted everything and there isn't enough space.
748  */
749 static int drm_bo_mem_force_space(struct drm_device *dev,
750                                   struct drm_bo_mem_reg *mem,
751                                   uint32_t mem_type, int no_wait)
752 {
753         struct drm_mm_node *node;
754         struct drm_buffer_manager *bm = &dev->bm;
755         struct drm_buffer_object *entry;
756         struct drm_mem_type_manager *man = &bm->man[mem_type];
757         struct list_head *lru;
758         unsigned long num_pages = mem->num_pages;
759         int ret;
760
761         mutex_lock(&dev->struct_mutex);
762         do {
763                 node = drm_mm_search_free(&man->manager, num_pages,
764                                           mem->page_alignment, 1);
765                 if (node)
766                         break;
767
768                 lru = &man->lru;
769                 if (lru->next == lru)
770                         break;
771
772                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
773                 atomic_inc(&entry->usage);
774                 mutex_unlock(&dev->struct_mutex);
775                 mutex_lock(&entry->mutex);
776                 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
777
778                 ret = drm_bo_evict(entry, mem_type, no_wait);
779                 mutex_unlock(&entry->mutex);
780                 drm_bo_usage_deref_unlocked(&entry);
781                 if (ret)
782                         return ret;
783                 mutex_lock(&dev->struct_mutex);
784         } while (1);
785
786         if (!node) {
787                 mutex_unlock(&dev->struct_mutex);
788                 return -ENOMEM;
789         }
790
791         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
792         mutex_unlock(&dev->struct_mutex);
793         mem->mm_node = node;
794         mem->mem_type = mem_type;
795         return 0;
796 }
797
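/*
 * Check whether memory type @mem_type can satisfy the placement,
 * caching and mappability bits in @mask. Returns 1 on success and
 * stores the resulting flags in @res_mask, 0 otherwise.
 */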
798 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
799                                 int disallow_fixed,
800                                 uint32_t mem_type,
801                                 uint64_t mask, uint32_t *res_mask)
802 {
803         uint64_t cur_flags = drm_bo_type_flags(mem_type);
804         uint64_t flag_diff;
805
806         if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
807                 return 0;
808         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
809                 cur_flags |= DRM_BO_FLAG_CACHED;
810         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
811                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
812         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
813                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
814
815         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
816                 return 0;
817
818         if (mem_type == DRM_BO_MEM_LOCAL) {
819                 *res_mask = cur_flags;
820                 return 1;
821         }
822
823         flag_diff = (mask ^ cur_flags);
824         if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
825                 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
826
827         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
828             (!(mask & DRM_BO_FLAG_CACHED) ||
829              (mask & DRM_BO_FLAG_FORCE_CACHING)))
830                 return 0;
831
832         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
833             ((mask & DRM_BO_FLAG_MAPPABLE) ||
834              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
835                 return 0;
836
837         *res_mask = cur_flags;
838         return 1;
839 }
840
841 /**
842  * Creates space for memory region @mem according to its type.
843  *
844  * This function first searches for free space in compatible memory types in
845  * the priority order defined by the driver.  If free space isn't found, then
846  * drm_bo_mem_force_space is attempted in priority order to evict and find
847  * space.
848  */
849 int drm_bo_mem_space(struct drm_buffer_object *bo,
850                      struct drm_bo_mem_reg *mem, int no_wait)
851 {
852         struct drm_device *dev = bo->dev;
853         struct drm_buffer_manager *bm = &dev->bm;
854         struct drm_mem_type_manager *man;
855
856         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
857         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
858         uint32_t i;
859         uint32_t mem_type = DRM_BO_MEM_LOCAL;
860         uint32_t cur_flags;
861         int type_found = 0;
862         int type_ok = 0;
863         int has_eagain = 0;
864         struct drm_mm_node *node = NULL;
865         int ret;
866
867         mem->mm_node = NULL;
868         for (i = 0; i < num_prios; ++i) {
869                 mem_type = prios[i];
870                 man = &bm->man[mem_type];
871
872                 type_ok = drm_bo_mt_compatible(man,
873                                                bo->type == drm_bo_type_user,
874                                                mem_type, mem->proposed_flags,
875                                                &cur_flags);
876
877                 if (!type_ok)
878                         continue;
879
880                 if (mem_type == DRM_BO_MEM_LOCAL)
881                         break;
882
883                 if ((mem_type == bo->pinned_mem_type) &&
884                     (bo->pinned_node != NULL)) {
885                         node = bo->pinned_node;
886                         break;
887                 }
888
889                 mutex_lock(&dev->struct_mutex);
890                 if (man->has_type && man->use_type) {
891                         type_found = 1;
892                         node = drm_mm_search_free(&man->manager, mem->num_pages,
893                                                   mem->page_alignment, 1);
894                         if (node)
895                                 node = drm_mm_get_block(node, mem->num_pages,
896                                                         mem->page_alignment);
897                 }
898                 mutex_unlock(&dev->struct_mutex);
899                 if (node)
900                         break;
901         }
902
903         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
904                 mem->mm_node = node;
905                 mem->mem_type = mem_type;
906                 mem->flags = cur_flags;
907                 return 0;
908         }
909
910         if (!type_found)
911                 return -EINVAL;
912
913         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
914         prios = dev->driver->bo_driver->mem_busy_prio;
915
916         for (i = 0; i < num_prios; ++i) {
917                 mem_type = prios[i];
918                 man = &bm->man[mem_type];
919
920                 if (!man->has_type)
921                         continue;
922
923                 if (!drm_bo_mt_compatible(man,
924                                           bo->type == drm_bo_type_user,
925                                           mem_type,
926                                           mem->proposed_flags,
927                                           &cur_flags))
928                         continue;
929
930                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
931
932                 if (ret == 0 && mem->mm_node) {
933                         mem->flags = cur_flags;
934                         return 0;
935                 }
936
937                 if (ret == -EAGAIN)
938                         has_eagain = 1;
939         }
940
941         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
942         return ret;
943 }
944 EXPORT_SYMBOL(drm_bo_mem_space);
945
946 /*
947  * drm_bo_modify_proposed_flags:
948  *
949  * @bo: the buffer object getting new flags
950  *
951  * @new_flags: the new set of proposed flag bits
952  *
953  * @new_mask: the mask of bits changed in new_flags
954  *
955  * Modify the proposed_flags bits in @bo.
956  */
957 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
958                                          uint64_t new_flags, uint64_t new_mask)
959 {
960         uint32_t new_access;
961
962         /* Copy unchanging bits from existing proposed_flags */
963         DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
964          
965         if (bo->type == drm_bo_type_user &&
966             ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
967              (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
968                 DRM_ERROR("User buffers require cache-coherent memory.\n");
969                 return -EINVAL;
970         }
971
972         if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
973                 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
974                 return -EPERM;
975         }
976
977         if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
978                 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
979                 return -EPERM;
980         }
981
982         new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
983                                   DRM_BO_FLAG_READ);
984
985         if (new_access == 0) {
986                 DRM_ERROR("Invalid buffer object rwx properties\n");
987                 return -EINVAL;
988         }
989
990         bo->mem.proposed_flags = new_flags;
991         return 0;
992 }
993
994 /*
995  * Call dev->struct_mutex locked.
996  */
997
998 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
999                                               uint32_t handle, int check_owner)
1000 {
1001         struct drm_user_object *uo;
1002         struct drm_buffer_object *bo;
1003
1004         uo = drm_lookup_user_object(file_priv, handle);
1005
1006         if (!uo || (uo->type != drm_buffer_type)) {
1007                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1008                 return NULL;
1009         }
1010
1011         if (check_owner && file_priv != uo->owner) {
1012                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1013                         return NULL;
1014         }
1015
1016         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1017         atomic_inc(&bo->usage);
1018         return bo;
1019 }
1020 EXPORT_SYMBOL(drm_lookup_buffer_object);
1021
1022 /*
1023  * Call bo->mutex locked.
1024  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1026  * Unlike drm_bo_busy(), this doesn't do any fence flushing.
1026  */
1027
1028 static int drm_bo_quick_busy(struct drm_buffer_object *bo)
1029 {
1030         struct drm_fence_object *fence = bo->fence;
1031
1032         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1033         if (fence) {
1034                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1035                         drm_fence_usage_deref_unlocked(&bo->fence);
1036                         return 0;
1037                 }
1038                 return 1;
1039         }
1040         return 0;
1041 }
1042
1043 /*
1044  * Call bo->mutex locked.
1045  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1046  */
1047
1048 static int drm_bo_busy(struct drm_buffer_object *bo)
1049 {
1050         struct drm_fence_object *fence = bo->fence;
1051
1052         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1053         if (fence) {
1054                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1055                         drm_fence_usage_deref_unlocked(&bo->fence);
1056                         return 0;
1057                 }
1058                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1059                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1060                         drm_fence_usage_deref_unlocked(&bo->fence);
1061                         return 0;
1062                 }
1063                 return 1;
1064         }
1065         return 0;
1066 }
1067
1068 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1069 {
1070         int ret = 0;
1071
1072         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1073         if (bo->mem.mm_node)
1074                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1075         return ret;
1076 }
1077
1078 EXPORT_SYMBOL(drm_bo_evict_cached);
1079 /*
1080  * Wait until a buffer is unmapped.
1081  */
1082
1083 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1084 {
1085         int ret = 0;
1086
1087         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1088                 return -EBUSY;
1089
1090         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1091                     atomic_read(&bo->mapped) == -1);
1092
1093         if (ret == -EINTR)
1094                 ret = -EAGAIN;
1095
1096         return ret;
1097 }
1098
1099 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
1100 {
1101         int ret;
1102
1103         mutex_lock(&bo->mutex);
1104         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1105         mutex_unlock(&bo->mutex);
1106         return ret;
1107 }
1108
1109 /*
1110  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1111  * Until then, we cannot really do anything with it except delete it.
1112  */
1113
1114 static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
1115                                 int eagain_if_wait)
1116 {
1117         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1118
1119         if (ret && no_wait)
1120                 return -EBUSY;
1121         else if (!ret)
1122                 return 0;
1123
1124         ret = 0;
1125         mutex_unlock(&bo->mutex);
1126         DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
1127                      !drm_bo_check_unfenced(bo));
1128         mutex_lock(&bo->mutex);
1129         if (ret == -EINTR)
1130                 return -EAGAIN;
1131         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1132         if (ret) {
1133                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1134                 return -EBUSY;
1135         }
1136         if (eagain_if_wait)
1137                 return -EAGAIN;
1138
1139         return 0;
1140 }
1141
1142 /*
1143  * Fill in the ioctl reply argument with buffer info.
1144  * Bo locked.
1145  */
1146
1147 static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1148                                 struct drm_bo_info_rep *rep)
1149 {
1150         if (!rep)
1151                 return;
1152
1153         rep->handle = bo->base.hash.key;
1154         rep->flags = bo->mem.flags;
1155         rep->size = bo->num_pages * PAGE_SIZE;
1156         rep->offset = bo->offset;
1157
1158         /*
1159          * drm_bo_type_device buffers have user-visible
1160  * handles which can be used to share the buffer across
1161  * processes. Hand that back to the application.
1162          */
1163         if (bo->type == drm_bo_type_device)
1164                 rep->arg_handle = bo->map_list.user_token;
1165         else
1166                 rep->arg_handle = 0;
1167
1168         rep->proposed_flags = bo->mem.proposed_flags;
1169         rep->buffer_start = bo->buffer_start;
1170         rep->fence_flags = bo->fence_type;
1171         rep->rep_flags = 0;
1172         rep->page_alignment = bo->mem.page_alignment;
1173
1174         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1175                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1176                                 DRM_BO_REP_BUSY);
1177         }
1178 }
1179
1180 /*
1181  * Wait for buffer idle and register that we've mapped the buffer.
1182  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1183  * so that if the client dies, the mapping is automatically
1184  * unregistered.
1185  */
1186
1187 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1188                                  uint32_t map_flags, unsigned hint,
1189                                  struct drm_bo_info_rep *rep)
1190 {
1191         struct drm_buffer_object *bo;
1192         struct drm_device *dev = file_priv->minor->dev;
1193         int ret = 0;
1194         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1195
1196         mutex_lock(&dev->struct_mutex);
1197         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1198         mutex_unlock(&dev->struct_mutex);
1199
1200         if (!bo)
1201                 return -EINVAL;
1202
1203         mutex_lock(&bo->mutex);
1204         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1205         if (ret)
1206                 goto out;
1207
1208         /*
1209          * If this returns true, we are currently unmapped.
1210          * We need to do this test, because unmapping can
1211          * be done without the bo->mutex held.
1212          */
1213
1214         while (1) {
1215                 if (atomic_inc_and_test(&bo->mapped)) {
1216                         if (no_wait && drm_bo_busy(bo)) {
1217                                 atomic_dec(&bo->mapped);
1218                                 ret = -EBUSY;
1219                                 goto out;
1220                         }
1221                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1222                         if (ret) {
1223                                 atomic_dec(&bo->mapped);
1224                                 goto out;
1225                         }
1226
1227                         if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1228                                 drm_bo_evict_cached(bo);
1229
1230                         break;
1231                 } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
1232
1233                         /*
1234                          * We are already mapped with different flags.
1235                          * need to wait for unmap.
1236                          */
1237
1238                         ret = drm_bo_wait_unmapped(bo, no_wait);
1239                         if (ret)
1240                                 goto out;
1241
1242                         continue;
1243                 }
1244                 break;
1245         }
1246
1247         mutex_lock(&dev->struct_mutex);
1248         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1249         mutex_unlock(&dev->struct_mutex);
1250         if (ret) {
1251                 if (atomic_add_negative(-1, &bo->mapped))
1252                         wake_up_all(&bo->event_queue);
1253
1254         } else
1255                 drm_bo_fill_rep_arg(bo, rep);
1256 out:
1257         mutex_unlock(&bo->mutex);
1258         drm_bo_usage_deref_unlocked(&bo);
1259         return ret;
1260 }
1261
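/*
 * Release the _DRM_REF_TYPE1 mapping reference on @handle that was
 * registered by drm_buffer_object_map().
 */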
1262 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1263 {
1264         struct drm_device *dev = file_priv->minor->dev;
1265         struct drm_buffer_object *bo;
1266         struct drm_ref_object *ro;
1267         int ret = 0;
1268
1269         mutex_lock(&dev->struct_mutex);
1270
1271         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1272         if (!bo) {
1273                 ret = -EINVAL;
1274                 goto out;
1275         }
1276
1277         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1278         if (!ro) {
1279                 ret = -EINVAL;
1280                 goto out;
1281         }
1282
1283         drm_remove_ref_object(file_priv, ro);
1284         drm_bo_usage_deref_locked(&bo);
1285 out:
1286         mutex_unlock(&dev->struct_mutex);
1287         return ret;
1288 }
1289
1290 /*
1291  * Call dev->struct_mutex locked.
1292  */
1293
1294 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1295                                          struct drm_user_object *uo,
1296                                          enum drm_ref_type action)
1297 {
1298         struct drm_buffer_object *bo =
1299             drm_user_object_entry(uo, struct drm_buffer_object, base);
1300
1301         /*
1302          * We DON'T want to take the bo->lock here, because we want to
1303          * hold it when we wait for unmapped buffer.
1304          */
1305
1306         BUG_ON(action != _DRM_REF_TYPE1);
1307
1308         if (atomic_add_negative(-1, &bo->mapped))
1309                 wake_up_all(&bo->event_queue);
1310 }
1311
1312 /*
1313  * bo->mutex locked.
1314  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1315  */
1316
1317 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1318                        int no_wait, int move_unfenced)
1319 {
1320         struct drm_device *dev = bo->dev;
1321         struct drm_buffer_manager *bm = &dev->bm;
1322         int ret = 0;
1323         struct drm_bo_mem_reg mem;
1324         /*
1325          * Flush outstanding fences.
1326          */
1327
1328         drm_bo_busy(bo);
1329
1330         /*
1331          * Wait for outstanding fences.
1332          */
1333
1334         ret = drm_bo_wait(bo, 0, 0, no_wait);
1335         if (ret)
1336                 return ret;
1337
1338         mem.num_pages = bo->num_pages;
1339         mem.size = mem.num_pages << PAGE_SHIFT;
1340         mem.proposed_flags = new_mem_flags;
1341         mem.page_alignment = bo->mem.page_alignment;
1342
1343         mutex_lock(&bm->evict_mutex);
1344         mutex_lock(&dev->struct_mutex);
1345         list_del_init(&bo->lru);
1346         mutex_unlock(&dev->struct_mutex);
1347
1348         /*
1349          * Determine where to move the buffer.
1350          */
1351         ret = drm_bo_mem_space(bo, &mem, no_wait);
1352         if (ret)
1353                 goto out_unlock;
1354
1355         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1356
1357 out_unlock:
1358         mutex_lock(&dev->struct_mutex);
1359         if (ret || !move_unfenced) {
1360                 if (mem.mm_node) {
1361                         if (mem.mm_node != bo->pinned_node)
1362                                 drm_mm_put_block(mem.mm_node);
1363                         mem.mm_node = NULL;
1364                 }
1365                 drm_bo_add_to_lru(bo);
1366                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1367                         wake_up_all(&bo->event_queue);
1368                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1369                                         _DRM_BO_FLAG_UNFENCED);
1370                 }
1371         } else {
1372                 list_add_tail(&bo->lru, &bm->unfenced);
1373                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1374                                 _DRM_BO_FLAG_UNFENCED);
1375         }
1376         mutex_unlock(&dev->struct_mutex);
1377         mutex_unlock(&bm->evict_mutex);
1378         return ret;
1379 }
1380
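/*
 * Returns 1 if the buffer's current placement already satisfies its
 * proposed flags (memory type, caching and mappability), 0 if a move
 * is needed.
 */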
1381 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1382 {
1383         uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1384
1385         if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1386                 return 0;
1387         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1388             (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1389              (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1390                 return 0;
1391
1392         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1393             ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1394              (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1395                 return 0;
1396         return 1;
1397 }
1398
1399 /**
1400  * drm_buffer_object_validate:
1401  *
1402  * @bo: the buffer object to modify
1403  *
1404  * @fence_class: the new fence class covering this buffer
1405  *
1406  * @move_unfenced: a boolean indicating whether switching the
1407  * memory space of this buffer should cause the buffer to
1408  * be placed on the unfenced list.
1409  *
1410  * @no_wait: whether this function should return -EBUSY instead
1411  * of waiting.
1412  *
1413  * Change buffer access parameters. This can involve moving
1414  * the buffer to the correct memory type, pinning the buffer
1415  * or changing the class/type of fence covering this buffer
1416  *
1417  * Must be called with bo locked.
1418  */
1419
1420 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1421                                       uint32_t fence_class,
1422                                       int move_unfenced, int no_wait)
1423 {
1424         struct drm_device *dev = bo->dev;
1425         struct drm_buffer_manager *bm = &dev->bm;
1426         struct drm_bo_driver *driver = dev->driver->bo_driver;
1427         uint32_t ftype;
1428         int ret;
1429
1430         DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1431                   (unsigned long long) bo->mem.proposed_flags,
1432                   (unsigned long long) bo->mem.flags);
1433
1434         ret = driver->fence_type(bo, &fence_class, &ftype);
1435
1436         if (ret) {
1437                 DRM_ERROR("Driver did not support given buffer permissions\n");
1438                 return ret;
1439         }
1440
1441         /*
1442          * We're switching command submission mechanism,
1443          * or cannot simply rely on the hardware serializing for us.
1444          *
1445  * Insert a driver-dependent barrier or wait for buffer idle.
1446          */
1447
1448         if ((fence_class != bo->fence_class) ||
1449             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1450
1451                 ret = -EINVAL;
1452                 if (driver->command_stream_barrier) {
1453                         ret = driver->command_stream_barrier(bo,
1454                                                              fence_class,
1455                                                              ftype,
1456                                                              no_wait);
1457                 }
1458                 if (ret)
1459                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1460
1461                 if (ret)
1462                         return ret;
1463
1464         }
1465
1466         bo->new_fence_class = fence_class;
1467         bo->new_fence_type = ftype;
1468
1469         ret = drm_bo_wait_unmapped(bo, no_wait);
1470         if (ret) {
1471                 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1472                 return ret;
1473         }
1474
1475         /*
1476          * Check whether we need to move buffer.
1477          */
1478
1479         if (!drm_bo_mem_compat(&bo->mem)) {
1480                 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1481                                          move_unfenced);
1482                 if (ret) {
1483                         if (ret != -EAGAIN)
1484                                 DRM_ERROR("Failed moving buffer.\n");
1485                         return ret;
1486                 }
1487         }
1488
1489         /*
1490          * Pinned buffers.
1491          */
1492
1493         if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1494                 bo->pinned_mem_type = bo->mem.mem_type;
1495                 mutex_lock(&dev->struct_mutex);
1496                 list_del_init(&bo->pinned_lru);
1497                 drm_bo_add_to_pinned_lru(bo);
1498
1499                 if (bo->pinned_node != bo->mem.mm_node) {
1500                         if (bo->pinned_node != NULL)
1501                                 drm_mm_put_block(bo->pinned_node);
1502                         bo->pinned_node = bo->mem.mm_node;
1503                 }
1504
1505                 mutex_unlock(&dev->struct_mutex);
1506
1507         } else if (bo->pinned_node != NULL) {
1508
1509                 mutex_lock(&dev->struct_mutex);
1510
1511                 if (bo->pinned_node != bo->mem.mm_node)
1512                         drm_mm_put_block(bo->pinned_node);
1513
1514                 list_del_init(&bo->pinned_lru);
1515                 bo->pinned_node = NULL;
1516                 mutex_unlock(&dev->struct_mutex);
1517
1518         }
1519
1520         /*
1521          * We might need to add a TTM.
1522          */
1523
1524         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1525                 ret = drm_bo_add_ttm(bo);
1526                 if (ret)
1527                         return ret;
1528         }
1529         /*
1530          * Validation has succeeded, move the access and other
1531          * non-mapping-related flag bits from the proposed flags to
1532          * the active flags
1533          */
1534
1535         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1536
1537         /*
1538          * Finally, adjust lru to be sure.
1539          */
1540
1541         mutex_lock(&dev->struct_mutex);
1542         list_del(&bo->lru);
1543         if (move_unfenced) {
1544                 list_add_tail(&bo->lru, &bm->unfenced);
1545                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1546                                 _DRM_BO_FLAG_UNFENCED);
1547         } else {
1548                 drm_bo_add_to_lru(bo);
1549                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1550                         wake_up_all(&bo->event_queue);
1551                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1552                                         _DRM_BO_FLAG_UNFENCED);
1553                 }
1554         }
1555         mutex_unlock(&dev->struct_mutex);
1556
1557         return 0;
1558 }
1559
1560 /**
1561  * drm_bo_do_validate:
1562  *
1563  * @bo: the buffer object
1564  *
1565  * @flags: access rights, mapping parameters and cacheability. See
1566  * the DRM_BO_FLAG_* values in drm.h
1567  *
1568  * @mask: Which flag values to change; this allows callers to modify
1569  * things without knowing the current state of other flags.
1570  *
1571  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1572  * values in drm.h.
1573  *
1574  * @fence_class: a driver-specific way of doing fences. Presumably,
1575  * this would be used if the driver had more than one submission and
1576  * fencing mechanism. At this point, there isn't any use of this
1577  * from the user mode code.
1578  *
1579  * @rep: To be stuffed with the reply from validation
1580  * 
1581  * 'validate' a buffer object. This changes where the buffer is
1582  * located, along with changing access modes.
1583  */
1584
1585 int drm_bo_do_validate(struct drm_buffer_object *bo,
1586                        uint64_t flags, uint64_t mask, uint32_t hint,
1587                        uint32_t fence_class,
1588                        struct drm_bo_info_rep *rep)
1589 {
1590         int ret;
1591         int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1592
1593         mutex_lock(&bo->mutex);
1594         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1595
1596         if (ret)
1597                 goto out;
1598
1599         ret = drm_bo_modify_proposed_flags(bo, flags, mask);
1600         if (ret)
1601                 goto out;
1602
1603         ret = drm_buffer_object_validate(bo,
1604                                          fence_class,
1605                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1606                                          no_wait);
1607 out:
1608         if (rep)
1609                 drm_bo_fill_rep_arg(bo, rep);
1610
1611         mutex_unlock(&bo->mutex);
1612         return ret;
1613 }
1614 EXPORT_SYMBOL(drm_bo_do_validate);
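
/*
 * Usage sketch (illustrative only, not called from this file): a driver
 * that wants to pin a buffer into a fixed aperture without emitting a
 * fence could call drm_bo_do_validate() roughly as
 *
 *	ret = drm_bo_do_validate(bo,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_HINT_DONT_FENCE, 0, NULL);
 *
 * assuming DRM_BO_MEM_VRAM is a memory type the driver has initialized
 * with drm_bo_init_mm().
 */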
1615
1616 /**
1617  * drm_bo_handle_validate
1618  *
1619  * @file_priv: the drm file private, used to get a handle to the user context
1620  *
1621  * @handle: the buffer object handle
1622  *
1623  * @flags: access rights, mapping parameters and cacheability. See
1624  * the DRM_BO_FLAG_* values in drm.h
1625  *
1626  * @mask: Which flag values to change; this allows callers to modify
1627  * things without knowing the current state of other flags.
1628  *
1629  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1630  * values in drm.h.
1631  *
1632  * @fence_class: a driver-specific way of doing fences. Presumably,
1633  * this would be used if the driver had more than one submission and
1634  * fencing mechanism. At this point, there isn't any use of this
1635  * from the user mode code.
1636  *
1637  * @use_old_fence_class: don't change fence class, pull it from the buffer object
1638  *
1639  * @rep: To be stuffed with the reply from validation
1640  * 
1641  * @bo_rep: To be stuffed with the buffer object pointer
1642  *
1643  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
1644  * Some permissions checking is done on the parameters, otherwise this
1645  * is a thin wrapper.
1646  */
1647
1648 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1649                            uint64_t flags, uint64_t mask,
1650                            uint32_t hint,
1651                            uint32_t fence_class,
1652                            int use_old_fence_class,
1653                            struct drm_bo_info_rep *rep,
1654                            struct drm_buffer_object **bo_rep)
1655 {
1656         struct drm_device *dev = file_priv->minor->dev;
1657         struct drm_buffer_object *bo;
1658         int ret;
1659
1660         mutex_lock(&dev->struct_mutex);
1661         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1662         mutex_unlock(&dev->struct_mutex);
1663
1664         if (!bo)
1665                 return -EINVAL;
1666
1667         if (use_old_fence_class)
1668                 fence_class = bo->fence_class;
1669
1670         /*
1671          * Only allow creator to change shared buffer mask.
1672          */
1673
1674         if (bo->base.owner != file_priv)
1675                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1676
1677
1678         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1679
1680         if (!ret && bo_rep)
1681                 *bo_rep = bo;
1682         else
1683                 drm_bo_usage_deref_unlocked(&bo);
1684
1685         return ret;
1686 }
1687 EXPORT_SYMBOL(drm_bo_handle_validate);
1688
1689 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1690                               struct drm_bo_info_rep *rep)
1691 {
1692         struct drm_device *dev = file_priv->minor->dev;
1693         struct drm_buffer_object *bo;
1694
1695         mutex_lock(&dev->struct_mutex);
1696         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1697         mutex_unlock(&dev->struct_mutex);
1698
1699         if (!bo)
1700                 return -EINVAL;
1701
1702         mutex_lock(&bo->mutex);
1703         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1704                 (void)drm_bo_busy(bo);
1705         drm_bo_fill_rep_arg(bo, rep);
1706         mutex_unlock(&bo->mutex);
1707         drm_bo_usage_deref_unlocked(&bo);
1708         return 0;
1709 }
1710
1711 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1712                               uint32_t hint,
1713                               struct drm_bo_info_rep *rep)
1714 {
1715         struct drm_device *dev = file_priv->minor->dev;
1716         struct drm_buffer_object *bo;
1717         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1718         int ret;
1719
1720         mutex_lock(&dev->struct_mutex);
1721         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1722         mutex_unlock(&dev->struct_mutex);
1723
1724         if (!bo)
1725                 return -EINVAL;
1726
1727         mutex_lock(&bo->mutex);
1728         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1729         if (ret)
1730                 goto out;
1731         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1732         if (ret)
1733                 goto out;
1734
1735         drm_bo_fill_rep_arg(bo, rep);
1736
1737 out:
1738         mutex_unlock(&bo->mutex);
1739         drm_bo_usage_deref_unlocked(&bo);
1740         return ret;
1741 }
1742
1743 int drm_buffer_object_create(struct drm_device *dev,
1744                              unsigned long size,
1745                              enum drm_bo_type type,
1746                              uint64_t flags,
1747                              uint32_t hint,
1748                              uint32_t page_alignment,
1749                              unsigned long buffer_start,
1750                              struct drm_buffer_object **buf_obj)
1751 {
1752         struct drm_buffer_manager *bm = &dev->bm;
1753         struct drm_buffer_object *bo;
1754         int ret = 0;
1755         unsigned long num_pages;
1756
1757         size += buffer_start & ~PAGE_MASK;
1758         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1759         if (num_pages == 0) {
1760                 DRM_ERROR("Illegal buffer object size %ld.\n", size);
1761                 return -EINVAL;
1762         }
1763
1764         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1765
1766         if (!bo)
1767                 return -ENOMEM;
1768
1769         mutex_init(&bo->mutex);
1770         mutex_lock(&bo->mutex);
1771
1772         atomic_set(&bo->usage, 1);
1773         atomic_set(&bo->mapped, -1);
1774         DRM_INIT_WAITQUEUE(&bo->event_queue);
1775         INIT_LIST_HEAD(&bo->lru);
1776         INIT_LIST_HEAD(&bo->pinned_lru);
1777         INIT_LIST_HEAD(&bo->ddestroy);
1778 #ifdef DRM_ODD_MM_COMPAT
1779         INIT_LIST_HEAD(&bo->p_mm_list);
1780         INIT_LIST_HEAD(&bo->vma_list);
1781 #endif
1782         bo->dev = dev;
1783         bo->type = type;
1784         bo->num_pages = num_pages;
1785         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1786         bo->mem.num_pages = bo->num_pages;
1787         bo->mem.mm_node = NULL;
1788         bo->mem.page_alignment = page_alignment;
1789         bo->buffer_start = buffer_start & PAGE_MASK;
1790         bo->priv_flags = 0;
1791         bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1792                          DRM_BO_FLAG_MAPPABLE);
1793         bo->mem.proposed_flags = 0;
1794         atomic_inc(&bm->count);
1795         /*
1796          * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1797          */
1798         ret = drm_bo_modify_proposed_flags(bo, flags, flags);
1799         if (ret)
1800                 goto out_err;
1801
1802         /*
1803          * For drm_bo_type_device buffers, allocate
1804          * address space from the device so that applications
1805          * can mmap the buffer from there
1806          */
1807         if (bo->type == drm_bo_type_device) {
1808                 mutex_lock(&dev->struct_mutex);
1809                 ret = drm_bo_setup_vm_locked(bo);
1810                 mutex_unlock(&dev->struct_mutex);
1811                 if (ret)
1812                         goto out_err;
1813         }
1814
1815         ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1816         if (ret)
1817                 goto out_err;
1818
1819         mutex_unlock(&bo->mutex);
1820         *buf_obj = bo;
1821         return 0;
1822
1823 out_err:
1824         mutex_unlock(&bo->mutex);
1825
1826         drm_bo_usage_deref_unlocked(&bo);
1827         return ret;
1828 }
1829 EXPORT_SYMBOL(drm_buffer_object_create);
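
/*
 * Usage sketch (illustrative; the type and flag values are assumptions
 * about a typical driver, not something this file mandates): an
 * in-kernel allocation of a pinned scratch buffer at load time might
 * look like
 *
 *	struct drm_buffer_object *bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_VRAM |
 *				       DRM_BO_FLAG_NO_EVICT,
 *				       DRM_BO_HINT_DONT_FENCE, 0, 0, &bo);
 *
 * User-visible buffers instead come in through drm_bo_create_ioctl()
 * below, which picks drm_bo_type_user or drm_bo_type_device based on
 * req->buffer_start.
 */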
1830
1831
1832 static int drm_bo_add_user_object(struct drm_file *file_priv,
1833                                   struct drm_buffer_object *bo, int shareable)
1834 {
1835         struct drm_device *dev = file_priv->minor->dev;
1836         int ret;
1837
1838         mutex_lock(&dev->struct_mutex);
1839         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1840         if (ret)
1841                 goto out;
1842
1843         bo->base.remove = drm_bo_base_deref_locked;
1844         bo->base.type = drm_buffer_type;
1845         bo->base.ref_struct_locked = NULL;
1846         bo->base.unref = drm_buffer_user_object_unmap;
1847
1848 out:
1849         mutex_unlock(&dev->struct_mutex);
1850         return ret;
1851 }
1852
1853 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1854 {
1855         struct drm_bo_create_arg *arg = data;
1856         struct drm_bo_create_req *req = &arg->d.req;
1857         struct drm_bo_info_rep *rep = &arg->d.rep;
1858         struct drm_buffer_object *entry;
1859         enum drm_bo_type bo_type;
1860         int ret = 0;
1861
1862         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1863             (int)(req->size / 1024), req->page_alignment * 4);
1864
1865         if (!dev->bm.initialized) {
1866                 DRM_ERROR("Buffer object manager is not initialized.\n");
1867                 return -EINVAL;
1868         }
1869
1870         /*
1871          * If the buffer creation request comes in with a starting address,
1872          * that points at the desired user pages to map. Otherwise, create
1873          * a drm_bo_type_device buffer, which uses pages allocated from the kernel
1874          */
1875         bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1876
1877         /*
1878          * User buffers cannot be shared
1879          */
1880         if (bo_type == drm_bo_type_user)
1881                 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1882
1883         ret = drm_buffer_object_create(file_priv->minor->dev,
1884                                        req->size, bo_type, req->flags,
1885                                        req->hint, req->page_alignment,
1886                                        req->buffer_start, &entry);
1887         if (ret)
1888                 goto out;
1889
1890         ret = drm_bo_add_user_object(file_priv, entry,
1891                                      req->flags & DRM_BO_FLAG_SHAREABLE);
1892         if (ret) {
1893                 drm_bo_usage_deref_unlocked(&entry);
1894                 goto out;
1895         }
1896
1897         mutex_lock(&entry->mutex);
1898         drm_bo_fill_rep_arg(entry, rep);
1899         mutex_unlock(&entry->mutex);
1900
1901 out:
1902         return ret;
1903 }
1904
1905 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1906                            void *data, struct drm_file *file_priv)
1907 {
1908         struct drm_bo_map_wait_idle_arg *arg = data;
1909         struct drm_bo_info_req *req = &arg->d.req;
1910         struct drm_bo_info_rep *rep = &arg->d.rep;
1911         int ret;
1912
1913         if (!dev->bm.initialized) {
1914                 DRM_ERROR("Buffer object manager is not initialized.\n");
1915                 return -EINVAL;
1916         }
1917
1918         ret = drm_bo_read_lock(&dev->bm.bm_lock);
1919         if (ret)
1920                 return ret;
1921
1922         /*
1923          * validate the buffer. note that 'fence_class' will be unused
1924          * as we pass use_old_fence_class=1 here. Note also that
1925          * the libdrm API doesn't pass fence_class to the kernel,
1926          * so it's a good thing it isn't used here.
1927          */
1928         ret = drm_bo_handle_validate(file_priv, req->handle,
1929                                      req->flags,
1930                                      req->mask,
1931                                      req->hint | DRM_BO_HINT_DONT_FENCE,
1932                                      req->fence_class, 1,
1933                                      rep, NULL);
1934
1935         (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1936         if (ret)
1937                 return ret;
1938
1939         return 0;
1940 }
1941
1942 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1943 {
1944         struct drm_bo_map_wait_idle_arg *arg = data;
1945         struct drm_bo_info_req *req = &arg->d.req;
1946         struct drm_bo_info_rep *rep = &arg->d.rep;
1947         int ret;
1948         if (!dev->bm.initialized) {
1949                 DRM_ERROR("Buffer object manager is not initialized.\n");
1950                 return -EINVAL;
1951         }
1952
1953         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1954                                     req->hint, rep);
1955         if (ret)
1956                 return ret;
1957
1958         return 0;
1959 }
1960
1961 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1962 {
1963         struct drm_bo_handle_arg *arg = data;
1964         int ret;
1965         if (!dev->bm.initialized) {
1966                 DRM_ERROR("Buffer object manager is not initialized.\n");
1967                 return -EINVAL;
1968         }
1969
1970         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1971         return ret;
1972 }
1973
1974
1975 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1976 {
1977         struct drm_bo_reference_info_arg *arg = data;
1978         struct drm_bo_handle_arg *req = &arg->d.req;
1979         struct drm_bo_info_rep *rep = &arg->d.rep;
1980         struct drm_user_object *uo;
1981         int ret;
1982
1983         if (!dev->bm.initialized) {
1984                 DRM_ERROR("Buffer object manager is not initialized.\n");
1985                 return -EINVAL;
1986         }
1987
1988         ret = drm_user_object_ref(file_priv, req->handle,
1989                                   drm_buffer_type, &uo);
1990         if (ret)
1991                 return ret;
1992
1993         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1994         if (ret)
1995                 return ret;
1996
1997         return 0;
1998 }
1999
2000 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2001 {
2002         struct drm_bo_handle_arg *arg = data;
2003         int ret = 0;
2004
2005         if (!dev->bm.initialized) {
2006                 DRM_ERROR("Buffer object manager is not initialized.\n");
2007                 return -EINVAL;
2008         }
2009
2010         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2011         return ret;
2012 }
2013
2014 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2015 {
2016         struct drm_bo_reference_info_arg *arg = data;
2017         struct drm_bo_handle_arg *req = &arg->d.req;
2018         struct drm_bo_info_rep *rep = &arg->d.rep;
2019         int ret;
2020
2021         if (!dev->bm.initialized) {
2022                 DRM_ERROR("Buffer object manager is not initialized.\n");
2023                 return -EINVAL;
2024         }
2025
2026         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2027         if (ret)
2028                 return ret;
2029
2030         return 0;
2031 }
2032
2033 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2034 {
2035         struct drm_bo_map_wait_idle_arg *arg = data;
2036         struct drm_bo_info_req *req = &arg->d.req;
2037         struct drm_bo_info_rep *rep = &arg->d.rep;
2038         int ret;
2039         if (!dev->bm.initialized) {
2040                 DRM_ERROR("Buffer object manager is not initialized.\n");
2041                 return -EINVAL;
2042         }
2043
2044         ret = drm_bo_handle_wait(file_priv, req->handle,
2045                                  req->hint, rep);
2046         if (ret)
2047                 return ret;
2048
2049         return 0;
2050 }
2051
2052 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2053                              uint32_t mem_type,
2054                              int free_pinned,
2055                              int allow_errors)
2056 {
2057         struct drm_device *dev = bo->dev;
2058         int ret = 0;
2059
2060         mutex_lock(&bo->mutex);
2061
2062         ret = drm_bo_expire_fence(bo, allow_errors);
2063         if (ret)
2064                 goto out;
2065
2066         if (free_pinned) {
2067                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2068                 mutex_lock(&dev->struct_mutex);
2069                 list_del_init(&bo->pinned_lru);
2070                 if (bo->pinned_node == bo->mem.mm_node)
2071                         bo->pinned_node = NULL;
2072                 if (bo->pinned_node != NULL) {
2073                         drm_mm_put_block(bo->pinned_node);
2074                         bo->pinned_node = NULL;
2075                 }
2076                 mutex_unlock(&dev->struct_mutex);
2077         }
2078
2079         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2080                 DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
2081                           "cleanup. Removing flag and evicting.\n");
2082                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2083                 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2084         }
2085
2086         if (bo->mem.mem_type == mem_type)
2087                 ret = drm_bo_evict(bo, mem_type, 0);
2088
2089         if (ret) {
2090                 if (allow_errors) {
2091                         goto out;
2092                 } else {
2093                         ret = 0;
2094                         DRM_ERROR("Cleanup eviction failed\n");
2095                 }
2096         }
2097
2098 out:
2099         mutex_unlock(&bo->mutex);
2100         return ret;
2101 }
2102
2103
2104 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2105                                          int pinned_list)
2106 {
2107         if (pinned_list)
2108                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2109         else
2110                 return list_entry(list, struct drm_buffer_object, lru);
2111 }
2112
2113 /*
2114  * dev->struct_mutex locked.
2115  */
2116
2117 static int drm_bo_force_list_clean(struct drm_device *dev,
2118                                    struct list_head *head,
2119                                    unsigned mem_type,
2120                                    int free_pinned,
2121                                    int allow_errors,
2122                                    int pinned_list)
2123 {
2124         struct list_head *list, *next, *prev;
2125         struct drm_buffer_object *entry, *nentry;
2126         int ret;
2127         int do_restart;
2128
2129         /*
2130          * The list traversal is a bit odd here, because an item may
2131          * disappear from the list when we release the struct_mutex or
2132          * when we decrease the usage count. Also we're not guaranteed
2133          * to drain pinned lists, so we can't always restart.
2134          */
2135
2136 restart:
2137         nentry = NULL;
2138         list_for_each_safe(list, next, head) {
2139                 prev = list->prev;
2140
2141                 entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
2142                 atomic_inc(&entry->usage);
2143                 if (nentry) {
2144                         atomic_dec(&nentry->usage);
2145                         nentry = NULL;
2146                 }
2147
2148                 /*
2149                  * Protect the next item from destruction, so we can check
2150                  * its list pointers later on.
2151                  */
2152
2153                 if (next != head) {
2154                         nentry = drm_bo_entry(next, pinned_list);
2155                         atomic_inc(&nentry->usage);
2156                 }
2157                 mutex_unlock(&dev->struct_mutex);
2158
2159                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2160                                         allow_errors);
2161                 mutex_lock(&dev->struct_mutex);
2162
2163                 drm_bo_usage_deref_locked(&entry);
2164                 if (ret)
2165                         return ret;
2166
2167                 /*
2168                  * Has the next item disappeared from the list?
2169                  */
2170
2171                 do_restart = ((next->prev != list) && (next->prev != prev));
2172
2173                 if (nentry != NULL && do_restart)
2174                         drm_bo_usage_deref_locked(&nentry);
2175
2176                 if (do_restart)
2177                         goto restart;
2178         }
2179         return 0;
2180 }
2181
2182 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
2183 {
2184         struct drm_buffer_manager *bm = &dev->bm;
2185         struct drm_mem_type_manager *man = &bm->man[mem_type];
2186         int ret = -EINVAL;
2187
2188         if (mem_type >= DRM_BO_MEM_TYPES) {
2189                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2190                 return ret;
2191         }
2192
2193         if (!man->has_type) {
2194                 DRM_ERROR("Trying to take down uninitialized "
2195                           "memory manager type %u\n", mem_type);
2196                 return ret;
2197         }
2198
2199         if ((man->kern_init_type) && (kern_clean == 0)) {
2200                 DRM_ERROR("Trying to take down kernel initialized "
2201                           "memory manager type %u\n", mem_type);
2202                 return -EPERM;
2203         }
2204
2205         man->use_type = 0;
2206         man->has_type = 0;
2207
2208         ret = 0;
2209         if (mem_type > 0) {
2210                 BUG_ON(!list_empty(&bm->unfenced));
2211                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2212                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2213
2214                 if (drm_mm_clean(&man->manager)) {
2215                         drm_mm_takedown(&man->manager);
2216                 } else {
2217                         ret = -EBUSY;
2218                 }
2219         }
2220
2221         return ret;
2222 }
2223 EXPORT_SYMBOL(drm_bo_clean_mm);
2224
2225 /**
2226  * Evict all buffers of a particular mem_type, but leave memory manager
2227  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2228  * point since we have the hardware lock.
2229  */
2230
2231 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2232 {
2233         int ret;
2234         struct drm_buffer_manager *bm = &dev->bm;
2235         struct drm_mem_type_manager *man = &bm->man[mem_type];
2236
2237         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2238                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2239                 return -EINVAL;
2240         }
2241
2242         if (!man->has_type) {
2243                 DRM_ERROR("Memory type %u has not been initialized.\n",
2244                           mem_type);
2245                 return 0;
2246         }
2247
2248         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2249         if (ret)
2250                 return ret;
2251         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2252
2253         return ret;
2254 }
2255
2256 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
2257                    unsigned long p_offset, unsigned long p_size,
2258                    int kern_init)
2259 {
2260         struct drm_buffer_manager *bm = &dev->bm;
2261         int ret = -EINVAL;
2262         struct drm_mem_type_manager *man;
2263
2264         if (type >= DRM_BO_MEM_TYPES) {
2265                 DRM_ERROR("Illegal memory type %d\n", type);
2266                 return ret;
2267         }
2268
2269         man = &bm->man[type];
2270         if (man->has_type) {
2271                 DRM_ERROR("Memory manager already initialized for type %d\n",
2272                           type);
2273                 return ret;
2274         }
2275
2276         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2277         if (ret)
2278                 return ret;
2279
2280         ret = 0;
2281         if (type != DRM_BO_MEM_LOCAL) {
2282                 if (!p_size) {
2283                         DRM_ERROR("Zero size memory manager type %d\n", type);
2284                         return -EINVAL;
2285                 }
2286                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2287                 if (ret)
2288                         return ret;
2289         }
2290         man->has_type = 1;
2291         man->use_type = 1;
2292         man->kern_init_type = kern_init;
2293         man->size = p_size;
2294
2295         INIT_LIST_HEAD(&man->lru);
2296         INIT_LIST_HEAD(&man->pinned);
2297
2298         return 0;
2299 }
2300 EXPORT_SYMBOL(drm_bo_init_mm);
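
/*
 * Illustrative sketch (aperture offsets and sizes are hypothetical and
 * given in pages): a driver would typically register its fixed memory
 * types right after drm_bo_driver_init(), e.g.
 *
 *	drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages, 1);
 *	drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_pages, 1);
 *
 * or leave it to user space via drm_mm_init_ioctl() below, in which case
 * kern_init is 0 and the type can later be torn down from user space.
 */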
2301
2302 /*
2303  * This function is intended to be called on drm driver unload.
2304  * If you decide to call it from lastclose, you must protect the call
2305  * from a potentially racing drm_bo_driver_init in firstopen.
2306  * (This may happen on X server restart).
2307  */
2308
2309 int drm_bo_driver_finish(struct drm_device *dev)
2310 {
2311         struct drm_buffer_manager *bm = &dev->bm;
2312         int ret = 0;
2313         unsigned i = DRM_BO_MEM_TYPES;
2314         struct drm_mem_type_manager *man;
2315
2316         mutex_lock(&dev->struct_mutex);
2317
2318         if (!bm->initialized)
2319                 goto out;
2320         bm->initialized = 0;
2321
2322         while (i--) {
2323                 man = &bm->man[i];
2324                 if (man->has_type) {
2325                         man->use_type = 0;
2326                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
2327                                 ret = -EBUSY;
2328                                 DRM_ERROR("DRM memory manager type %d "
2329                                           "is not clean.\n", i);
2330                         }
2331                         man->has_type = 0;
2332                 }
2333         }
2334         mutex_unlock(&dev->struct_mutex);
2335
2336         if (!cancel_delayed_work(&bm->wq))
2337                 flush_scheduled_work();
2338
2339         mutex_lock(&dev->struct_mutex);
2340         drm_bo_delayed_delete(dev, 1);
2341         if (list_empty(&bm->ddestroy))
2342                 DRM_DEBUG("Delayed destroy list was clean\n");
2343
2344         if (list_empty(&bm->man[0].lru))
2345                 DRM_DEBUG("Swap list was clean\n");
2346
2347         if (list_empty(&bm->man[0].pinned))
2348                 DRM_DEBUG("NO_MOVE list was clean\n");
2349
2350         if (list_empty(&bm->unfenced))
2351                 DRM_DEBUG("Unfenced list was clean\n");
2352
2353 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2354         ClearPageReserved(bm->dummy_read_page);
2355 #endif
2356         __free_page(bm->dummy_read_page);
2357
2358 out:
2359         mutex_unlock(&dev->struct_mutex);
2360         return ret;
2361 }
2362 EXPORT_SYMBOL(drm_bo_driver_finish);
2363
2364 /*
2365  * This function is intended to be called on drm driver load.
2366  * If you decide to call it from firstopen, you must protect the call
2367  * from a potentially racing drm_bo_driver_finish in lastclose.
2368  * (This may happen on X server restart).
2369  */
2370
2371 int drm_bo_driver_init(struct drm_device *dev)
2372 {
2373         struct drm_bo_driver *driver = dev->driver->bo_driver;
2374         struct drm_buffer_manager *bm = &dev->bm;
2375         int ret = -EINVAL;
2376
2377         bm->dummy_read_page = NULL;
2378         drm_bo_init_lock(&bm->bm_lock);
2379         mutex_lock(&dev->struct_mutex);
2380         if (!driver)
2381                 goto out_unlock;
2382
2383         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2384         if (!bm->dummy_read_page) {
2385                 ret = -ENOMEM;
2386                 goto out_unlock;
2387         }
2388
2389 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2390         SetPageReserved(bm->dummy_read_page);
2391 #endif
2392
2393         /*
2394          * Initialize the system memory buffer type.
2395          * Other types need to be driver / IOCTL initialized.
2396          */
2397         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
2398         if (ret)
2399                 goto out_unlock;
2400
2401 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2402         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2403 #else
2404         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2405 #endif
2406         bm->initialized = 1;
2407         bm->nice_mode = 1;
2408         atomic_set(&bm->count, 0);
2409         bm->cur_pages = 0;
2410         INIT_LIST_HEAD(&bm->unfenced);
2411         INIT_LIST_HEAD(&bm->ddestroy);
2412 out_unlock:
2413         mutex_unlock(&dev->struct_mutex);
2414         return ret;
2415 }
2416 EXPORT_SYMBOL(drm_bo_driver_init);
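
/*
 * Illustrative pairing note: drm_bo_driver_init() above is matched by
 * drm_bo_driver_finish() on unload, so a typical driver would do,
 * roughly,
 *
 *	load:    drm_bo_driver_init(dev), then drm_bo_init_mm() per aperture
 *	unload:  drm_bo_driver_finish(dev), which also takes the non-local
 *	         memory types down via drm_bo_clean_mm()
 */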
2417
2418 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2419 {
2420         struct drm_mm_init_arg *arg = data;
2421         struct drm_buffer_manager *bm = &dev->bm;
2422         struct drm_bo_driver *driver = dev->driver->bo_driver;
2423         int ret;
2424
2425         if (!driver) {
2426                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2427                 return -EINVAL;
2428         }
2429
2430         ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2431         if (ret)
2432                 return ret;
2433
2434         ret = -EINVAL;
2435         if (arg->magic != DRM_BO_INIT_MAGIC) {
2436                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2437                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2438                 return -EINVAL;
2439         }
2440         if (arg->major != DRM_BO_INIT_MAJOR) {
2441                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2442                           "\tversion don't match. Got %d, expected %d.\n",
2443                           arg->major, DRM_BO_INIT_MAJOR);
2444                 return -EINVAL;
2445         }
2446
2447         mutex_lock(&dev->struct_mutex);
2448         if (!bm->initialized) {
2449                 DRM_ERROR("DRM memory manager was not initialized.\n");
2450                 goto out;
2451         }
2452         if (arg->mem_type == 0) {
2453                 DRM_ERROR("System memory buffers already initialized.\n");
2454                 goto out;
2455         }
2456         ret = drm_bo_init_mm(dev, arg->mem_type,
2457                              arg->p_offset, arg->p_size, 0);
2458
2459 out:
2460         mutex_unlock(&dev->struct_mutex);
2461         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2462
2463         if (ret)
2464                 return ret;
2465
2466         return 0;
2467 }
2468
2469 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2470 {
2471         struct drm_mm_type_arg *arg = data;
2472         struct drm_buffer_manager *bm = &dev->bm;
2473         struct drm_bo_driver *driver = dev->driver->bo_driver;
2474         int ret;
2475
2476         if (!driver) {
2477                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2478                 return -EINVAL;
2479         }
2480
2481         ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2482         if (ret)
2483                 return ret;
2484
2485         mutex_lock(&dev->struct_mutex);
2486         ret = -EINVAL;
2487         if (!bm->initialized) {
2488                 DRM_ERROR("DRM memory manager was not initialized\n");
2489                 goto out;
2490         }
2491         if (arg->mem_type == 0) {
2492                 DRM_ERROR("No takedown for System memory buffers.\n");
2493                 goto out;
2494         }
2495         ret = 0;
2496         if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
2497                 if (ret == -EINVAL)
2498                         DRM_ERROR("Memory manager type %d not clean. "
2499                                   "Delaying takedown\n", arg->mem_type);
2500                 ret = 0;
2501         }
2502 out:
2503         mutex_unlock(&dev->struct_mutex);
2504         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2505
2506         if (ret)
2507                 return ret;
2508
2509         return 0;
2510 }
2511
2512 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2513 {
2514         struct drm_mm_type_arg *arg = data;
2515         struct drm_bo_driver *driver = dev->driver->bo_driver;
2516         int ret;
2517
2518         if (!driver) {
2519                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2520                 return -EINVAL;
2521         }
2522
2523         if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2524                 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2525                 return -EINVAL;
2526         }
2527
2528         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2529                 ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
2530                 if (ret)
2531                         return ret;
2532         }
2533
2534         mutex_lock(&dev->struct_mutex);
2535         ret = drm_bo_lock_mm(dev, arg->mem_type);
2536         mutex_unlock(&dev->struct_mutex);
2537         if (ret) {
2538                 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2539                 return ret;
2540         }
2541
2542         return 0;
2543 }
2544
2545 int drm_mm_unlock_ioctl(struct drm_device *dev,
2546                         void *data,
2547                         struct drm_file *file_priv)
2548 {
2549         struct drm_mm_type_arg *arg = data;
2550         struct drm_bo_driver *driver = dev->driver->bo_driver;
2551         int ret;
2552
2553         if (!driver) {
2554                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2555                 return -EINVAL;
2556         }
2557
2558         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2559                 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2560                 if (ret)
2561                         return ret;
2562         }
2563
2564         return 0;
2565 }
2566
2567 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2568 {
2569         struct drm_mm_info_arg *arg = data;
2570         struct drm_buffer_manager *bm = &dev->bm;
2571         struct drm_bo_driver *driver = dev->driver->bo_driver;
2572         struct drm_mem_type_manager *man;
2573         int ret = 0;
2574         int mem_type = arg->mem_type;
2575
2576         if (!driver) {
2577                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2578                 return -EINVAL;
2579         }
2580
2581         if (mem_type >= DRM_BO_MEM_TYPES) {
2582                 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2583                 return -EINVAL;
2584         }
2585
2586         mutex_lock(&dev->struct_mutex);
2587         if (!bm->initialized) {
2588                 DRM_ERROR("DRM memory manager was not initialized\n");
2589                 ret = -EINVAL;
2590                 goto out;
2591         }
2592
2593
2594         man = &bm->man[arg->mem_type];
2595
2596         arg->p_size = man->size;
2597
2598 out:
2599         mutex_unlock(&dev->struct_mutex);
2600
2601         return ret;
2602 }
2603 /*
2604  * buffer object vm functions.
2605  */
2606
2607 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2608 {
2609         struct drm_buffer_manager *bm = &dev->bm;
2610         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2611
2612         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2613                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2614                         return 0;
2615
2616                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2617                         return 0;
2618
2619                 if (mem->flags & DRM_BO_FLAG_CACHED)
2620                         return 0;
2621         }
2622         return 1;
2623 }
2624 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2625
2626 /**
2627  * \c Get the PCI offset for the buffer object memory.
2628  *
2629  * \param bo The buffer object.
2630  * \param bus_base On return the base of the PCI region
2631  * \param bus_offset On return the byte offset into the PCI region
2632  * \param bus_size On return the byte size of the buffer object or zero if
2633  *     the buffer object memory is not accessible through a PCI region.
2634  * \return Failure indication.
2635  *
2636  * Returns -EINVAL if the buffer object is currently not mappable.
2637  * Otherwise returns zero.
2638  */
2639
2640 int drm_bo_pci_offset(struct drm_device *dev,
2641                       struct drm_bo_mem_reg *mem,
2642                       unsigned long *bus_base,
2643                       unsigned long *bus_offset, unsigned long *bus_size)
2644 {
2645         struct drm_buffer_manager *bm = &dev->bm;
2646         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2647
2648         *bus_size = 0;
2649         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2650                 return -EINVAL;
2651
2652         if (drm_mem_reg_is_pci(dev, mem)) {
2653                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2654                 *bus_size = mem->num_pages << PAGE_SHIFT;
2655                 *bus_base = man->io_offset;
2656         }
2657
2658         return 0;
2659 }
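
/*
 * Illustrative sketch: a caller that needs a CPU mapping of a
 * PCI-accessible region would typically combine the returned values,
 * roughly as
 *
 *	ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
 *				&bus_size);
 *	if (!ret && bus_size != 0)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);
 *
 * A returned bus_size of zero means the memory is not in a PCI region
 * and has to be accessed through the TTM page list instead.
 */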
2660
2661 /**
2662  * \c Kill all user-space virtual mappings of this buffer object.
2663  *
2664  * \param bo The buffer object.
2665  *
2666  * Call bo->mutex locked.
2667  */
2668
2669 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2670 {
2671         struct drm_device *dev = bo->dev;
2672         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2673         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2674
2675         if (!dev->dev_mapping)
2676                 return;
2677
2678         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2679 }
2680
2681 /**
2682  * drm_bo_takedown_vm_locked:
2683  *
2684  * @bo: the buffer object to remove any drm device mapping
2685  *
2686  * Remove any associated vm mapping on the drm device node that
2687  * would have been created for a drm_bo_type_device buffer
2688  */
2689 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2690 {
2691         struct drm_map_list *list;
2692         drm_local_map_t *map;
2693         struct drm_device *dev = bo->dev;
2694
2695         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2696         if (bo->type != drm_bo_type_device)
2697                 return;
2698
2699         list = &bo->map_list;
2700         if (list->user_token) {
2701                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2702                 list->user_token = 0;
2703         }
2704         if (list->file_offset_node) {
2705                 drm_mm_put_block(list->file_offset_node);
2706                 list->file_offset_node = NULL;
2707         }
2708
2709         map = list->map;
2710         if (!map)
2711                 return;
2712
2713         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2714         list->map = NULL;
2715         list->user_token = 0ULL;
2716         drm_bo_usage_deref_locked(&bo);
2717 }
2718
2719 /**
2720  * drm_bo_setup_vm_locked:
2721  *
2722  * @bo: the buffer to allocate address space for
2723  *
2724  * Allocate address space in the drm device so that applications
2725  * can mmap the buffer and access the contents. This only
2726  * applies to drm_bo_type_device objects as others are not
2727  * placed in the drm device address space.
2728  */
2729 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2730 {
2731         struct drm_map_list *list = &bo->map_list;
2732         drm_local_map_t *map;
2733         struct drm_device *dev = bo->dev;
2734
2735         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2736         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2737         if (!list->map)
2738                 return -ENOMEM;
2739
2740         map = list->map;
2741         map->offset = 0;
2742         map->type = _DRM_TTM;
2743         map->flags = _DRM_REMOVABLE;
2744         map->size = bo->mem.num_pages * PAGE_SIZE;
2745         atomic_inc(&bo->usage);
2746         map->handle = (void *)bo;
2747
2748         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2749                                                     bo->mem.num_pages, 0, 0);
2750
2751         if (!list->file_offset_node) {
2752                 drm_bo_takedown_vm_locked(bo);
2753                 return -ENOMEM;
2754         }
2755
2756         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2757                                                   bo->mem.num_pages, 0);
2758
2759         list->hash.key = list->file_offset_node->start;
2760         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2761                 drm_bo_takedown_vm_locked(bo);
2762                 return -ENOMEM;
2763         }
2764
2765         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2766
2767         return 0;
2768 }
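
/*
 * Illustrative note: the user_token set up above is the fake file
 * offset that identifies the object in dev->map_hash. A user-space
 * client maps a drm_bo_type_device buffer by passing that offset
 * (reported back through the buffer-object ioctls) to mmap(2) on the
 * drm device node, roughly
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, user_token);
 *
 * The variable names here are placeholders, not part of the kernel API.
 */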
2769
2770 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2771                          struct drm_file *file_priv)
2772 {
2773         struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2774
2775         arg->major = DRM_BO_INIT_MAJOR;
2776         arg->minor = DRM_BO_INIT_MINOR;
2777         arg->patchlevel = DRM_BO_INIT_PATCH;
2778
2779         return 0;
2780 }