[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads,
 40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
43  * bo->mutex does also protect the buffer list heads, so to manipulate those,
44  * we need both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47  * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48  * the list traversal will, in general, need to be restarted.
49  *
50  */
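/*
 * A condensed sketch of the pattern this results in (see for instance
 * drm_bo_cleanup_refs() and drm_bo_mem_force_space() below): take a
 * usage reference under dev->struct_mutex, drop that mutex, take
 * bo->mutex, and only then re-acquire dev->struct_mutex to touch the
 * list heads:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&bo->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&bo->mutex);
 *	mutex_lock(&dev->struct_mutex);
 *	...  list manipulation  ...
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_unlock(&bo->mutex);
 *	drm_bo_usage_deref_unlocked(&bo);
 */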
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
56
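/*
 * Map a memory type index to its DRM_BO_FLAG_MEM_* bit; the per-type
 * memory placement flags occupy bits 24 and up of the 64-bit flag word.
 */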
57 static inline uint64_t drm_bo_type_flags(unsigned type)
58 {
59         return (1ULL << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
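/*
 * Put the buffer on the eviction LRU of its current memory type, unless
 * it is pinned (NO_MOVE/NO_EVICT) in that memory type, in which case it
 * stays off the LRU so it is never picked for eviction.
 */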
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
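/*
 * Unmap any user-space virtual mappings of the buffer before it is
 * moved (and, with DRM_ODD_MM_COMPAT, re-establish bound mappings
 * afterwards) so that stale page-table entries never point at the old
 * location.
 */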
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
93 {
94 #ifdef DRM_ODD_MM_COMPAT
95         int ret;
96
97         if (!bo->map_list.map)
98                 return 0;
99
100         ret = drm_bo_lock_kmm(bo);
101         if (ret)
102                 return ret;
103         drm_bo_unmap_virtual(bo);
104         if (old_is_pci)
105                 drm_bo_finish_unmap(bo);
106 #else
107         if (!bo->map_list.map)
108                 return 0;
109
110         drm_bo_unmap_virtual(bo);
111 #endif
112         return 0;
113 }
114
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
116 {
117 #ifdef DRM_ODD_MM_COMPAT
118         int ret;
119
120         if (!bo->map_list.map)
121                 return;
122
123         ret = drm_bo_remap_bound(bo);
124         if (ret) {
125                 DRM_ERROR("Failed to remap a bound buffer object.\n"
126                           "\tThis might cause a sigbus later.\n");
127         }
128         drm_bo_unlock_kmm(bo);
129 #endif
130 }
131
132 /*
133  * Call bo->mutex locked.
134  */
135
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
137 {
138         struct drm_device *dev = bo->dev;
139         int ret = 0;
140         uint32_t page_flags = 0;
141
142         DRM_ASSERT_LOCKED(&bo->mutex);
143         bo->ttm = NULL;
144
145         if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146                 page_flags |= DRM_TTM_PAGE_WRITE;
147
148         switch (bo->type) {
149         case drm_bo_type_device:
150         case drm_bo_type_kernel:
151                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
152                                          page_flags, dev->bm.dummy_read_page);
153                 if (!bo->ttm)
154                         ret = -ENOMEM;
155                 break;
156         case drm_bo_type_user:
157                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158                                          page_flags | DRM_TTM_PAGE_USER,
159                                          dev->bm.dummy_read_page);
160                 if (!bo->ttm)
161                         return -ENOMEM;
162
163                 ret = drm_ttm_set_user(bo->ttm, current,
164                                        bo->buffer_start,
165                                        bo->num_pages);
166                 if (ret)
167                         return ret;
168
169                 break;
170         default:
171                 DRM_ERROR("Illegal buffer object type\n");
172                 ret = -EINVAL;
173                 break;
174         }
175
176         return ret;
177 }
178
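/*
 * Move the buffer into the placement described by @mem: create and bind
 * a ttm when the target memory type needs one, then transfer the
 * contents with a ttm move, the driver's move hook or a memcpy
 * fallback, and finally update flags, eviction state and gpu offset.
 */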
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180                                   struct drm_bo_mem_reg *mem,
181                                   int evict, int no_wait)
182 {
183         struct drm_device *dev = bo->dev;
184         struct drm_buffer_manager *bm = &dev->bm;
185         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
189         int ret = 0;
190
191         if (old_is_pci || new_is_pci ||
192             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
194         if (ret)
195                 return ret;
196
197         /*
198          * Create and bind a ttm if required.
199          */
200
201         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202                 ret = drm_bo_add_ttm(bo);
203                 if (ret)
204                         goto out_err;
205
206                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207                         ret = drm_ttm_bind(bo->ttm, mem);
208                         if (ret)
209                                 goto out_err;
210                 }
211
212                 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
213                         
214                         struct drm_bo_mem_reg *old_mem = &bo->mem;
215                         uint64_t save_flags = old_mem->flags;
216                         uint64_t save_proposed_flags = old_mem->proposed_flags;
217                         
218                         *old_mem = *mem;
219                         mem->mm_node = NULL;
220                         old_mem->proposed_flags = save_proposed_flags;
221                         DRM_FLAG_MASKED(save_flags, mem->flags,
222                                         DRM_BO_MASK_MEMTYPE);
223                         goto moved;
224                 }
225                 
226         }
227
228         if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
229             !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
230                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
231         else if (dev->driver->bo_driver->move) 
232                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
233         else
234                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
235
236         if (ret)
237                 goto out_err;
238
239 moved:
240         if (old_is_pci || new_is_pci)
241                 drm_bo_vm_post_move(bo);
242
243         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
244                 ret =
245                     dev->driver->bo_driver->invalidate_caches(dev,
246                                                               bo->mem.flags);
247                 if (ret)
248                         DRM_ERROR("Can not flush read caches\n");
249         }
250
251         DRM_FLAG_MASKED(bo->priv_flags,
252                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
253                         _DRM_BO_FLAG_EVICTED);
254
255         if (bo->mem.mm_node)
256                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
257                         bm->man[bo->mem.mem_type].gpu_offset;
258
259
260         return 0;
261
262 out_err:
263         if (old_is_pci || new_is_pci)
264                 drm_bo_vm_post_move(bo);
265
266         new_man = &bm->man[bo->mem.mem_type];
267         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
268                 drm_ttm_unbind(bo->ttm);
269                 drm_ttm_destroy(bo->ttm);
270                 bo->ttm = NULL;
271         }
272
273         return ret;
274 }
275
276 /*
277  * Call bo->mutex locked.
278  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
279  */
280
281 static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
282 {
283         struct drm_fence_object *fence = bo->fence;
284
285         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
286                 return -EBUSY;
287
288         if (fence) {
289                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
290                         drm_fence_usage_deref_unlocked(&bo->fence);
291                         return 0;
292                 }
293                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
294                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
295                         drm_fence_usage_deref_unlocked(&bo->fence);
296                         return 0;
297                 }
298                 return -EBUSY;
299         }
300         return 0;
301 }
302
303 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
304 {
305         int ret;
306
307         mutex_lock(&bo->mutex);
308         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
309         mutex_unlock(&bo->mutex);
310         return ret;
311 }
312
313
314 /*
315  * Call bo->mutex locked.
316  * Wait until the buffer is idle.
317  */
318
319 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
320                 int no_wait, int check_unfenced)
321 {
322         int ret;
323
324         DRM_ASSERT_LOCKED(&bo->mutex);
325         while(unlikely(drm_bo_busy(bo, check_unfenced))) {
326                 if (no_wait)
327                         return -EBUSY;
328
329                 if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
330                         mutex_unlock(&bo->mutex);
331                         wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
332                         mutex_lock(&bo->mutex);
333                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
334                 }
335
336                 if (bo->fence) {
337                         struct drm_fence_object *fence;
338                         uint32_t fence_type = bo->fence_type;
339
340                         drm_fence_reference_unlocked(&fence, bo->fence);
341                         mutex_unlock(&bo->mutex);
342
343                         ret = drm_fence_object_wait(fence, lazy, !interruptible,
344                                                     fence_type);
345
346                         drm_fence_usage_deref_unlocked(&fence);
347                         mutex_lock(&bo->mutex);
348                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
349                         if (ret)
350                                 return ret;
351                 }
352
353         }
354         return 0;
355 }
356 EXPORT_SYMBOL(drm_bo_wait);
357
358 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
359 {
360         struct drm_device *dev = bo->dev;
361         struct drm_buffer_manager *bm = &dev->bm;
362
363         if (bo->fence) {
364                 if (bm->nice_mode) {
365                         unsigned long _end = jiffies + 3 * DRM_HZ;
366                         int ret;
367                         do {
368                                 ret = drm_bo_wait(bo, 0, 0, 0, 0);
369                                 if (ret && allow_errors)
370                                         return ret;
371
372                         } while (ret && !time_after_eq(jiffies, _end));
373
374                         if (bo->fence) {
375                                 bm->nice_mode = 0;
376                                 DRM_ERROR("Detected GPU lockup or "
377                                           "fence driver was taken down. "
378                                           "Evicting buffer.\n");
379                         }
380                 }
381                 if (bo->fence)
382                         drm_fence_usage_deref_unlocked(&bo->fence);
383         }
384         return 0;
385 }
386
387 /*
388  * Call dev->struct_mutex locked.
389  * Attempts to remove all private references to a buffer by expiring its
390  * fence object and removing from lru lists and memory managers.
391  */
392
393 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
394 {
395         struct drm_device *dev = bo->dev;
396         struct drm_buffer_manager *bm = &dev->bm;
397
398         DRM_ASSERT_LOCKED(&dev->struct_mutex);
399
400         atomic_inc(&bo->usage);
401         mutex_unlock(&dev->struct_mutex);
402         mutex_lock(&bo->mutex);
403
404         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
405
406         if (bo->fence && drm_fence_object_signaled(bo->fence,
407                                                    bo->fence_type))
408                 drm_fence_usage_deref_unlocked(&bo->fence);
409
410         if (bo->fence && remove_all)
411                 (void)drm_bo_expire_fence(bo, 0);
412
413         mutex_lock(&dev->struct_mutex);
414
415         if (!atomic_dec_and_test(&bo->usage))
416                 goto out;
417
418         if (!bo->fence) {
419                 list_del_init(&bo->lru);
420                 if (bo->mem.mm_node) {
421                         drm_mm_put_block(bo->mem.mm_node);
422                         if (bo->pinned_node == bo->mem.mm_node)
423                                 bo->pinned_node = NULL;
424                         bo->mem.mm_node = NULL;
425                 }
426                 list_del_init(&bo->pinned_lru);
427                 if (bo->pinned_node) {
428                         drm_mm_put_block(bo->pinned_node);
429                         bo->pinned_node = NULL;
430                 }
431                 list_del_init(&bo->ddestroy);
432                 mutex_unlock(&bo->mutex);
433                 drm_bo_destroy_locked(bo);
434                 return;
435         }
436
437         if (list_empty(&bo->ddestroy)) {
438                 drm_fence_object_flush(bo->fence, bo->fence_type);
439                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
440                 schedule_delayed_work(&bm->wq,
441                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
442         }
443
444 out:
445         mutex_unlock(&bo->mutex);
446         return;
447 }
448
449 /*
450  * Verify that refcount is 0 and that there are no internal references
451  * to the buffer object. Then destroy it.
452  */
453
454 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
455 {
456         struct drm_device *dev = bo->dev;
457         struct drm_buffer_manager *bm = &dev->bm;
458
459         DRM_ASSERT_LOCKED(&dev->struct_mutex);
460
461         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
462             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
463             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
464                 if (bo->fence != NULL) {
465                         DRM_ERROR("Fence was non-zero.\n");
466                         drm_bo_cleanup_refs(bo, 0);
467                         return;
468                 }
469
470 #ifdef DRM_ODD_MM_COMPAT
471                 BUG_ON(!list_empty(&bo->vma_list));
472                 BUG_ON(!list_empty(&bo->p_mm_list));
473 #endif
474
475                 if (bo->ttm) {
476                         drm_ttm_unbind(bo->ttm);
477                         drm_ttm_destroy(bo->ttm);
478                         bo->ttm = NULL;
479                 }
480
481                 atomic_dec(&bm->count);
482
483                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
484
485                 return;
486         }
487
488         /*
489          * Some stuff is still trying to reference the buffer object.
490          * Get rid of those references.
491          */
492
493         drm_bo_cleanup_refs(bo, 0);
494
495         return;
496 }
497
498 /*
499  * Call dev->struct_mutex locked.
500  */
501
502 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
503 {
504         struct drm_buffer_manager *bm = &dev->bm;
505
506         struct drm_buffer_object *entry, *nentry;
507         struct list_head *list, *next;
508
509         list_for_each_safe(list, next, &bm->ddestroy) {
510                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
511
512                 nentry = NULL;
513                 if (next != &bm->ddestroy) {
514                         nentry = list_entry(next, struct drm_buffer_object,
515                                             ddestroy);
516                         atomic_inc(&nentry->usage);
517                 }
518
519                 drm_bo_cleanup_refs(entry, remove_all);
520
521                 if (nentry)
522                         atomic_dec(&nentry->usage);
523         }
524 }
525
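/*
 * Delayed-destroy worker: retire buffers whose fences have signaled and
 * reschedule itself while entries remain on the ddestroy list.
 */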
526 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
527 static void drm_bo_delayed_workqueue(void *data)
528 #else
529 static void drm_bo_delayed_workqueue(struct work_struct *work)
530 #endif
531 {
532 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
533         struct drm_device *dev = (struct drm_device *) data;
534         struct drm_buffer_manager *bm = &dev->bm;
535 #else
536         struct drm_buffer_manager *bm =
537             container_of(work, struct drm_buffer_manager, wq.work);
538         struct drm_device *dev = container_of(bm, struct drm_device, bm);
539 #endif
540
541         DRM_DEBUG("Delayed delete Worker\n");
542
543         mutex_lock(&dev->struct_mutex);
544         if (!bm->initialized) {
545                 mutex_unlock(&dev->struct_mutex);
546                 return;
547         }
548         drm_bo_delayed_delete(dev, 0);
549         if (bm->initialized && !list_empty(&bm->ddestroy)) {
550                 schedule_delayed_work(&bm->wq,
551                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
552         }
553         mutex_unlock(&dev->struct_mutex);
554 }
555
556 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
557 {
558         struct drm_buffer_object *tmp_bo = *bo;
559         *bo = NULL;
560
561         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
562
563         if (atomic_dec_and_test(&tmp_bo->usage))
564                 drm_bo_destroy_locked(tmp_bo);
565 }
566 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
567
568 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
569                                      struct drm_user_object *uo)
570 {
571         struct drm_buffer_object *bo =
572             drm_user_object_entry(uo, struct drm_buffer_object, base);
573
574         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
575
576         drm_bo_takedown_vm_locked(bo);
577         drm_bo_usage_deref_locked(&bo);
578 }
579
580 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
581 {
582         struct drm_buffer_object *tmp_bo = *bo;
583         struct drm_device *dev = tmp_bo->dev;
584
585         *bo = NULL;
586         if (atomic_dec_and_test(&tmp_bo->usage)) {
587                 mutex_lock(&dev->struct_mutex);
588                 if (atomic_read(&tmp_bo->usage) == 0)
589                         drm_bo_destroy_locked(tmp_bo);
590                 mutex_unlock(&dev->struct_mutex);
591         }
592 }
593 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
594
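/*
 * Return every buffer on the unfenced list to its normal LRU, clearing
 * the UNFENCED state and waking waiters. Used when a batch of validated
 * buffers ends up not being fenced after all.
 */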
595 void drm_putback_buffer_objects(struct drm_device *dev)
596 {
597         struct drm_buffer_manager *bm = &dev->bm;
598         struct list_head *list = &bm->unfenced;
599         struct drm_buffer_object *entry, *next;
600
601         mutex_lock(&dev->struct_mutex);
602         list_for_each_entry_safe(entry, next, list, lru) {
603                 atomic_inc(&entry->usage);
604                 mutex_unlock(&dev->struct_mutex);
605
606                 mutex_lock(&entry->mutex);
607                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
608                 mutex_lock(&dev->struct_mutex);
609
610                 list_del_init(&entry->lru);
611                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
612                 wake_up_all(&entry->event_queue);
613
614                 /*
615                  * FIXME: Might want to put back on head of list
616                  * instead of tail here.
617                  */
618
619                 drm_bo_add_to_lru(entry);
620                 mutex_unlock(&entry->mutex);
621                 drm_bo_usage_deref_locked(&entry);
622         }
623         mutex_unlock(&dev->struct_mutex);
624 }
625 EXPORT_SYMBOL(drm_putback_buffer_objects);
626
627 /*
628  * Note. The caller has to register (if applicable)
629  * and deregister fence object usage.
630  */
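/*
 * Attach @fence (or a newly created and emitted fence when @fence is
 * NULL) to every buffer on @list, defaulting to the unfenced list,
 * clear their UNFENCED state and put them back on their LRU lists.
 * The fence actually used is returned in @used_fence.
 */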
631
632 int drm_fence_buffer_objects(struct drm_device *dev,
633                              struct list_head *list,
634                              uint32_t fence_flags,
635                              struct drm_fence_object *fence,
636                              struct drm_fence_object **used_fence)
637 {
638         struct drm_buffer_manager *bm = &dev->bm;
639         struct drm_buffer_object *entry;
640         uint32_t fence_type = 0;
641         uint32_t fence_class = ~0;
642         int count = 0;
643         int ret = 0;
644         struct list_head *l;
645
646         mutex_lock(&dev->struct_mutex);
647
648         if (!list)
649                 list = &bm->unfenced;
650
651         if (fence)
652                 fence_class = fence->fence_class;
653
654         list_for_each_entry(entry, list, lru) {
655                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
656                 fence_type |= entry->new_fence_type;
657                 if (fence_class == ~0)
658                         fence_class = entry->new_fence_class;
659                 else if (entry->new_fence_class != fence_class) {
660                         DRM_ERROR("Unmatching fence classes on unfenced list: "
661                                   "%d and %d.\n",
662                                   fence_class,
663                                   entry->new_fence_class);
664                         ret = -EINVAL;
665                         goto out;
666                 }
667                 count++;
668         }
669
670         if (!count) {
671                 ret = -EINVAL;
672                 goto out;
673         }
674
675         if (fence) {
676                 if ((fence_type & fence->type) != fence_type ||
677                     (fence->fence_class != fence_class)) {
678                         DRM_ERROR("Given fence doesn't match buffers "
679                                   "on unfenced list.\n");
680                         ret = -EINVAL;
681                         goto out;
682                 }
683         } else {
684                 mutex_unlock(&dev->struct_mutex);
685                 ret = drm_fence_object_create(dev, fence_class, fence_type,
686                                               fence_flags | DRM_FENCE_FLAG_EMIT,
687                                               &fence);
688                 mutex_lock(&dev->struct_mutex);
689                 if (ret)
690                         goto out;
691         }
692
693         count = 0;
694         l = list->next;
695         while (l != list) {
696                 prefetch(l->next);
697                 entry = list_entry(l, struct drm_buffer_object, lru);
698                 atomic_inc(&entry->usage);
699                 mutex_unlock(&dev->struct_mutex);
700                 mutex_lock(&entry->mutex);
701                 mutex_lock(&dev->struct_mutex);
702                 list_del_init(l);
703                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
704                         count++;
705                         if (entry->fence)
706                                 drm_fence_usage_deref_locked(&entry->fence);
707                         entry->fence = drm_fence_reference_locked(fence);
708                         entry->fence_class = entry->new_fence_class;
709                         entry->fence_type = entry->new_fence_type;
710                         DRM_FLAG_MASKED(entry->priv_flags, 0,
711                                         _DRM_BO_FLAG_UNFENCED);
712                         wake_up_all(&entry->event_queue);
713                         drm_bo_add_to_lru(entry);
714                 }
715                 mutex_unlock(&entry->mutex);
716                 drm_bo_usage_deref_locked(&entry);
717                 l = list->next;
718         }
719         DRM_DEBUG("Fenced %d buffers\n", count);
720 out:
721         mutex_unlock(&dev->struct_mutex);
722         *used_fence = fence;
723         return ret;
724 }
725 EXPORT_SYMBOL(drm_fence_buffer_objects);
726
727 /*
728  * bo->mutex locked
729  */
730
731 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
732                         int no_wait)
733 {
734         int ret = 0;
735         struct drm_device *dev = bo->dev;
736         struct drm_bo_mem_reg evict_mem;
737
738         /*
739          * Someone might have modified the buffer before we took the
740          * buffer mutex.
741          */
742
743         do {
744                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
745
746                 if (unlikely(bo->mem.flags &
747                              (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
748                         goto out_unlock;
749                 if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
750                         goto out_unlock;
751                 if (unlikely(bo->mem.mem_type != mem_type))
752                         goto out_unlock;
753                 ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
754                 if (ret)
755                         goto out_unlock;
756
757         } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
758
759         evict_mem = bo->mem;
760         evict_mem.mm_node = NULL;
761
763         evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
764
765         mutex_lock(&dev->struct_mutex);
766         list_del_init(&bo->lru);
767         mutex_unlock(&dev->struct_mutex);
768
769         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
770
771         if (ret) {
772                 if (ret != -EAGAIN)
773                         DRM_ERROR("Failed to find memory space for "
774                                   "buffer 0x%p eviction.\n", bo);
775                 goto out;
776         }
777
778         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
779
780         if (ret) {
781                 if (ret != -EAGAIN)
782                         DRM_ERROR("Buffer eviction failed\n");
783                 goto out;
784         }
785
786         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
787                         _DRM_BO_FLAG_EVICTED);
788
789 out:
790         mutex_lock(&dev->struct_mutex);
791         if (evict_mem.mm_node) {
792                 if (evict_mem.mm_node != bo->pinned_node)
793                         drm_mm_put_block(evict_mem.mm_node);
794                 evict_mem.mm_node = NULL;
795         }
796         drm_bo_add_to_lru(bo);
797         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
798 out_unlock:
799         mutex_unlock(&dev->struct_mutex);
800
801         return ret;
802 }
803
804 /**
805  * Repeatedly evict memory from the LRU for @mem_type until we create enough
806  * space, or we've evicted everything and there isn't enough space.
807  */
808 static int drm_bo_mem_force_space(struct drm_device *dev,
809                                   struct drm_bo_mem_reg *mem,
810                                   uint32_t mem_type, int no_wait)
811 {
812         struct drm_mm_node *node;
813         struct drm_buffer_manager *bm = &dev->bm;
814         struct drm_buffer_object *entry;
815         struct drm_mem_type_manager *man = &bm->man[mem_type];
816         struct list_head *lru;
817         unsigned long num_pages = mem->num_pages;
818         int ret;
819
820         mutex_lock(&dev->struct_mutex);
821         do {
822                 node = drm_mm_search_free(&man->manager, num_pages,
823                                           mem->page_alignment, 1);
824                 if (node)
825                         break;
826
827                 lru = &man->lru;
828                 if (lru->next == lru)
829                         break;
830
831                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
832                 atomic_inc(&entry->usage);
833                 mutex_unlock(&dev->struct_mutex);
834                 mutex_lock(&entry->mutex);
835                 ret = drm_bo_evict(entry, mem_type, no_wait);
836                 mutex_unlock(&entry->mutex);
837                 drm_bo_usage_deref_unlocked(&entry);
838                 if (ret)
839                         return ret;
840                 mutex_lock(&dev->struct_mutex);
841         } while (1);
842
843         if (!node) {
844                 mutex_unlock(&dev->struct_mutex);
845                 return -ENOMEM;
846         }
847
848         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
849         if (unlikely(!node)) {
850                 mutex_unlock(&dev->struct_mutex);
851                 return -ENOMEM;
852         }
853
854         mutex_unlock(&dev->struct_mutex);
855         mem->mm_node = node;
856         mem->mem_type = mem_type;
857         return 0;
858 }
859
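/*
 * Check whether memory type @mem_type can satisfy the proposed flags in
 * @mask; returns 1 if so and stores the resulting placement flags in
 * @res_mask.
 */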
860 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
861                                 int disallow_fixed,
862                                 uint32_t mem_type,
863                                 uint64_t mask, uint32_t *res_mask)
864 {
865         uint64_t cur_flags = drm_bo_type_flags(mem_type);
866         uint64_t flag_diff;
867
868         if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
869                 return 0;
870         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
871                 cur_flags |= DRM_BO_FLAG_CACHED;
872         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
873                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
874         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
875                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
876
877         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
878                 return 0;
879
880         if (mem_type == DRM_BO_MEM_LOCAL) {
881                 *res_mask = cur_flags;
882                 return 1;
883         }
884
885         flag_diff = (mask ^ cur_flags);
886         if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
887                 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
888
889         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
890             (!(mask & DRM_BO_FLAG_CACHED) ||
891              (mask & DRM_BO_FLAG_FORCE_CACHING)))
892                 return 0;
893
894         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
895             ((mask & DRM_BO_FLAG_MAPPABLE) ||
896              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
897                 return 0;
898
899         *res_mask = cur_flags;
900         return 1;
901 }
902
903 /**
904  * Creates space for memory region @mem according to its type.
905  *
906  * This function first searches for free space in compatible memory types in
907  * the priority order defined by the driver.  If free space isn't found, then
908  * drm_bo_mem_force_space is attempted in priority order to evict and find
909  * space.
910  */
911 int drm_bo_mem_space(struct drm_buffer_object *bo,
912                      struct drm_bo_mem_reg *mem, int no_wait)
913 {
914         struct drm_device *dev = bo->dev;
915         struct drm_buffer_manager *bm = &dev->bm;
916         struct drm_mem_type_manager *man;
917
918         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
919         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
920         uint32_t i;
921         uint32_t mem_type = DRM_BO_MEM_LOCAL;
922         uint32_t cur_flags;
923         int type_found = 0;
924         int type_ok = 0;
925         int has_eagain = 0;
926         struct drm_mm_node *node = NULL;
927         int ret;
928
929         mem->mm_node = NULL;
930         for (i = 0; i < num_prios; ++i) {
931                 mem_type = prios[i];
932                 man = &bm->man[mem_type];
933
934                 type_ok = drm_bo_mt_compatible(man,
935                                                bo->type == drm_bo_type_user,
936                                                mem_type, mem->proposed_flags,
937                                                &cur_flags);
938
939                 if (!type_ok)
940                         continue;
941
942                 if (mem_type == DRM_BO_MEM_LOCAL)
943                         break;
944
945                 if ((mem_type == bo->pinned_mem_type) &&
946                     (bo->pinned_node != NULL)) {
947                         node = bo->pinned_node;
948                         break;
949                 }
950
951                 mutex_lock(&dev->struct_mutex);
952                 if (man->has_type && man->use_type) {
953                         type_found = 1;
954                         node = drm_mm_search_free(&man->manager, mem->num_pages,
955                                                   mem->page_alignment, 1);
956                         if (node)
957                                 node = drm_mm_get_block(node, mem->num_pages,
958                                                         mem->page_alignment);
959                 }
960                 mutex_unlock(&dev->struct_mutex);
961                 if (node)
962                         break;
963         }
964
965         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
966                 mem->mm_node = node;
967                 mem->mem_type = mem_type;
968                 mem->flags = cur_flags;
969                 return 0;
970         }
971
972         if (!type_found)
973                 return -EINVAL;
974
975         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
976         prios = dev->driver->bo_driver->mem_busy_prio;
977
978         for (i = 0; i < num_prios; ++i) {
979                 mem_type = prios[i];
980                 man = &bm->man[mem_type];
981
982                 if (!man->has_type)
983                         continue;
984
985                 if (!drm_bo_mt_compatible(man,
986                                           bo->type == drm_bo_type_user,
987                                           mem_type,
988                                           mem->proposed_flags,
989                                           &cur_flags))
990                         continue;
991
992                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
993
994                 if (ret == 0 && mem->mm_node) {
995                         mem->flags = cur_flags;
996                         return 0;
997                 }
998
999                 if (ret == -EAGAIN)
1000                         has_eagain = 1;
1001         }
1002
1003         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
1004         return ret;
1005 }
1006 EXPORT_SYMBOL(drm_bo_mem_space);
1007
1008 /*
1009  * drm_bo_modify_proposed_flags:
1010  *
1011  * @bo: the buffer object getting new flags
1012  *
1013  * @new_flags: the new set of proposed flag bits
1014  *
1015  * @new_mask: the mask of bits changed in new_flags
1016  *
1017  * Modify the proposed_flags bits in @bo
1018  */
1019 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
1020                                          uint64_t new_flags, uint64_t new_mask)
1021 {
1022         uint32_t new_access;
1023
1024         /* Copy unchanging bits from existing proposed_flags */
1025         DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
1026          
1027         if (bo->type == drm_bo_type_user &&
1028             ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1029              (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1030                 DRM_ERROR("User buffers require cache-coherent memory.\n");
1031                 return -EINVAL;
1032         }
1033
1034         if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1035                 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
1036                 return -EPERM;
1037         }
1038
1039         if (likely(new_mask & DRM_BO_MASK_MEM) &&
1040             (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1041             !DRM_SUSER(DRM_CURPROC)) {
1042                 if (likely(bo->mem.flags & new_flags & new_mask &
1043                            DRM_BO_MASK_MEM))
1044                         new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1045                                 (bo->mem.flags & DRM_BO_MASK_MEM);
1046                 else {
1047                         DRM_ERROR("Incompatible memory type specification "
1048                                   "for NO_EVICT buffer.\n");
1049                         return -EPERM;
1050                 }
1051         }
1052
1053         if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1054                 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1055                 return -EPERM;
1056         }
1057
1058         new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1059                                   DRM_BO_FLAG_READ);
1060
1061         if (new_access == 0) {
1062                 DRM_ERROR("Invalid buffer object rwx properties\n");
1063                 return -EINVAL;
1064         }
1065
1066         bo->mem.proposed_flags = new_flags;
1067         return 0;
1068 }
1069
1070 /*
1071  * Call dev->struct_mutex locked.
1072  */
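/*
 * On success a usage reference is taken on the returned object; the
 * caller is expected to drop it with drm_bo_usage_deref_locked() or
 * drm_bo_usage_deref_unlocked().
 */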
1073
1074 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1075                                               uint32_t handle, int check_owner)
1076 {
1077         struct drm_user_object *uo;
1078         struct drm_buffer_object *bo;
1079
1080         uo = drm_lookup_user_object(file_priv, handle);
1081
1082         if (!uo || (uo->type != drm_buffer_type)) {
1083                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1084                 return NULL;
1085         }
1086
1087         if (check_owner && file_priv != uo->owner) {
1088                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1089                         return NULL;
1090         }
1091
1092         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1093         atomic_inc(&bo->usage);
1094         return bo;
1095 }
1096 EXPORT_SYMBOL(drm_lookup_buffer_object);
1097
1098 /*
1099  * Call bo->mutex locked.
1100  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
1101  * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
1102  */
1103
1104 static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
1105 {
1106         struct drm_fence_object *fence = bo->fence;
1107
1108         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1109                 return -EBUSY;
1110
1111         if (fence) {
1112                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1113                         drm_fence_usage_deref_unlocked(&bo->fence);
1114                         return 0;
1115                 }
1116                 return -EBUSY;
1117         }
1118         return 0;
1119 }
1120
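/*
 * Kick a buffer out of the TT memory type; used, for instance, to undo
 * a cached-mapped placement before the buffer is mapped again.
 */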
1121 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1122 {
1123         int ret = 0;
1124
1125         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1126         if (bo->mem.mm_node)
1127                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1128         return ret;
1129 }
1130
1131 EXPORT_SYMBOL(drm_bo_evict_cached);
1132 /*
1133  * Wait until a buffer is unmapped.
1134  */
1135
1136 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1137 {
1138         int ret = 0;
1139
1140         if (likely(atomic_read(&bo->mapped) == 0))
1141                 return 0;
1142
1143         if (unlikely(no_wait))
1144                 return -EBUSY;
1145
1146         do {
1147                 mutex_unlock(&bo->mutex);
1148                 ret = wait_event_interruptible(bo->event_queue,
1149                                                atomic_read(&bo->mapped) == 0);
1150                 mutex_lock(&bo->mutex);
1151                 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
1152
1153                 if (ret == -ERESTARTSYS)
1154                         ret = -EAGAIN;
1155         } while((ret == 0) && atomic_read(&bo->mapped) > 0);
1156
1157         return ret;
1158 }
1159
1160 /*
1161  * Fill in the ioctl reply argument with buffer info.
1162  * Bo locked.
1163  */
1164
1165 void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1166                          struct drm_bo_info_rep *rep)
1167 {
1168         if (!rep)
1169                 return;
1170
1171         rep->handle = bo->base.hash.key;
1172         rep->flags = bo->mem.flags;
1173         rep->size = bo->num_pages * PAGE_SIZE;
1174         rep->offset = bo->offset;
1175
1176         /*
1177          * drm_bo_type_device buffers have user-visible
1178          * handles which can be used to share across
1179          * processes. Hand that back to the application
1180          */
1181         if (bo->type == drm_bo_type_device)
1182                 rep->arg_handle = bo->map_list.user_token;
1183         else
1184                 rep->arg_handle = 0;
1185
1186         rep->proposed_flags = bo->mem.proposed_flags;
1187         rep->buffer_start = bo->buffer_start;
1188         rep->fence_flags = bo->fence_type;
1189         rep->rep_flags = 0;
1190         rep->page_alignment = bo->mem.page_alignment;
1191
1192         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
1193                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1194                                 DRM_BO_REP_BUSY);
1195         }
1196 }
1197 EXPORT_SYMBOL(drm_bo_fill_rep_arg);
1198
1199 /*
1200  * Wait for buffer idle and register that we've mapped the buffer.
1201  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1202  * so that if the client dies, the mapping is automatically
1203  * unregistered.
1204  */
1205
1206 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1207                                  uint32_t map_flags, unsigned hint,
1208                                  struct drm_bo_info_rep *rep)
1209 {
1210         struct drm_buffer_object *bo;
1211         struct drm_device *dev = file_priv->minor->dev;
1212         int ret = 0;
1213         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1214
1215         mutex_lock(&dev->struct_mutex);
1216         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1217         mutex_unlock(&dev->struct_mutex);
1218
1219         if (!bo)
1220                 return -EINVAL;
1221
1222         mutex_lock(&bo->mutex);
1223         do {
1224                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1225
1226                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1227                 if (unlikely(ret))
1228                         goto out;
1229
1230                 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1231                         drm_bo_evict_cached(bo);
1232
1233         } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1234
1235         atomic_inc(&bo->mapped);
1236         mutex_lock(&dev->struct_mutex);
1237         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1238         mutex_unlock(&dev->struct_mutex);
1239         if (ret) {
1240                 if (atomic_dec_and_test(&bo->mapped))
1241                         wake_up_all(&bo->event_queue);
1242
1243         } else
1244                 drm_bo_fill_rep_arg(bo, rep);
1245
1246  out:
1247         mutex_unlock(&bo->mutex);
1248         drm_bo_usage_deref_unlocked(&bo);
1249
1250         return ret;
1251 }
1252
1253 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1254 {
1255         struct drm_device *dev = file_priv->minor->dev;
1256         struct drm_buffer_object *bo;
1257         struct drm_ref_object *ro;
1258         int ret = 0;
1259
1260         mutex_lock(&dev->struct_mutex);
1261
1262         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1263         if (!bo) {
1264                 ret = -EINVAL;
1265                 goto out;
1266         }
1267
1268         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1269         if (!ro) {
1270                 ret = -EINVAL;
1271                 goto out;
1272         }
1273
1274         drm_remove_ref_object(file_priv, ro);
1275         drm_bo_usage_deref_locked(&bo);
1276 out:
1277         mutex_unlock(&dev->struct_mutex);
1278         return ret;
1279 }
1280
1281 /*
1282  * Call dev->struct_mutex locked.
1283  */
1284
1285 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1286                                          struct drm_user_object *uo,
1287                                          enum drm_ref_type action)
1288 {
1289         struct drm_buffer_object *bo =
1290             drm_user_object_entry(uo, struct drm_buffer_object, base);
1291
1292         /*
1293          * We DON'T want to take the bo->lock here, because we want to
1294          * hold it when we wait for unmapped buffer.
1295          */
1296
1297         BUG_ON(action != _DRM_REF_TYPE1);
1298
1299         if (atomic_dec_and_test(&bo->mapped))
1300                 wake_up_all(&bo->event_queue);
1301 }
1302
1303 /*
1304  * bo->mutex locked.
1305  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1306  */
1307
1308 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1309                        int no_wait, int move_unfenced)
1310 {
1311         struct drm_device *dev = bo->dev;
1312         struct drm_buffer_manager *bm = &dev->bm;
1313         int ret = 0;
1314         struct drm_bo_mem_reg mem;
1315
1316         BUG_ON(bo->fence != NULL);
1317
1318         mem.num_pages = bo->num_pages;
1319         mem.size = mem.num_pages << PAGE_SHIFT;
1320         mem.proposed_flags = new_mem_flags;
1321         mem.page_alignment = bo->mem.page_alignment;
1322
1323         mutex_lock(&bm->evict_mutex);
1324         mutex_lock(&dev->struct_mutex);
1325         list_del_init(&bo->lru);
1326         mutex_unlock(&dev->struct_mutex);
1327
1328         /*
1329          * Determine where to move the buffer.
1330          */
1331         ret = drm_bo_mem_space(bo, &mem, no_wait);
1332         if (ret)
1333                 goto out_unlock;
1334
1335         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1336
1337 out_unlock:
1338         mutex_lock(&dev->struct_mutex);
1339         if (ret || !move_unfenced) {
1340                 if (mem.mm_node) {
1341                         if (mem.mm_node != bo->pinned_node)
1342                                 drm_mm_put_block(mem.mm_node);
1343                         mem.mm_node = NULL;
1344                 }
1345                 drm_bo_add_to_lru(bo);
1346                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1347                         wake_up_all(&bo->event_queue);
1348                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1349                                         _DRM_BO_FLAG_UNFENCED);
1350                 }
1351         } else {
1352                 list_add_tail(&bo->lru, &bm->unfenced);
1353                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1354                                 _DRM_BO_FLAG_UNFENCED);
1355         }
1356         mutex_unlock(&dev->struct_mutex);
1357         mutex_unlock(&bm->evict_mutex);
1358         return ret;
1359 }
1360
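/*
 * Returns 1 if the current placement already satisfies the proposed
 * flags (memory type, caching, mappability), so validation doesn't need
 * to move the buffer.
 */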
1361 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1362 {
1363         uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1364
1365         if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1366                 return 0;
1367         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1368             (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1369              (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1370                 return 0;
1371
1372         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1373             ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1374              (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1375                 return 0;
1376         return 1;
1377 }
1378
1379 /**
1380  * drm_buffer_object_validate:
1381  *
1382  * @bo: the buffer object to modify
1383  *
1384  * @fence_class: the new fence class covering this buffer
1385  *
1386  * @move_unfenced: a boolean indicating whether switching the
1387  * memory space of this buffer should cause the buffer to
1388  * be placed on the unfenced list.
1389  *
1390  * @no_wait: whether this function should return -EBUSY instead
1391  * of waiting.
1392  *
1393  * Change buffer access parameters. This can involve moving
1394  * the buffer to the correct memory type, pinning the buffer
1395  * or changing the class/type of fence covering this buffer
1396  *
1397  * Must be called with bo locked.
1398  */
1399
1400 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1401                                       uint32_t fence_class,
1402                                       int move_unfenced, int no_wait,
1403                                       int move_buffer)
1404 {
1405         struct drm_device *dev = bo->dev;
1406         struct drm_buffer_manager *bm = &dev->bm;
1407         int ret;
1408
1409         if (move_buffer) {
1410                 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1411                                          move_unfenced);
1412                 if (ret) {
1413                         if (ret != -EAGAIN)
1414                                 DRM_ERROR("Failed moving buffer.\n");
1415                         if (ret == -ENOMEM)
1416                                 DRM_ERROR("Out of aperture space or "
1417                                           "DRM memory quota.\n");
1418                         return ret;
1419                 }
1420         }
1421
1422         /*
1423          * Pinned buffers.
1424          */
1425
1426         if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1427                 bo->pinned_mem_type = bo->mem.mem_type;
1428                 mutex_lock(&dev->struct_mutex);
1429                 list_del_init(&bo->pinned_lru);
1430                 drm_bo_add_to_pinned_lru(bo);
1431
1432                 if (bo->pinned_node != bo->mem.mm_node) {
1433                         if (bo->pinned_node != NULL)
1434                                 drm_mm_put_block(bo->pinned_node);
1435                         bo->pinned_node = bo->mem.mm_node;
1436                 }
1437
1438                 mutex_unlock(&dev->struct_mutex);
1439
1440         } else if (bo->pinned_node != NULL) {
1441
1442                 mutex_lock(&dev->struct_mutex);
1443
1444                 if (bo->pinned_node != bo->mem.mm_node)
1445                         drm_mm_put_block(bo->pinned_node);
1446
1447                 list_del_init(&bo->pinned_lru);
1448                 bo->pinned_node = NULL;
1449                 mutex_unlock(&dev->struct_mutex);
1450
1451         }
1452
1453         /*
1454          * We might need to add a TTM.
1455          */
1456
1457         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1458                 ret = drm_bo_add_ttm(bo);
1459                 if (ret)
1460                         return ret;
1461         }
1462         /*
1463          * Validation has succeeded, move the access and other
1464          * non-mapping-related flag bits from the proposed flags to
1465          * the active flags
1466          */
1467
1468         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1469
1470         /*
1471          * Finally, adjust lru to be sure.
1472          */
1473
1474         mutex_lock(&dev->struct_mutex);
1475         list_del(&bo->lru);
1476         if (move_unfenced) {
1477                 list_add_tail(&bo->lru, &bm->unfenced);
1478                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1479                                 _DRM_BO_FLAG_UNFENCED);
1480         } else {
1481                 drm_bo_add_to_lru(bo);
1482                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1483                         wake_up_all(&bo->event_queue);
1484                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1485                                         _DRM_BO_FLAG_UNFENCED);
1486                 }
1487         }
1488         mutex_unlock(&dev->struct_mutex);
1489
1490         return 0;
1491 }
1492
1493 /*
1494  * This function is called with bo->mutex locked, but may release it
1495  * temporarily to wait for events.
1496  */
1497
1498 static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
1499                                        uint64_t flags,
1500                                        uint64_t mask,
1501                                        uint32_t hint,
1502                                        uint32_t fence_class,
1503                                        int no_wait,
1504                                        int *move_buffer)
1505 {
1506         struct drm_device *dev = bo->dev;
1507         struct drm_bo_driver *driver = dev->driver->bo_driver;
1508         uint32_t ftype;
1509
1510         int ret;
1511
1512         DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1513                   (unsigned long long) bo->mem.proposed_flags,
1514                   (unsigned long long) bo->mem.flags);
1515
1516         ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1517         if (ret)
1518                 return ret;
1519
1520         ret = drm_bo_wait_unmapped(bo, no_wait);
1521         if (ret)
1522                 return ret;
1523
1524         ret = driver->fence_type(bo, &fence_class, &ftype);
1525
1526         if (ret) {
1527                 DRM_ERROR("Driver did not support given buffer permissions.\n");
1528                 return ret;
1529         }
1530
1531         /*
1532          * We're switching command submission mechanism,
1533          * or cannot simply rely on the hardware serializing for us.
1534  * Insert a driver-dependent barrier or wait for buffer idle.
1535          */
1536
1537         if ((fence_class != bo->fence_class) ||
1538             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1539
1540                 ret = -EINVAL;
1541                 if (driver->command_stream_barrier) {
1542                         ret = driver->command_stream_barrier(bo,
1543                                                              fence_class,
1544                                                              ftype,
1545                                                              no_wait);
1546                 }
1547                 if (ret && ret != -EAGAIN) 
1548                         ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1549                 
1550                 if (ret)
1551                         return ret;
1552         }
1553
1554         bo->new_fence_class = fence_class;
1555         bo->new_fence_type = ftype;
1556
1557         /*
1558          * Check whether we need to move buffer.
1559          */
1560
1561         *move_buffer = 0;
1562         if (!drm_bo_mem_compat(&bo->mem)) {
1563                 *move_buffer = 1;
1564                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1565         }
1566
1567         return ret;
1568 }
1569
1570 /**
1571  * drm_bo_do_validate:
1572  *
1573  * @bo: the buffer object
1574  *
1575  * @flags: access rights, mapping parameters and cacheability. See
1576  * the DRM_BO_FLAG_* values in drm.h
1577  *
1578  * @mask: Which flag values to change; this allows callers to modify
1579  * things without knowing the current state of other flags.
1580  *
1581  * @hint: changes the procedure for this operation; see the DRM_BO_HINT_*
1582  * values in drm.h.
1583  *
1584  * @fence_class: a driver-specific way of doing fences. Presumably,
1585  * this would be used if the driver had more than one submission and
1586  * fencing mechanism. At this point, there isn't any use of this
1587  * from the user mode code.
1588  *
1589  * @rep: To be stuffed with the reply from validation
1590  * 
1591  * 'validate' a buffer object. This changes where the buffer is
1592  * located, along with changing access modes.
1593  */
1594
1595 int drm_bo_do_validate(struct drm_buffer_object *bo,
1596                        uint64_t flags, uint64_t mask, uint32_t hint,
1597                        uint32_t fence_class,
1598                        struct drm_bo_info_rep *rep)
1599 {
1600         int ret;
1601         int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1602         int move_buffer;
1603
1604         mutex_lock(&bo->mutex);
1605
1606         do {
1607                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1608
1609                 ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
1610                                                   fence_class, no_wait,
1611                                                   &move_buffer);
1612                 if (ret)
1613                         goto out;
1614
1615         } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1616
1617         ret = drm_buffer_object_validate(bo,
1618                                          fence_class,
1619                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1620                                          no_wait,
1621                                          move_buffer);
1622
1623         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
1624 out:
1625         if (rep)
1626                 drm_bo_fill_rep_arg(bo, rep);
1627
1628         mutex_unlock(&bo->mutex);
1629
1630         return ret;
1631 }
1632 EXPORT_SYMBOL(drm_bo_do_validate);
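
/*
 * Illustrative sketch only, not part of this file: how a driver might use
 * drm_bo_do_validate() to pin an already-referenced buffer into TT memory.
 * The helper name is hypothetical; DRM_BO_FLAG_MEM_TT and
 * DRM_BO_FLAG_NO_EVICT are assumed to be the flag names from this
 * interface's drm.h.
 *
 *	static int example_pin_to_tt(struct drm_buffer_object *bo)
 *	{
 *		uint64_t flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_EVICT;
 *
 *		return drm_bo_do_validate(bo, flags, flags,
 *					  DRM_BO_HINT_DONT_FENCE,
 *					  bo->fence_class, NULL);
 *	}
 */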
1633
1634 /**
1635  * drm_bo_handle_validate
1636  *
1637  * @file_priv: the drm file private, used to get a handle to the user context
1638  *
1639  * @handle: the buffer object handle
1640  *
1641  * @flags: access rights, mapping parameters and cacheability. See
1642  * the DRM_BO_FLAG_* values in drm.h
1643  *
1644  * @mask: Which flag values to change; this allows callers to modify
1645  * things without knowing the current state of other flags.
1646  *
1647  * @hint: changes the procedure for this operation; see the DRM_BO_HINT_*
1648  * values in drm.h.
1649  *
1650  * @fence_class: a driver-specific way of doing fences. Presumably,
1651  * this would be used if the driver had more than one submission and
1652  * fencing mechanism. At this point, there isn't any use of this
1653  * from the user mode code.
1654  *
1655  * @rep: To be stuffed with the reply from validation
1656  *
1657  * @bo_rep: To be stuffed with the buffer object pointer
1658  *
1659  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
1660  * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
1661  * This is a convenience wrapper only.
1662  */
1663
1664 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1665                            uint64_t flags, uint64_t mask,
1666                            uint32_t hint,
1667                            uint32_t fence_class,
1668                            struct drm_bo_info_rep *rep,
1669                            struct drm_buffer_object **bo_rep)
1670 {
1671         struct drm_device *dev = file_priv->minor->dev;
1672         struct drm_buffer_object *bo;
1673         int ret;
1674
1675         mutex_lock(&dev->struct_mutex);
1676         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1677         mutex_unlock(&dev->struct_mutex);
1678
1679         if (!bo)
1680                 return -EINVAL;
1681
1682         if (bo->base.owner != file_priv)
1683                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1684
1685         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1686
1687         if (!ret && bo_rep)
1688                 *bo_rep = bo;
1689         else
1690                 drm_bo_usage_deref_unlocked(&bo);
1691
1692         return ret;
1693 }
1694 EXPORT_SYMBOL(drm_bo_handle_validate);
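
/*
 * Illustrative sketch only: validating a buffer by user handle without
 * requesting any placement change, so that only the drm_bo_info_rep reply
 * is of interest. The helper name is hypothetical; the call itself matches
 * the wrapper above.
 *
 *	static int example_query_bo(struct drm_file *file_priv, uint32_t handle,
 *				    struct drm_bo_info_rep *rep)
 *	{
 *		return drm_bo_handle_validate(file_priv, handle, 0, 0,
 *					      DRM_BO_HINT_DONT_FENCE, 0,
 *					      rep, NULL);
 *	}
 */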
1695
1696
1697 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1698                               struct drm_bo_info_rep *rep)
1699 {
1700         struct drm_device *dev = file_priv->minor->dev;
1701         struct drm_buffer_object *bo;
1702
1703         mutex_lock(&dev->struct_mutex);
1704         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1705         mutex_unlock(&dev->struct_mutex);
1706
1707         if (!bo)
1708                 return -EINVAL;
1709
1710         mutex_lock(&bo->mutex);
1711
1712         /*
1713          * FIXME: Quick busy here?
1714          */
1715
1716         drm_bo_busy(bo, 1);
1717         drm_bo_fill_rep_arg(bo, rep);
1718         mutex_unlock(&bo->mutex);
1719         drm_bo_usage_deref_unlocked(&bo);
1720         return 0;
1721 }
1722
1723 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1724                               uint32_t hint,
1725                               struct drm_bo_info_rep *rep)
1726 {
1727         struct drm_device *dev = file_priv->minor->dev;
1728         struct drm_buffer_object *bo;
1729         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1730         int ret;
1731
1732         mutex_lock(&dev->struct_mutex);
1733         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1734         mutex_unlock(&dev->struct_mutex);
1735
1736         if (!bo)
1737                 return -EINVAL;
1738
1739         mutex_lock(&bo->mutex);
1740         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
1741         if (ret)
1742                 goto out;
1743
1744         drm_bo_fill_rep_arg(bo, rep);
1745 out:
1746         mutex_unlock(&bo->mutex);
1747         drm_bo_usage_deref_unlocked(&bo);
1748         return ret;
1749 }
1750
1751 int drm_buffer_object_create(struct drm_device *dev,
1752                              unsigned long size,
1753                              enum drm_bo_type type,
1754                              uint64_t flags,
1755                              uint32_t hint,
1756                              uint32_t page_alignment,
1757                              unsigned long buffer_start,
1758                              struct drm_buffer_object **buf_obj)
1759 {
1760         struct drm_buffer_manager *bm = &dev->bm;
1761         struct drm_buffer_object *bo;
1762         int ret = 0;
1763         unsigned long num_pages;
1764
1765         size += buffer_start & ~PAGE_MASK;
1766         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1767         if (num_pages == 0) {
1768                 DRM_ERROR("Illegal buffer object size %lu.\n", size);
1769                 return -EINVAL;
1770         }
1771
1772         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1773
1774         if (!bo)
1775                 return -ENOMEM;
1776
1777         mutex_init(&bo->mutex);
1778         mutex_lock(&bo->mutex);
1779
1780         atomic_set(&bo->usage, 1);
1781         atomic_set(&bo->mapped, 0);
1782         DRM_INIT_WAITQUEUE(&bo->event_queue);
1783         INIT_LIST_HEAD(&bo->lru);
1784         INIT_LIST_HEAD(&bo->pinned_lru);
1785         INIT_LIST_HEAD(&bo->ddestroy);
1786 #ifdef DRM_ODD_MM_COMPAT
1787         INIT_LIST_HEAD(&bo->p_mm_list);
1788         INIT_LIST_HEAD(&bo->vma_list);
1789 #endif
1790         bo->dev = dev;
1791         bo->type = type;
1792         bo->num_pages = num_pages;
1793         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1794         bo->mem.num_pages = bo->num_pages;
1795         bo->mem.mm_node = NULL;
1796         bo->mem.page_alignment = page_alignment;
1797         bo->buffer_start = buffer_start & PAGE_MASK;
1798         bo->priv_flags = 0;
1799         bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1800                          DRM_BO_FLAG_MAPPABLE);
1801         bo->mem.proposed_flags = 0;
1802         atomic_inc(&bm->count);
1803         /*
1804          * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1805          */
1806         ret = drm_bo_modify_proposed_flags(bo, flags, flags);
1807         if (ret)
1808                 goto out_err;
1809
1810         /*
1811          * For drm_bo_type_device buffers, allocate
1812          * address space from the device so that applications
1813          * can mmap the buffer from there
1814          */
1815         if (bo->type == drm_bo_type_device) {
1816                 mutex_lock(&dev->struct_mutex);
1817                 ret = drm_bo_setup_vm_locked(bo);
1818                 mutex_unlock(&dev->struct_mutex);
1819                 if (ret)
1820                         goto out_err;
1821         }
1822
1823         mutex_unlock(&bo->mutex);
1824         ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
1825                                  0, NULL);
1826         if (ret)
1827                 goto out_err_unlocked;
1828
1829         *buf_obj = bo;
1830         return 0;
1831
1832 out_err:
1833         mutex_unlock(&bo->mutex);
1834 out_err_unlocked:
1835         drm_bo_usage_deref_unlocked(&bo);
1836         return ret;
1837 }
1838 EXPORT_SYMBOL(drm_buffer_object_create);
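
/*
 * Illustrative sketch only: creating a kernel-owned drm_bo_type_device
 * buffer backed by cached, mappable local memory. The helper name is
 * hypothetical; DRM_BO_FLAG_READ and DRM_BO_FLAG_WRITE are assumed from
 * this interface's drm.h, and a real driver would pick its own placement
 * and access flags.
 *
 *	static int example_create_bo(struct drm_device *dev, unsigned long size,
 *				     struct drm_buffer_object **bo)
 *	{
 *		return drm_buffer_object_create(dev, size, drm_bo_type_device,
 *						DRM_BO_FLAG_READ |
 *						DRM_BO_FLAG_WRITE |
 *						DRM_BO_FLAG_MEM_LOCAL |
 *						DRM_BO_FLAG_CACHED |
 *						DRM_BO_FLAG_MAPPABLE,
 *						0, 0, 0, bo);
 *	}
 */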
1839
1840
1841 static int drm_bo_add_user_object(struct drm_file *file_priv,
1842                                   struct drm_buffer_object *bo, int shareable)
1843 {
1844         struct drm_device *dev = file_priv->minor->dev;
1845         int ret;
1846
1847         mutex_lock(&dev->struct_mutex);
1848         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1849         if (ret)
1850                 goto out;
1851
1852         bo->base.remove = drm_bo_base_deref_locked;
1853         bo->base.type = drm_buffer_type;
1854         bo->base.ref_struct_locked = NULL;
1855         bo->base.unref = drm_buffer_user_object_unmap;
1856
1857 out:
1858         mutex_unlock(&dev->struct_mutex);
1859         return ret;
1860 }
1861
1862 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1863 {
1864         struct drm_bo_create_arg *arg = data;
1865         struct drm_bo_create_req *req = &arg->d.req;
1866         struct drm_bo_info_rep *rep = &arg->d.rep;
1867         struct drm_buffer_object *entry;
1868         enum drm_bo_type bo_type;
1869         int ret = 0;
1870
1871         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1872             (int)(req->size / 1024), req->page_alignment * 4);
1873
1874         if (!dev->bm.initialized) {
1875                 DRM_ERROR("Buffer object manager is not initialized.\n");
1876                 return -EINVAL;
1877         }
1878
1879         /*
1880          * If the buffer creation request comes in with a starting address,
1881          * it points at the desired user pages to map. Otherwise, create a
1882          * drm_bo_type_device buffer, which uses pages allocated from the kernel.
1883          */
1884         bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1885
1886         /*
1887          * User buffers cannot be shared
1888          */
1889         if (bo_type == drm_bo_type_user)
1890                 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1891
1892         ret = drm_buffer_object_create(file_priv->minor->dev,
1893                                        req->size, bo_type, req->flags,
1894                                        req->hint, req->page_alignment,
1895                                        req->buffer_start, &entry);
1896         if (ret)
1897                 goto out;
1898
1899         ret = drm_bo_add_user_object(file_priv, entry,
1900                                      req->flags & DRM_BO_FLAG_SHAREABLE);
1901         if (ret) {
1902                 drm_bo_usage_deref_unlocked(&entry);
1903                 goto out;
1904         }
1905
1906         mutex_lock(&entry->mutex);
1907         drm_bo_fill_rep_arg(entry, rep);
1908         mutex_unlock(&entry->mutex);
1909
1910 out:
1911         return ret;
1912 }
1913
1914 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1915                            void *data, struct drm_file *file_priv)
1916 {
1917         struct drm_bo_map_wait_idle_arg *arg = data;
1918         struct drm_bo_info_req *req = &arg->d.req;
1919         struct drm_bo_info_rep *rep = &arg->d.rep;
1920         struct drm_buffer_object *bo;
1921         int ret;
1922
1923         if (!dev->bm.initialized) {
1924                 DRM_ERROR("Buffer object manager is not initialized.\n");
1925                 return -EINVAL;
1926         }
1927
1928         ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
1929         if (ret)
1930                 return ret;
1931
1932         mutex_lock(&dev->struct_mutex);
1933         bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
1934         mutex_unlock(&dev->struct_mutex);
1935
1936         if (!bo)
1937                 return -EINVAL;
1938
1939         if (bo->base.owner != file_priv)
1940                 req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1941
1942         ret = drm_bo_do_validate(bo, req->flags, req->mask,
1943                                  req->hint | DRM_BO_HINT_DONT_FENCE,
1944                                  bo->fence_class, rep);
1945
1946         drm_bo_usage_deref_unlocked(&bo);
1947
1948         (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1949
1950         return ret;
1951 }
1952
1953 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1954 {
1955         struct drm_bo_map_wait_idle_arg *arg = data;
1956         struct drm_bo_info_req *req = &arg->d.req;
1957         struct drm_bo_info_rep *rep = &arg->d.rep;
1958         int ret;
1959         if (!dev->bm.initialized) {
1960                 DRM_ERROR("Buffer object manager is not initialized.\n");
1961                 return -EINVAL;
1962         }
1963
1964         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1965                                     req->hint, rep);
1966         if (ret)
1967                 return ret;
1968
1969         return 0;
1970 }
1971
1972 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1973 {
1974         struct drm_bo_handle_arg *arg = data;
1975         int ret;
1976         if (!dev->bm.initialized) {
1977                 DRM_ERROR("Buffer object manager is not initialized.\n");
1978                 return -EINVAL;
1979         }
1980
1981         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1982         return ret;
1983 }
1984
1985
1986 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1987 {
1988         struct drm_bo_reference_info_arg *arg = data;
1989         struct drm_bo_handle_arg *req = &arg->d.req;
1990         struct drm_bo_info_rep *rep = &arg->d.rep;
1991         struct drm_user_object *uo;
1992         int ret;
1993
1994         if (!dev->bm.initialized) {
1995                 DRM_ERROR("Buffer object manager is not initialized.\n");
1996                 return -EINVAL;
1997         }
1998
1999         ret = drm_user_object_ref(file_priv, req->handle,
2000                                   drm_buffer_type, &uo);
2001         if (ret)
2002                 return ret;
2003
2004         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2005         if (ret)
2006                 return ret;
2007
2008         return 0;
2009 }
2010
2011 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2012 {
2013         struct drm_bo_handle_arg *arg = data;
2014         int ret = 0;
2015
2016         if (!dev->bm.initialized) {
2017                 DRM_ERROR("Buffer object manager is not initialized.\n");
2018                 return -EINVAL;
2019         }
2020
2021         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2022         return ret;
2023 }
2024
2025 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2026 {
2027         struct drm_bo_reference_info_arg *arg = data;
2028         struct drm_bo_handle_arg *req = &arg->d.req;
2029         struct drm_bo_info_rep *rep = &arg->d.rep;
2030         int ret;
2031
2032         if (!dev->bm.initialized) {
2033                 DRM_ERROR("Buffer object manager is not initialized.\n");
2034                 return -EINVAL;
2035         }
2036
2037         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2038         if (ret)
2039                 return ret;
2040
2041         return 0;
2042 }
2043
2044 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2045 {
2046         struct drm_bo_map_wait_idle_arg *arg = data;
2047         struct drm_bo_info_req *req = &arg->d.req;
2048         struct drm_bo_info_rep *rep = &arg->d.rep;
2049         int ret;
2050         if (!dev->bm.initialized) {
2051                 DRM_ERROR("Buffer object manager is not initialized.\n");
2052                 return -EINVAL;
2053         }
2054
2055         ret = drm_bo_handle_wait(file_priv, req->handle,
2056                                  req->hint, rep);
2057         if (ret)
2058                 return ret;
2059
2060         return 0;
2061 }
2062
2063 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2064                              uint32_t mem_type,
2065                              int free_pinned,
2066                              int allow_errors)
2067 {
2068         struct drm_device *dev = bo->dev;
2069         int ret = 0;
2070
2071         mutex_lock(&bo->mutex);
2072
2073         ret = drm_bo_expire_fence(bo, allow_errors);
2074         if (ret)
2075                 goto out;
2076
2077         if (free_pinned) {
2078                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2079                 mutex_lock(&dev->struct_mutex);
2080                 list_del_init(&bo->pinned_lru);
2081                 if (bo->pinned_node == bo->mem.mm_node)
2082                         bo->pinned_node = NULL;
2083                 if (bo->pinned_node != NULL) {
2084                         drm_mm_put_block(bo->pinned_node);
2085                         bo->pinned_node = NULL;
2086                 }
2087                 mutex_unlock(&dev->struct_mutex);
2088         }
2089
2090         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2091                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
2092                           "cleanup. Removing flag and evicting.\n");
2093                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2094                 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2095         }
2096
2097         if (bo->mem.mem_type == mem_type)
2098                 ret = drm_bo_evict(bo, mem_type, 0);
2099
2100         if (ret) {
2101                 if (allow_errors) {
2102                         goto out;
2103                 } else {
2104                         ret = 0;
2105                         DRM_ERROR("Cleanup eviction failed\n");
2106                 }
2107         }
2108
2109 out:
2110         mutex_unlock(&bo->mutex);
2111         return ret;
2112 }
2113
2114
2115 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2116                                          int pinned_list)
2117 {
2118         if (pinned_list)
2119                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2120         else
2121                 return list_entry(list, struct drm_buffer_object, lru);
2122 }
2123
2124 /*
2125  * dev->struct_mutex locked.
2126  */
2127
2128 static int drm_bo_force_list_clean(struct drm_device *dev,
2129                                    struct list_head *head,
2130                                    unsigned mem_type,
2131                                    int free_pinned,
2132                                    int allow_errors,
2133                                    int pinned_list)
2134 {
2135         struct list_head *list, *next, *prev;
2136         struct drm_buffer_object *entry, *nentry;
2137         int ret;
2138         int do_restart;
2139
2140         /*
2141          * The list traversal is a bit odd here, because an item may
2142          * disappear from the list when we release the struct_mutex or
2143          * when we decrease the usage count. Also we're not guaranteed
2144          * to drain pinned lists, so we can't always restart.
2145          */
2146
2147 restart:
2148         nentry = NULL;
2149         list_for_each_safe(list, next, head) {
2150                 prev = list->prev;
2151
2152                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2153                 atomic_inc(&entry->usage);
2154                 if (nentry) {
2155                         atomic_dec(&nentry->usage);
2156                         nentry = NULL;
2157                 }
2158
2159                 /*
2160                  * Protect the next item from destruction, so we can check
2161                  * its list pointers later on.
2162                  */
2163
2164                 if (next != head) {
2165                         nentry = drm_bo_entry(next, pinned_list);
2166                         atomic_inc(&nentry->usage);
2167                 }
2168                 mutex_unlock(&dev->struct_mutex);
2169
2170                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2171                                         allow_errors);
2172                 mutex_lock(&dev->struct_mutex);
2173
2174                 drm_bo_usage_deref_locked(&entry);
2175                 if (ret)
2176                         return ret;
2177
2178                 /*
2179                  * Has the next item disappeared from the list?
2180                  */
2181
2182                 do_restart = ((next->prev != list) && (next->prev != prev));
2183
2184                 if (nentry != NULL && do_restart)
2185                         drm_bo_usage_deref_locked(&nentry);
2186
2187                 if (do_restart)
2188                         goto restart;
2189         }
2190         return 0;
2191 }
2192
2193 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
2194 {
2195         struct drm_buffer_manager *bm = &dev->bm;
2196         struct drm_mem_type_manager *man = &bm->man[mem_type];
2197         int ret = -EINVAL;
2198
2199         if (mem_type >= DRM_BO_MEM_TYPES) {
2200                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2201                 return ret;
2202         }
2203
2204         if (!man->has_type) {
2205                 DRM_ERROR("Trying to take down uninitialized "
2206                           "memory manager type %u\n", mem_type);
2207                 return ret;
2208         }
2209
2210         if ((man->kern_init_type) && (kern_clean == 0)) {
2211                 DRM_ERROR("Trying to take down kernel initialized "
2212                           "memory manager type %u\n", mem_type);
2213                 return -EPERM;
2214         }
2215
2216         man->use_type = 0;
2217         man->has_type = 0;
2218
2219         ret = 0;
2220         if (mem_type > 0) {
2221                 BUG_ON(!list_empty(&bm->unfenced));
2222                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2223                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2224
2225                 if (drm_mm_clean(&man->manager)) {
2226                         drm_mm_takedown(&man->manager);
2227                 } else {
2228                         ret = -EBUSY;
2229                 }
2230         }
2231
2232         return ret;
2233 }
2234 EXPORT_SYMBOL(drm_bo_clean_mm);
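
/*
 * Illustrative sketch only: tearing down a memory type the driver
 * initialized itself (hence kern_clean = 1), with dev->struct_mutex held
 * as the in-tree callers do. DRM_BO_MEM_TT is a placeholder for whatever
 * type the driver registered; the helper name is hypothetical.
 *
 *	static void example_takedown_tt(struct drm_device *dev)
 *	{
 *		mutex_lock(&dev->struct_mutex);
 *		if (drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1))
 *			DRM_ERROR("TT memory manager not clean at unload.\n");
 *		mutex_unlock(&dev->struct_mutex);
 *	}
 */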
2235
2236 /**
2237  * Evict all buffers of a particular mem_type, but leave memory manager
2238  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2239  * point since we have the hardware lock.
2240  */
2241
2242 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2243 {
2244         int ret;
2245         struct drm_buffer_manager *bm = &dev->bm;
2246         struct drm_mem_type_manager *man = &bm->man[mem_type];
2247
2248         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2249                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2250                 return -EINVAL;
2251         }
2252
2253         if (!man->has_type) {
2254                 DRM_ERROR("Memory type %u has not been initialized.\n",
2255                           mem_type);
2256                 return 0;
2257         }
2258
2259         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2260         if (ret)
2261                 return ret;
2262         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2263
2264         return ret;
2265 }
2266
2267 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
2268                    unsigned long p_offset, unsigned long p_size,
2269                    int kern_init)
2270 {
2271         struct drm_buffer_manager *bm = &dev->bm;
2272         int ret = -EINVAL;
2273         struct drm_mem_type_manager *man;
2274
2275         if (type >= DRM_BO_MEM_TYPES) {
2276                 DRM_ERROR("Illegal memory type %d\n", type);
2277                 return ret;
2278         }
2279
2280         man = &bm->man[type];
2281         if (man->has_type) {
2282                 DRM_ERROR("Memory manager already initialized for type %d\n",
2283                           type);
2284                 return ret;
2285         }
2286
2287         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2288         if (ret)
2289                 return ret;
2290
2291         ret = 0;
2292         if (type != DRM_BO_MEM_LOCAL) {
2293                 if (!p_size) {
2294                         DRM_ERROR("Zero size memory manager type %d\n", type);
2295                         return -EINVAL;
2296                 }
2297                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2298                 if (ret)
2299                         return ret;
2300         }
2301         man->has_type = 1;
2302         man->use_type = 1;
2303         man->kern_init_type = kern_init;
2304         man->size = p_size;
2305
2306         INIT_LIST_HEAD(&man->lru);
2307         INIT_LIST_HEAD(&man->pinned);
2308
2309         return 0;
2310 }
2311 EXPORT_SYMBOL(drm_bo_init_mm);
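
/*
 * Illustrative sketch only: registering an additional memory type whose
 * offset and size are given in pages, with dev->struct_mutex held to match
 * the in-tree callers. DRM_BO_MEM_VRAM and the 256 MB aperture are
 * placeholder values; the helper name is hypothetical.
 *
 *	static int example_init_vram(struct drm_device *dev)
 *	{
 *		unsigned long aperture_pages = (256 * 1024 * 1024) >> PAGE_SHIFT;
 *		int ret;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, aperture_pages, 1);
 *		mutex_unlock(&dev->struct_mutex);
 *		return ret;
 *	}
 */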
2312
2313 /*
2314  * This function is intended to be called on drm driver unload.
2315  * If you decide to call it from lastclose, you must protect the call
2316  * from a potentially racing drm_bo_driver_init in firstopen.
2317  * (This may happen on X server restart).
2318  */
2319
2320 int drm_bo_driver_finish(struct drm_device *dev)
2321 {
2322         struct drm_buffer_manager *bm = &dev->bm;
2323         int ret = 0;
2324         unsigned i = DRM_BO_MEM_TYPES;
2325         struct drm_mem_type_manager *man;
2326
2327         mutex_lock(&dev->struct_mutex);
2328
2329         if (!bm->initialized)
2330                 goto out;
2331         bm->initialized = 0;
2332
2333         while (i--) {
2334                 man = &bm->man[i];
2335                 if (man->has_type) {
2336                         man->use_type = 0;
2337                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
2338                                 ret = -EBUSY;
2339                                 DRM_ERROR("DRM memory manager type %d "
2340                                           "is not clean.\n", i);
2341                         }
2342                         man->has_type = 0;
2343                 }
2344         }
2345         mutex_unlock(&dev->struct_mutex);
2346
2347         if (!cancel_delayed_work(&bm->wq))
2348                 flush_scheduled_work();
2349
2350         mutex_lock(&dev->struct_mutex);
2351         drm_bo_delayed_delete(dev, 1);
2352         if (list_empty(&bm->ddestroy))
2353                 DRM_DEBUG("Delayed destroy list was clean\n");
2354
2355         if (list_empty(&bm->man[0].lru))
2356                 DRM_DEBUG("Swap list was clean\n");
2357
2358         if (list_empty(&bm->man[0].pinned))
2359                 DRM_DEBUG("NO_MOVE list was clean\n");
2360
2361         if (list_empty(&bm->unfenced))
2362                 DRM_DEBUG("Unfenced list was clean\n");
2363
2364         if (bm->dummy_read_page) {
2365 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2366                 ClearPageReserved(bm->dummy_read_page);
2367 #endif
2368                 __free_page(bm->dummy_read_page);
2369         }
2370
2371 out:
2372         mutex_unlock(&dev->struct_mutex);
2373         return ret;
2374 }
2375 EXPORT_SYMBOL(drm_bo_driver_finish);
2376
2377 /*
2378  * This function is intended to be called on drm driver load.
2379  * If you decide to call it from firstopen, you must protect the call
2380  * from a potentially racing drm_bo_driver_finish in lastclose.
2381  * (This may happen on X server restart).
2382  */
2383
2384 int drm_bo_driver_init(struct drm_device *dev)
2385 {
2386         struct drm_bo_driver *driver = dev->driver->bo_driver;
2387         struct drm_buffer_manager *bm = &dev->bm;
2388         int ret = -EINVAL;
2389
2390         bm->dummy_read_page = NULL;
2391         drm_bo_init_lock(&bm->bm_lock);
2392         mutex_lock(&dev->struct_mutex);
2393         if (!driver)
2394                 goto out_unlock;
2395
2396         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2397         if (!bm->dummy_read_page) {
2398                 ret = -ENOMEM;
2399                 goto out_unlock;
2400         }
2401
2402 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2403         SetPageReserved(bm->dummy_read_page);
2404 #endif
2405
2406         /*
2407          * Initialize the system memory buffer type.
2408          * Other types need to be driver / IOCTL initialized.
2409          */
2410         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
2411         if (ret) {
2412 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2413                 ClearPageReserved(bm->dummy_read_page);
2414 #endif
2415                 __free_page(bm->dummy_read_page);
2416                 bm->dummy_read_page = NULL;
2417                 goto out_unlock;
2418         }
2419
2420 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2421         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2422 #else
2423         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2424 #endif
2425         bm->initialized = 1;
2426         bm->nice_mode = 1;
2427         atomic_set(&bm->count, 0);
2428         bm->cur_pages = 0;
2429         INIT_LIST_HEAD(&bm->unfenced);
2430         INIT_LIST_HEAD(&bm->ddestroy);
2431 out_unlock:
2432         mutex_unlock(&dev->struct_mutex);
2433         return ret;
2434 }
2435 EXPORT_SYMBOL(drm_bo_driver_init);
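
/*
 * Illustrative sketch only: the usual ordering on driver load.
 * drm_bo_driver_init() sets up the DRM_BO_MEM_LOCAL type; any further
 * types are registered afterwards with drm_bo_init_mm(). The helper name,
 * DRM_BO_MEM_TT and the 65536-page size are placeholders.
 *
 *	static int example_load_bo_manager(struct drm_device *dev)
 *	{
 *		int ret;
 *
 *		ret = drm_bo_driver_init(dev);
 *		if (ret)
 *			return ret;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, 65536, 1);
 *		mutex_unlock(&dev->struct_mutex);
 *		return ret;
 *	}
 */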
2436
2437 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2438 {
2439         struct drm_mm_init_arg *arg = data;
2440         struct drm_buffer_manager *bm = &dev->bm;
2441         struct drm_bo_driver *driver = dev->driver->bo_driver;
2442         int ret;
2443
2444         if (!driver) {
2445                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2446                 return -EINVAL;
2447         }
2448
2449         ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2450         if (ret)
2451                 return ret;
2452
2453         ret = -EINVAL;
2454         if (arg->magic != DRM_BO_INIT_MAGIC) {
2455                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2456                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2457                 return -EINVAL;
2458         }
2459         if (arg->major != DRM_BO_INIT_MAJOR) {
2460                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2461                           "\tversions don't match. Got %d, expected %d.\n",
2462                           arg->major, DRM_BO_INIT_MAJOR);
2463                 return -EINVAL;
2464         }
2465
2466         mutex_lock(&dev->struct_mutex);
2467         if (!bm->initialized) {
2468                 DRM_ERROR("DRM memory manager was not initialized.\n");
2469                 goto out;
2470         }
2471         if (arg->mem_type == 0) {
2472                 DRM_ERROR("System memory buffers already initialized.\n");
2473                 goto out;
2474         }
2475         ret = drm_bo_init_mm(dev, arg->mem_type,
2476                              arg->p_offset, arg->p_size, 0);
2477
2478 out:
2479         mutex_unlock(&dev->struct_mutex);
2480         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2481
2482         if (ret)
2483                 return ret;
2484
2485         return 0;
2486 }
2487
2488 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2489 {
2490         struct drm_mm_type_arg *arg = data;
2491         struct drm_buffer_manager *bm = &dev->bm;
2492         struct drm_bo_driver *driver = dev->driver->bo_driver;
2493         int ret;
2494
2495         if (!driver) {
2496                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2497                 return -EINVAL;
2498         }
2499
2500         ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
2501         if (ret)
2502                 return ret;
2503
2504         mutex_lock(&dev->struct_mutex);
2505         ret = -EINVAL;
2506         if (!bm->initialized) {
2507                 DRM_ERROR("DRM memory manager was not initialized\n");
2508                 goto out;
2509         }
2510         if (arg->mem_type == 0) {
2511                 DRM_ERROR("No takedown for System memory buffers.\n");
2512                 goto out;
2513         }
2514         ret = 0;
2515         if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
2516                 if (ret == -EINVAL)
2517                         DRM_ERROR("Memory manager type %d not clean. "
2518                                   "Delaying takedown\n", arg->mem_type);
2519                 ret = 0;
2520         }
2521 out:
2522         mutex_unlock(&dev->struct_mutex);
2523         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2524
2525         if (ret)
2526                 return ret;
2527
2528         return 0;
2529 }
2530
2531 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2532 {
2533         struct drm_mm_type_arg *arg = data;
2534         struct drm_bo_driver *driver = dev->driver->bo_driver;
2535         int ret;
2536
2537         if (!driver) {
2538                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2539                 return -EINVAL;
2540         }
2541
2542         if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2543                 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2544                 return -EINVAL;
2545         }
2546
2547         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2548                 ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
2549                 if (ret)
2550                         return ret;
2551         }
2552
2553         mutex_lock(&dev->struct_mutex);
2554         ret = drm_bo_lock_mm(dev, arg->mem_type);
2555         mutex_unlock(&dev->struct_mutex);
2556         if (ret) {
2557                 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2558                 return ret;
2559         }
2560
2561         return 0;
2562 }
2563
2564 int drm_mm_unlock_ioctl(struct drm_device *dev,
2565                         void *data,
2566                         struct drm_file *file_priv)
2567 {
2568         struct drm_mm_type_arg *arg = data;
2569         struct drm_bo_driver *driver = dev->driver->bo_driver;
2570         int ret;
2571
2572         if (!driver) {
2573                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2574                 return -EINVAL;
2575         }
2576
2577         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2578                 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2579                 if (ret)
2580                         return ret;
2581         }
2582
2583         return 0;
2584 }
2585
2586 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2587 {
2588         struct drm_mm_info_arg *arg = data;
2589         struct drm_buffer_manager *bm = &dev->bm;
2590         struct drm_bo_driver *driver = dev->driver->bo_driver;
2591         struct drm_mem_type_manager *man;
2592         int ret = 0;
2593         int mem_type = arg->mem_type;
2594
2595         if (!driver) {
2596                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2597                 return -EINVAL;
2598         }
2599
2600         if (mem_type >= DRM_BO_MEM_TYPES) {
2601                 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2602                 return -EINVAL;
2603         }
2604
2605         mutex_lock(&dev->struct_mutex);
2606         if (!bm->initialized) {
2607                 DRM_ERROR("DRM memory manager was not initialized\n");
2608                 ret = -EINVAL;
2609                 goto out;
2610         }
2611
2612
2613         man = &bm->man[arg->mem_type];
2614
2615         arg->p_size = man->size;
2616
2617 out:
2618         mutex_unlock(&dev->struct_mutex);
2619      
2620         return ret;
2621 }
2622 /*
2623  * buffer object vm functions.
2624  */
2625
2626 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2627 {
2628         struct drm_buffer_manager *bm = &dev->bm;
2629         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2630
2631         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2632                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2633                         return 0;
2634
2635                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2636                         return 0;
2637
2638                 if (mem->flags & DRM_BO_FLAG_CACHED)
2639                         return 0;
2640         }
2641         return 1;
2642 }
2643 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2644
2645 /**
2646  * Get the PCI offset for the buffer object memory.
2647  * \param dev The drm device.
2648  * \param mem The buffer object memory region.
2649  * \param bus_base On return the base of the PCI region
2650  * \param bus_offset On return the byte offset into the PCI region
2651  * \param bus_size On return the byte size of the buffer object or zero if
2652  *     the buffer object memory is not accessible through a PCI region.
2653  * \return Failure indication.
2654  *
2655  * Returns -EINVAL if the buffer object is currently not mappable.
2656  * Otherwise returns zero.
2657  */
2658
2659 int drm_bo_pci_offset(struct drm_device *dev,
2660                       struct drm_bo_mem_reg *mem,
2661                       unsigned long *bus_base,
2662                       unsigned long *bus_offset, unsigned long *bus_size)
2663 {
2664         struct drm_buffer_manager *bm = &dev->bm;
2665         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2666
2667         *bus_size = 0;
2668         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2669                 return -EINVAL;
2670
2671         if (drm_mem_reg_is_pci(dev, mem)) {
2672                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2673                 *bus_size = mem->num_pages << PAGE_SHIFT;
2674                 *bus_base = man->io_offset;
2675         }
2676
2677         return 0;
2678 }
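
/*
 * Illustrative sketch only: turning the values returned by
 * drm_bo_pci_offset() into a bus address, for instance before ioremapping
 * the region. A zero bus_size means the memory is not PCI-accessible.
 * The helper name is hypothetical.
 *
 *	static int example_bo_bus_addr(struct drm_device *dev,
 *				       struct drm_buffer_object *bo,
 *				       unsigned long *bus_addr)
 *	{
 *		unsigned long base, offset, size;
 *		int ret;
 *
 *		ret = drm_bo_pci_offset(dev, &bo->mem, &base, &offset, &size);
 *		if (ret)
 *			return ret;
 *		if (size == 0)
 *			return -EINVAL;
 *
 *		*bus_addr = base + offset;
 *		return 0;
 *	}
 */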
2679
2680 /**
2681  * Kill all user-space virtual mappings of this buffer object.
2682  *
2683  * \param bo The buffer object.
2684  *
2685  * Call with bo->mutex locked.
2686  */
2687
2688 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2689 {
2690         struct drm_device *dev = bo->dev;
2691         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2692         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2693
2694         if (!dev->dev_mapping)
2695                 return;
2696
2697         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2698 }
2699
2700 /**
2701  * drm_bo_takedown_vm_locked:
2702  *
2703  * @bo: the buffer object to remove any drm device mapping
2704  *
2705  * Remove any associated vm mapping on the drm device node that
2706  * would have been created for a drm_bo_type_device buffer
2707  */
2708 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2709 {
2710         struct drm_map_list *list;
2711         drm_local_map_t *map;
2712         struct drm_device *dev = bo->dev;
2713
2714         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2715         if (bo->type != drm_bo_type_device)
2716                 return;
2717
2718         list = &bo->map_list;
2719         if (list->user_token) {
2720                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2721                 list->user_token = 0;
2722         }
2723         if (list->file_offset_node) {
2724                 drm_mm_put_block(list->file_offset_node);
2725                 list->file_offset_node = NULL;
2726         }
2727
2728         map = list->map;
2729         if (!map)
2730                 return;
2731
2732         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2733         list->map = NULL;
2734         list->user_token = 0ULL;
2735         drm_bo_usage_deref_locked(&bo);
2736 }
2737
2738 /**
2739  * drm_bo_setup_vm_locked:
2740  *
2741  * @bo: the buffer to allocate address space for
2742  *
2743  * Allocate address space in the drm device so that applications
2744  * can mmap the buffer and access the contents. This only
2745  * applies to drm_bo_type_device objects as others are not
2746  * placed in the drm device address space.
2747  */
2748 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2749 {
2750         struct drm_map_list *list = &bo->map_list;
2751         drm_local_map_t *map;
2752         struct drm_device *dev = bo->dev;
2753
2754         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2755         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2756         if (!list->map)
2757                 return -ENOMEM;
2758
2759         map = list->map;
2760         map->offset = 0;
2761         map->type = _DRM_TTM;
2762         map->flags = _DRM_REMOVABLE;
2763         map->size = bo->mem.num_pages * PAGE_SIZE;
2764         atomic_inc(&bo->usage);
2765         map->handle = (void *)bo;
2766
2767         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2768                                                     bo->mem.num_pages, 0, 0);
2769
2770         if (unlikely(!list->file_offset_node)) {
2771                 drm_bo_takedown_vm_locked(bo);
2772                 return -ENOMEM;
2773         }
2774
2775         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2776                                                   bo->mem.num_pages, 0);
2777
2778         if (unlikely(!list->file_offset_node)) {
2779                 drm_bo_takedown_vm_locked(bo);
2780                 return -ENOMEM;
2781         }
2782                 
2783         list->hash.key = list->file_offset_node->start;
2784         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2785                 drm_bo_takedown_vm_locked(bo);
2786                 return -ENOMEM;
2787         }
2788
2789         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2790
2791         return 0;
2792 }
2793
2794 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2795                          struct drm_file *file_priv)
2796 {
2797         struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2798
2799         arg->major = DRM_BO_INIT_MAJOR;
2800         arg->minor = DRM_BO_INIT_MINOR;
2801         arg->patchlevel = DRM_BO_INIT_PATCH;
2802
2803         return 0;
2804 }