1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads,
40 * hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex does also protect the buffer list heads, so to manipulate those,
44 * we need both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48 * the list traversal will, in general, need to be restarted.
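 *
 * An illustrative sketch of that traversal pattern (not a function in this
 * file; apart from the locks, the usage counter and
 * drm_bo_usage_deref_locked(), the names are placeholders):
 *
 *	mutex_lock(&dev->struct_mutex);
 * restart:
 *	list_for_each(list, head) {
 *		entry = list_entry(list, struct drm_buffer_object, lru);
 *		atomic_inc(&entry->usage);          (keep the entry alive)
 *		mutex_unlock(&dev->struct_mutex);   (honour the lock order)
 *		mutex_lock(&entry->mutex);
 *		mutex_lock(&dev->struct_mutex);
 *		... operate on entry ...
 *		mutex_unlock(&entry->mutex);
 *		drm_bo_usage_deref_locked(&entry);
 *		goto restart;                       (the list may have changed)
 *	}
 *	mutex_unlock(&dev->struct_mutex);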
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
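/*
 * Map a buffer memory type index to its DRM_BO_FLAG_MEM_* placement bit.
 * For example, assuming the flag layout in drm.h where the memory-type
 * flags start at bit 24, DRM_BO_MEM_TT (type 1) yields bit 25, i.e.
 * DRM_BO_FLAG_MEM_TT.
 */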
57 static inline uint64_t drm_bo_type_flags(unsigned type)
59 return (1ULL << (24 + type));
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
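/*
 * Put a buffer on the eviction LRU of its current memory type.  Buffers
 * proposed as NO_MOVE or NO_EVICT that already sit in their pinned memory
 * type are deliberately kept off the LRU so the eviction code never
 * considers them.
 *
 * Call dev->struct_mutex locked.
 */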
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
83 if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84 || bo->mem.mem_type != bo->pinned_mem_type) {
85 man = &bo->dev->bm.man[bo->mem.mem_type];
86 list_add_tail(&bo->lru, &man->lru);
88 INIT_LIST_HEAD(&bo->lru);
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
94 #ifdef DRM_ODD_MM_COMPAT
97 if (!bo->map_list.map)
100 ret = drm_bo_lock_kmm(bo);
103 drm_bo_unmap_virtual(bo);
105 drm_bo_finish_unmap(bo);
107 if (!bo->map_list.map)
110 drm_bo_unmap_virtual(bo);
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
117 #ifdef DRM_ODD_MM_COMPAT
120 if (!bo->map_list.map)
123 ret = drm_bo_remap_bound(bo);
125 DRM_ERROR("Failed to remap a bound buffer object.\n"
126 "\tThis might cause a sigbus later.\n");
128 drm_bo_unlock_kmm(bo);
133 * Call bo->mutex locked.
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
138 struct drm_device *dev = bo->dev;
140 uint32_t page_flags = 0;
142 DRM_ASSERT_LOCKED(&bo->mutex);
145 if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146 page_flags |= DRM_TTM_PAGE_WRITE;
149 case drm_bo_type_device:
150 case drm_bo_type_kernel:
151 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
152 page_flags, dev->bm.dummy_read_page);
156 case drm_bo_type_user:
157 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158 page_flags | DRM_TTM_PAGE_USER,
159 dev->bm.dummy_read_page);
163 ret = drm_ttm_set_user(bo->ttm, current,
171 DRM_ERROR("Illegal buffer object type\n");
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180 struct drm_bo_mem_reg *mem,
181 int evict, int no_wait)
183 struct drm_device *dev = bo->dev;
184 struct drm_buffer_manager *bm = &dev->bm;
185 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
191 if (old_is_pci || new_is_pci ||
192 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193 ret = drm_bo_vm_pre_move(bo, old_is_pci);
198 * Create and bind a ttm if required.
201 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202 ret = drm_bo_add_ttm(bo);
206 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207 ret = drm_ttm_bind(bo->ttm, mem);
212 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
214 struct drm_bo_mem_reg *old_mem = &bo->mem;
215 uint64_t save_flags = old_mem->flags;
216 uint64_t save_proposed_flags = old_mem->proposed_flags;
220 old_mem->proposed_flags = save_proposed_flags;
221 DRM_FLAG_MASKED(save_flags, mem->flags,
222 DRM_BO_MASK_MEMTYPE);
228 if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
229 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))
230 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
231 else if (dev->driver->bo_driver->move)
232 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
234 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
240 if (old_is_pci || new_is_pci)
241 drm_bo_vm_post_move(bo);
243 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
245 dev->driver->bo_driver->invalidate_caches(dev,
248 DRM_ERROR("Cannot flush read caches\n");
251 DRM_FLAG_MASKED(bo->priv_flags,
252 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
253 _DRM_BO_FLAG_EVICTED);
256 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
257 bm->man[bo->mem.mem_type].gpu_offset;
263 if (old_is_pci || new_is_pci)
264 drm_bo_vm_post_move(bo);
266 new_man = &bm->man[bo->mem.mem_type];
267 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
268 drm_ttm_unbind(bo->ttm);
269 drm_ttm_destroy(bo->ttm);
277 * Call bo->mutex locked.
278 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
281 static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
283 struct drm_fence_object *fence = bo->fence;
285 if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
289 if (drm_fence_object_signaled(fence, bo->fence_type)) {
290 drm_fence_usage_deref_unlocked(&bo->fence);
293 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
294 if (drm_fence_object_signaled(fence, bo->fence_type)) {
295 drm_fence_usage_deref_unlocked(&bo->fence);
303 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
307 mutex_lock(&bo->mutex);
308 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
309 mutex_unlock(&bo->mutex);
315 * Call bo->mutex locked.
316 * Wait until the buffer is idle.
319 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
320 int no_wait, int check_unfenced)
324 DRM_ASSERT_LOCKED(&bo->mutex);
325 while(unlikely(drm_bo_busy(bo, check_unfenced))) {
329 if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
330 mutex_unlock(&bo->mutex);
331 wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
332 mutex_lock(&bo->mutex);
333 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
337 struct drm_fence_object *fence;
338 uint32_t fence_type = bo->fence_type;
340 drm_fence_reference_unlocked(&fence, bo->fence);
341 mutex_unlock(&bo->mutex);
343 ret = drm_fence_object_wait(fence, lazy, !interruptible,
346 drm_fence_usage_deref_unlocked(&fence);
347 mutex_lock(&bo->mutex);
348 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
356 EXPORT_SYMBOL(drm_bo_wait);
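/*
 * A typical call, mirroring the users later in this file (illustrative
 * only): a non-lazy, interruptible wait that also waits for the unfenced
 * state to clear:
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
 *	mutex_unlock(&bo->mutex);
 */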
358 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
360 struct drm_device *dev = bo->dev;
361 struct drm_buffer_manager *bm = &dev->bm;
365 unsigned long _end = jiffies + 3 * DRM_HZ;
368 ret = drm_bo_wait(bo, 0, 0, 0, 0);
369 if (ret && allow_errors)
372 } while (ret && !time_after_eq(jiffies, _end));
376 DRM_ERROR("Detected GPU lockup or "
377 "fence driver was taken down. "
378 "Evicting buffer.\n");
382 drm_fence_usage_deref_unlocked(&bo->fence);
388 * Call dev->struct_mutex locked.
389 * Attempts to remove all private references to a buffer by expiring its
390 * fence object and removing from lru lists and memory managers.
393 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
395 struct drm_device *dev = bo->dev;
396 struct drm_buffer_manager *bm = &dev->bm;
398 DRM_ASSERT_LOCKED(&dev->struct_mutex);
400 atomic_inc(&bo->usage);
401 mutex_unlock(&dev->struct_mutex);
402 mutex_lock(&bo->mutex);
404 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
406 if (bo->fence && drm_fence_object_signaled(bo->fence,
408 drm_fence_usage_deref_unlocked(&bo->fence);
410 if (bo->fence && remove_all)
411 (void)drm_bo_expire_fence(bo, 0);
413 mutex_lock(&dev->struct_mutex);
415 if (!atomic_dec_and_test(&bo->usage))
419 list_del_init(&bo->lru);
420 if (bo->mem.mm_node) {
421 drm_mm_put_block(bo->mem.mm_node);
422 if (bo->pinned_node == bo->mem.mm_node)
423 bo->pinned_node = NULL;
424 bo->mem.mm_node = NULL;
426 list_del_init(&bo->pinned_lru);
427 if (bo->pinned_node) {
428 drm_mm_put_block(bo->pinned_node);
429 bo->pinned_node = NULL;
431 list_del_init(&bo->ddestroy);
432 mutex_unlock(&bo->mutex);
433 drm_bo_destroy_locked(bo);
437 if (list_empty(&bo->ddestroy)) {
438 drm_fence_object_flush(bo->fence, bo->fence_type);
439 list_add_tail(&bo->ddestroy, &bm->ddestroy);
440 schedule_delayed_work(&bm->wq,
441 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
445 mutex_unlock(&bo->mutex);
450 * Verify that refcount is 0 and that there are no internal references
451 * to the buffer object. Then destroy it.
454 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
456 struct drm_device *dev = bo->dev;
457 struct drm_buffer_manager *bm = &dev->bm;
459 DRM_ASSERT_LOCKED(&dev->struct_mutex);
461 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
462 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
463 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
464 if (bo->fence != NULL) {
465 DRM_ERROR("Fence was non-zero.\n");
466 drm_bo_cleanup_refs(bo, 0);
470 #ifdef DRM_ODD_MM_COMPAT
471 BUG_ON(!list_empty(&bo->vma_list));
472 BUG_ON(!list_empty(&bo->p_mm_list));
476 drm_ttm_unbind(bo->ttm);
477 drm_ttm_destroy(bo->ttm);
481 atomic_dec(&bm->count);
483 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
489 * Some stuff is still trying to reference the buffer object.
490 * Get rid of those references.
493 drm_bo_cleanup_refs(bo, 0);
499 * Call dev->struct_mutex locked.
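 *
 * Buffers whose last reference went away while they still carried an
 * unsignaled fence end up on bm->ddestroy (see drm_bo_cleanup_refs()).
 * This function retries their cleanup, and the delayed work below
 * reschedules itself roughly every DRM_HZ / 100 ticks until the list
 * drains.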
502 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
504 struct drm_buffer_manager *bm = &dev->bm;
506 struct drm_buffer_object *entry, *nentry;
507 struct list_head *list, *next;
509 list_for_each_safe(list, next, &bm->ddestroy) {
510 entry = list_entry(list, struct drm_buffer_object, ddestroy);
513 if (next != &bm->ddestroy) {
514 nentry = list_entry(next, struct drm_buffer_object,
516 atomic_inc(&nentry->usage);
519 drm_bo_cleanup_refs(entry, remove_all);
522 atomic_dec(&nentry->usage);
526 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
527 static void drm_bo_delayed_workqueue(void *data)
529 static void drm_bo_delayed_workqueue(struct work_struct *work)
532 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
533 struct drm_device *dev = (struct drm_device *) data;
534 struct drm_buffer_manager *bm = &dev->bm;
536 struct drm_buffer_manager *bm =
537 container_of(work, struct drm_buffer_manager, wq.work);
538 struct drm_device *dev = container_of(bm, struct drm_device, bm);
541 DRM_DEBUG("Delayed delete Worker\n");
543 mutex_lock(&dev->struct_mutex);
544 if (!bm->initialized) {
545 mutex_unlock(&dev->struct_mutex);
548 drm_bo_delayed_delete(dev, 0);
549 if (bm->initialized && !list_empty(&bm->ddestroy)) {
550 schedule_delayed_work(&bm->wq,
551 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
553 mutex_unlock(&dev->struct_mutex);
556 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
558 struct drm_buffer_object *tmp_bo = *bo;
561 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
563 if (atomic_dec_and_test(&tmp_bo->usage))
564 drm_bo_destroy_locked(tmp_bo);
566 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
568 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
569 struct drm_user_object *uo)
571 struct drm_buffer_object *bo =
572 drm_user_object_entry(uo, struct drm_buffer_object, base);
574 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
576 drm_bo_takedown_vm_locked(bo);
577 drm_bo_usage_deref_locked(&bo);
580 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
582 struct drm_buffer_object *tmp_bo = *bo;
583 struct drm_device *dev = tmp_bo->dev;
586 if (atomic_dec_and_test(&tmp_bo->usage)) {
587 mutex_lock(&dev->struct_mutex);
588 if (atomic_read(&tmp_bo->usage) == 0)
589 drm_bo_destroy_locked(tmp_bo);
590 mutex_unlock(&dev->struct_mutex);
593 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
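/*
 * Return every buffer on the unfenced list to its LRU, e.g. when a
 * command submission is aborted after validation, clear the unfenced
 * state and wake up anyone waiting on it.
 */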
595 void drm_putback_buffer_objects(struct drm_device *dev)
597 struct drm_buffer_manager *bm = &dev->bm;
598 struct list_head *list = &bm->unfenced;
599 struct drm_buffer_object *entry, *next;
601 mutex_lock(&dev->struct_mutex);
602 list_for_each_entry_safe(entry, next, list, lru) {
603 atomic_inc(&entry->usage);
604 mutex_unlock(&dev->struct_mutex);
606 mutex_lock(&entry->mutex);
607 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
608 mutex_lock(&dev->struct_mutex);
610 list_del_init(&entry->lru);
611 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
612 wake_up_all(&entry->event_queue);
615 * FIXME: Might want to put back on head of list
616 * instead of tail here.
619 drm_bo_add_to_lru(entry);
620 mutex_unlock(&entry->mutex);
621 drm_bo_usage_deref_locked(&entry);
623 mutex_unlock(&dev->struct_mutex);
625 EXPORT_SYMBOL(drm_putback_buffer_objects);
628 * Note. The caller has to register (if applicable)
629 * and deregister fence object usage.
632 int drm_fence_buffer_objects(struct drm_device *dev,
633 struct list_head *list,
634 uint32_t fence_flags,
635 struct drm_fence_object *fence,
636 struct drm_fence_object **used_fence)
638 struct drm_buffer_manager *bm = &dev->bm;
639 struct drm_buffer_object *entry;
640 uint32_t fence_type = 0;
641 uint32_t fence_class = ~0;
646 mutex_lock(&dev->struct_mutex);
649 list = &bm->unfenced;
652 fence_class = fence->fence_class;
654 list_for_each_entry(entry, list, lru) {
655 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
656 fence_type |= entry->new_fence_type;
657 if (fence_class == ~0)
658 fence_class = entry->new_fence_class;
659 else if (entry->new_fence_class != fence_class) {
660 DRM_ERROR("Mismatched fence classes on unfenced list: "
663 entry->new_fence_class);
676 if ((fence_type & fence->type) != fence_type ||
677 (fence->fence_class != fence_class)) {
678 DRM_ERROR("Given fence doesn't match buffers "
679 "on unfenced list.\n");
684 mutex_unlock(&dev->struct_mutex);
685 ret = drm_fence_object_create(dev, fence_class, fence_type,
686 fence_flags | DRM_FENCE_FLAG_EMIT,
688 mutex_lock(&dev->struct_mutex);
697 entry = list_entry(l, struct drm_buffer_object, lru);
698 atomic_inc(&entry->usage);
699 mutex_unlock(&dev->struct_mutex);
700 mutex_lock(&entry->mutex);
701 mutex_lock(&dev->struct_mutex);
703 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
706 drm_fence_usage_deref_locked(&entry->fence);
707 entry->fence = drm_fence_reference_locked(fence);
708 entry->fence_class = entry->new_fence_class;
709 entry->fence_type = entry->new_fence_type;
710 DRM_FLAG_MASKED(entry->priv_flags, 0,
711 _DRM_BO_FLAG_UNFENCED);
712 wake_up_all(&entry->event_queue);
713 drm_bo_add_to_lru(entry);
715 mutex_unlock(&entry->mutex);
716 drm_bo_usage_deref_locked(&entry);
719 DRM_DEBUG("Fenced %d buffers\n", count);
721 mutex_unlock(&dev->struct_mutex);
725 EXPORT_SYMBOL(drm_fence_buffer_objects);
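/*
 * Rough sketch of the intended use of drm_fence_buffer_objects() after a
 * driver has validated buffers onto the unfenced list and emitted commands
 * touching them (illustrative only; error handling omitted).  Passing a
 * NULL list means the unfenced list, and passing a NULL fence asks for one
 * to be created and emitted:
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	... use the returned fence for syncing or throttling ...
 *	drm_fence_usage_deref_unlocked(&fence);
 */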
731 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
735 struct drm_device *dev = bo->dev;
736 struct drm_bo_mem_reg evict_mem;
739 * Someone might have modified the buffer before we took the
744 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
746 if (unlikely(bo->mem.flags &
747 (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
749 if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
751 if (unlikely(bo->mem.mem_type != mem_type))
753 ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
757 } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
760 evict_mem.mm_node = NULL;
763 evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
765 mutex_lock(&dev->struct_mutex);
766 list_del_init(&bo->lru);
767 mutex_unlock(&dev->struct_mutex);
769 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
773 DRM_ERROR("Failed to find memory space for "
774 "buffer 0x%p eviction.\n", bo);
778 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
782 DRM_ERROR("Buffer eviction failed\n");
786 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
787 _DRM_BO_FLAG_EVICTED);
790 mutex_lock(&dev->struct_mutex);
791 if (evict_mem.mm_node) {
792 if (evict_mem.mm_node != bo->pinned_node)
793 drm_mm_put_block(evict_mem.mm_node);
794 evict_mem.mm_node = NULL;
796 drm_bo_add_to_lru(bo);
797 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
799 mutex_unlock(&dev->struct_mutex);
805 * Repeatedly evict memory from the LRU for @mem_type until we create enough
806 * space, or we've evicted everything and there isn't enough space.
808 static int drm_bo_mem_force_space(struct drm_device *dev,
809 struct drm_bo_mem_reg *mem,
810 uint32_t mem_type, int no_wait)
812 struct drm_mm_node *node;
813 struct drm_buffer_manager *bm = &dev->bm;
814 struct drm_buffer_object *entry;
815 struct drm_mem_type_manager *man = &bm->man[mem_type];
816 struct list_head *lru;
817 unsigned long num_pages = mem->num_pages;
820 mutex_lock(&dev->struct_mutex);
822 node = drm_mm_search_free(&man->manager, num_pages,
823 mem->page_alignment, 1);
828 if (lru->next == lru)
831 entry = list_entry(lru->next, struct drm_buffer_object, lru);
832 atomic_inc(&entry->usage);
833 mutex_unlock(&dev->struct_mutex);
834 mutex_lock(&entry->mutex);
835 ret = drm_bo_evict(entry, mem_type, no_wait);
836 mutex_unlock(&entry->mutex);
837 drm_bo_usage_deref_unlocked(&entry);
840 mutex_lock(&dev->struct_mutex);
844 mutex_unlock(&dev->struct_mutex);
848 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
849 if (unlikely(!node)) {
850 mutex_unlock(&dev->struct_mutex);
854 mutex_unlock(&dev->struct_mutex);
856 mem->mem_type = mem_type;
860 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
863 uint64_t mask, uint32_t *res_mask)
865 uint64_t cur_flags = drm_bo_type_flags(mem_type);
868 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
870 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
871 cur_flags |= DRM_BO_FLAG_CACHED;
872 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
873 cur_flags |= DRM_BO_FLAG_MAPPABLE;
874 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
875 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
877 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
880 if (mem_type == DRM_BO_MEM_LOCAL) {
881 *res_mask = cur_flags;
885 flag_diff = (mask ^ cur_flags);
886 if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
887 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
889 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
890 (!(mask & DRM_BO_FLAG_CACHED) ||
891 (mask & DRM_BO_FLAG_FORCE_CACHING)))
894 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
895 ((mask & DRM_BO_FLAG_MAPPABLE) ||
896 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
899 *res_mask = cur_flags;
904 * Creates space for memory region @mem according to its type.
906 * This function first searches for free space in compatible memory types in
907 * the priority order defined by the driver. If free space isn't found, then
908 * drm_bo_mem_force_space is attempted in priority order to evict and find
911 int drm_bo_mem_space(struct drm_buffer_object *bo,
912 struct drm_bo_mem_reg *mem, int no_wait)
914 struct drm_device *dev = bo->dev;
915 struct drm_buffer_manager *bm = &dev->bm;
916 struct drm_mem_type_manager *man;
918 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
919 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
921 uint32_t mem_type = DRM_BO_MEM_LOCAL;
926 struct drm_mm_node *node = NULL;
930 for (i = 0; i < num_prios; ++i) {
932 man = &bm->man[mem_type];
934 type_ok = drm_bo_mt_compatible(man,
935 bo->type == drm_bo_type_user,
936 mem_type, mem->proposed_flags,
942 if (mem_type == DRM_BO_MEM_LOCAL)
945 if ((mem_type == bo->pinned_mem_type) &&
946 (bo->pinned_node != NULL)) {
947 node = bo->pinned_node;
951 mutex_lock(&dev->struct_mutex);
952 if (man->has_type && man->use_type) {
954 node = drm_mm_search_free(&man->manager, mem->num_pages,
955 mem->page_alignment, 1);
957 node = drm_mm_get_block(node, mem->num_pages,
958 mem->page_alignment);
960 mutex_unlock(&dev->struct_mutex);
965 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
967 mem->mem_type = mem_type;
968 mem->flags = cur_flags;
975 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
976 prios = dev->driver->bo_driver->mem_busy_prio;
978 for (i = 0; i < num_prios; ++i) {
980 man = &bm->man[mem_type];
985 if (!drm_bo_mt_compatible(man,
986 bo->type == drm_bo_type_user,
992 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
994 if (ret == 0 && mem->mm_node) {
995 mem->flags = cur_flags;
1003 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
1006 EXPORT_SYMBOL(drm_bo_mem_space);
1009 * drm_bo_modify_proposed_flags:
1011 * @bo: the buffer object getting new flags
1013 * @new_flags: the new set of proposed flag bits
1015 * @new_mask: the mask of bits changed in new_flags
1017 * Modify the proposed_flag bits in @bo
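 *
 * For example (illustrative only), calling with
 * new_flags = DRM_BO_FLAG_MEM_VRAM and new_mask = DRM_BO_MASK_MEM asks to
 * move the buffer to VRAM while leaving every bit outside the memory-type
 * mask (caching, access rights, ...) at its current proposed value.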
1019 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
1020 uint64_t new_flags, uint64_t new_mask)
1022 uint32_t new_access;
1024 /* Copy unchanging bits from existing proposed_flags */
1025 DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
1027 if (bo->type == drm_bo_type_user &&
1028 ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1029 (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1030 DRM_ERROR("User buffers require cache-coherent memory.\n");
1034 if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1035 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
1039 if (likely(new_mask & DRM_BO_MASK_MEM) &&
1040 (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1041 !DRM_SUSER(DRM_CURPROC)) {
1042 if (likely(bo->mem.flags & new_flags & new_mask &
1044 new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1045 (bo->mem.flags & DRM_BO_MASK_MEM);
1047 DRM_ERROR("Incompatible memory type specification "
1048 "for NO_EVICT buffer.\n");
1053 if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1054 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1058 new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1061 if (new_access == 0) {
1062 DRM_ERROR("Invalid buffer object rwx properties\n");
1066 bo->mem.proposed_flags = new_flags;
1071 * Call dev->struct_mutex locked.
1074 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1075 uint32_t handle, int check_owner)
1077 struct drm_user_object *uo;
1078 struct drm_buffer_object *bo;
1080 uo = drm_lookup_user_object(file_priv, handle);
1082 if (!uo || (uo->type != drm_buffer_type)) {
1083 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1087 if (check_owner && file_priv != uo->owner) {
1088 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1092 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1093 atomic_inc(&bo->usage);
1096 EXPORT_SYMBOL(drm_lookup_buffer_object);
1099 * Call bo->mutex locked.
1100 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
1101 * Unlike drm_bo_busy(), this function doesn't do any fence flushing.
1104 static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
1106 struct drm_fence_object *fence = bo->fence;
1108 if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1112 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1113 drm_fence_usage_deref_unlocked(&bo->fence);
1121 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1125 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1126 if (bo->mem.mm_node)
1127 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1131 EXPORT_SYMBOL(drm_bo_evict_cached);
1133 * Wait until a buffer is unmapped.
1136 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1140 if (likely(atomic_read(&bo->mapped)) == 0)
1143 if (unlikely(no_wait))
1147 mutex_unlock(&bo->mutex);
1148 ret = wait_event_interruptible(bo->event_queue,
1149 atomic_read(&bo->mapped) == 0);
1150 mutex_lock(&bo->mutex);
1151 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
1153 if (ret == -ERESTARTSYS)
1155 } while((ret == 0) && atomic_read(&bo->mapped) > 0);
1161 * Fill in the ioctl reply argument with buffer info.
1165 void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1166 struct drm_bo_info_rep *rep)
1171 rep->handle = bo->base.hash.key;
1172 rep->flags = bo->mem.flags;
1173 rep->size = bo->num_pages * PAGE_SIZE;
1174 rep->offset = bo->offset;
1177 * drm_bo_type_device buffers have user-visible
1178 * handles which can be used to share buffers across
1179 * processes. Hand that back to the application
1181 if (bo->type == drm_bo_type_device)
1182 rep->arg_handle = bo->map_list.user_token;
1184 rep->arg_handle = 0;
1186 rep->proposed_flags = bo->mem.proposed_flags;
1187 rep->buffer_start = bo->buffer_start;
1188 rep->fence_flags = bo->fence_type;
1190 rep->page_alignment = bo->mem.page_alignment;
1192 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
1193 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1197 EXPORT_SYMBOL(drm_bo_fill_rep_arg);
1200 * Wait for buffer idle and register that we've mapped the buffer.
1201 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1202 * so that if the client dies, the mapping is automatically
1206 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1207 uint32_t map_flags, unsigned hint,
1208 struct drm_bo_info_rep *rep)
1210 struct drm_buffer_object *bo;
1211 struct drm_device *dev = file_priv->minor->dev;
1213 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1215 mutex_lock(&dev->struct_mutex);
1216 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1217 mutex_unlock(&dev->struct_mutex);
1222 mutex_lock(&bo->mutex);
1224 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1226 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1230 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1231 drm_bo_evict_cached(bo);
1233 } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1235 atomic_inc(&bo->mapped);
1236 mutex_lock(&dev->struct_mutex);
1237 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1238 mutex_unlock(&dev->struct_mutex);
1240 if (atomic_dec_and_test(&bo->mapped))
1241 wake_up_all(&bo->event_queue);
1244 drm_bo_fill_rep_arg(bo, rep);
1247 mutex_unlock(&bo->mutex);
1248 drm_bo_usage_deref_unlocked(&bo);
1253 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1255 struct drm_device *dev = file_priv->minor->dev;
1256 struct drm_buffer_object *bo;
1257 struct drm_ref_object *ro;
1260 mutex_lock(&dev->struct_mutex);
1262 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1268 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1274 drm_remove_ref_object(file_priv, ro);
1275 drm_bo_usage_deref_locked(&bo);
1277 mutex_unlock(&dev->struct_mutex);
1282 * Call dev->struct_mutex locked.
1285 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1286 struct drm_user_object *uo,
1287 enum drm_ref_type action)
1289 struct drm_buffer_object *bo =
1290 drm_user_object_entry(uo, struct drm_buffer_object, base);
1293 * We DON'T want to take the bo->lock here, because we want to
1294 * hold it when we wait for unmapped buffer.
1297 BUG_ON(action != _DRM_REF_TYPE1);
1299 if (atomic_dec_and_test(&bo->mapped))
1300 wake_up_all(&bo->event_queue);
1305 * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1308 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1309 int no_wait, int move_unfenced)
1311 struct drm_device *dev = bo->dev;
1312 struct drm_buffer_manager *bm = &dev->bm;
1314 struct drm_bo_mem_reg mem;
1316 BUG_ON(bo->fence != NULL);
1318 mem.num_pages = bo->num_pages;
1319 mem.size = mem.num_pages << PAGE_SHIFT;
1320 mem.proposed_flags = new_mem_flags;
1321 mem.page_alignment = bo->mem.page_alignment;
1323 mutex_lock(&bm->evict_mutex);
1324 mutex_lock(&dev->struct_mutex);
1325 list_del_init(&bo->lru);
1326 mutex_unlock(&dev->struct_mutex);
1329 * Determine where to move the buffer.
1331 ret = drm_bo_mem_space(bo, &mem, no_wait);
1335 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1338 mutex_lock(&dev->struct_mutex);
1339 if (ret || !move_unfenced) {
1341 if (mem.mm_node != bo->pinned_node)
1342 drm_mm_put_block(mem.mm_node);
1345 drm_bo_add_to_lru(bo);
1346 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1347 wake_up_all(&bo->event_queue);
1348 DRM_FLAG_MASKED(bo->priv_flags, 0,
1349 _DRM_BO_FLAG_UNFENCED);
1352 list_add_tail(&bo->lru, &bm->unfenced);
1353 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1354 _DRM_BO_FLAG_UNFENCED);
1356 mutex_unlock(&dev->struct_mutex);
1357 mutex_unlock(&bm->evict_mutex);
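/*
 * Check whether the buffer's current placement already satisfies its
 * proposed flags (memory type, caching, mappability).  Returns nonzero
 * if no move is needed.
 */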
1361 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1363 uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1365 if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1367 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1368 (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1369 (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1372 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1373 ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1374 (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1380 * drm_buffer_object_validate:
1382 * @bo: the buffer object to modify
1384 * @fence_class: the new fence class covering this buffer
1386 * @move_unfenced: a boolean indicating whether switching the
1387 * memory space of this buffer should cause the buffer to
1388 * be placed on the unfenced list.
1390 * @no_wait: whether this function should return -EBUSY instead
1393 * Change buffer access parameters. This can involve moving
1394 * the buffer to the correct memory type, pinning the buffer
1395 * or changing the class/type of fence covering this buffer
1397 * Must be called with bo locked.
1400 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1401 uint32_t fence_class,
1402 int move_unfenced, int no_wait,
1405 struct drm_device *dev = bo->dev;
1406 struct drm_buffer_manager *bm = &dev->bm;
1410 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1414 DRM_ERROR("Failed moving buffer.\n");
1416 DRM_ERROR("Out of aperture space or "
1417 "DRM memory quota.\n");
1426 if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1427 bo->pinned_mem_type = bo->mem.mem_type;
1428 mutex_lock(&dev->struct_mutex);
1429 list_del_init(&bo->pinned_lru);
1430 drm_bo_add_to_pinned_lru(bo);
1432 if (bo->pinned_node != bo->mem.mm_node) {
1433 if (bo->pinned_node != NULL)
1434 drm_mm_put_block(bo->pinned_node);
1435 bo->pinned_node = bo->mem.mm_node;
1438 mutex_unlock(&dev->struct_mutex);
1440 } else if (bo->pinned_node != NULL) {
1442 mutex_lock(&dev->struct_mutex);
1444 if (bo->pinned_node != bo->mem.mm_node)
1445 drm_mm_put_block(bo->pinned_node);
1447 list_del_init(&bo->pinned_lru);
1448 bo->pinned_node = NULL;
1449 mutex_unlock(&dev->struct_mutex);
1454 * We might need to add a TTM.
1457 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1458 ret = drm_bo_add_ttm(bo);
1463 * Validation has succeeded, move the access and other
1464 * non-mapping-related flag bits from the proposed flags to
1468 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1471 * Finally, adjust lru to be sure.
1474 mutex_lock(&dev->struct_mutex);
1476 if (move_unfenced) {
1477 list_add_tail(&bo->lru, &bm->unfenced);
1478 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1479 _DRM_BO_FLAG_UNFENCED);
1481 drm_bo_add_to_lru(bo);
1482 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1483 wake_up_all(&bo->event_queue);
1484 DRM_FLAG_MASKED(bo->priv_flags, 0,
1485 _DRM_BO_FLAG_UNFENCED);
1488 mutex_unlock(&dev->struct_mutex);
1494 * This function is called with bo->mutex locked, but may release it
1495 * temporarily to wait for events.
1498 static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
1502 uint32_t fence_class,
1506 struct drm_device *dev = bo->dev;
1507 struct drm_bo_driver *driver = dev->driver->bo_driver;
1512 DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1513 (unsigned long long) bo->mem.proposed_flags,
1514 (unsigned long long) bo->mem.flags);
1516 ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1520 ret = drm_bo_wait_unmapped(bo, no_wait);
1524 ret = driver->fence_type(bo, &fence_class, &ftype);
1527 DRM_ERROR("Driver did not support given buffer permissions.\n");
1532 * We're switching command submission mechanism,
1533 * or cannot simply rely on the hardware serializing for us.
1534 * Insert a driver-dependent barrier or wait for buffer idle.
1537 if ((fence_class != bo->fence_class) ||
1538 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1541 if (driver->command_stream_barrier) {
1542 ret = driver->command_stream_barrier(bo,
1547 if (ret && ret != -EAGAIN)
1548 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1554 bo->new_fence_class = fence_class;
1555 bo->new_fence_type = ftype;
1558 * Check whether we need to move buffer.
1562 if (!drm_bo_mem_compat(&bo->mem)) {
1564 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1571 * drm_bo_do_validate:
1573 * @bo: the buffer object
1575 * @flags: access rights, mapping parameters and cacheability. See
1576 * the DRM_BO_FLAG_* values in drm.h
1578 * @mask: Which flag values to change; this allows callers to modify
1579 * things without knowing the current state of other flags.
1581 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1584 * @fence_class: a driver-specific way of doing fences. Presumably,
1585 * this would be used if the driver had more than one submission and
1586 * fencing mechanism. At this point, there isn't any use of this
1587 * from the user mode code.
1589 * @rep: To be stuffed with the reply from validation
1591 * 'validate' a buffer object. This changes where the buffer is
1592 * located, along with changing access modes.
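 *
 * For example (an illustrative sketch, not a call that appears in this
 * file), a driver pinning one of its own kernel buffer objects in VRAM
 * might do:
 *
 *	ret = drm_bo_do_validate(bo,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_MASK_MEM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_HINT_DONT_FENCE, 0, NULL);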
1595 int drm_bo_do_validate(struct drm_buffer_object *bo,
1596 uint64_t flags, uint64_t mask, uint32_t hint,
1597 uint32_t fence_class,
1598 struct drm_bo_info_rep *rep)
1601 int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1604 mutex_lock(&bo->mutex);
1607 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1609 ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
1610 fence_class, no_wait,
1615 } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1617 ret = drm_buffer_object_validate(bo,
1619 !(hint & DRM_BO_HINT_DONT_FENCE),
1623 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
1626 drm_bo_fill_rep_arg(bo, rep);
1628 mutex_unlock(&bo->mutex);
1632 EXPORT_SYMBOL(drm_bo_do_validate);
1635 * drm_bo_handle_validate
1637 * @file_priv: the drm file private, used to get a handle to the user context
1639 * @handle: the buffer object handle
1641 * @flags: access rights, mapping parameters and cacheability. See
1642 * the DRM_BO_FLAG_* values in drm.h
1644 * @mask: Which flag values to change; this allows callers to modify
1645 * things without knowing the current state of other flags.
1647 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1650 * @fence_class: a driver-specific way of doing fences. Presumably,
1651 * this would be used if the driver had more than one submission and
1652 * fencing mechanism. At this point, there isn't any use of this
1653 * from the user mode code.
1655 * @rep: To be stuffed with the reply from validation
1657 * @bp_rep: To be stuffed with the buffer object pointer
1659 * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
1660 * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
1661 * This is a convenience wrapper only.
1664 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1665 uint64_t flags, uint64_t mask,
1667 uint32_t fence_class,
1668 struct drm_bo_info_rep *rep,
1669 struct drm_buffer_object **bo_rep)
1671 struct drm_device *dev = file_priv->minor->dev;
1672 struct drm_buffer_object *bo;
1675 mutex_lock(&dev->struct_mutex);
1676 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1677 mutex_unlock(&dev->struct_mutex);
1682 if (bo->base.owner != file_priv)
1683 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1685 ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1690 drm_bo_usage_deref_unlocked(&bo);
1694 EXPORT_SYMBOL(drm_bo_handle_validate);
1697 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1698 struct drm_bo_info_rep *rep)
1700 struct drm_device *dev = file_priv->minor->dev;
1701 struct drm_buffer_object *bo;
1703 mutex_lock(&dev->struct_mutex);
1704 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1705 mutex_unlock(&dev->struct_mutex);
1710 mutex_lock(&bo->mutex);
1713 * FIXME: Quick busy here?
1717 drm_bo_fill_rep_arg(bo, rep);
1718 mutex_unlock(&bo->mutex);
1719 drm_bo_usage_deref_unlocked(&bo);
1723 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1725 struct drm_bo_info_rep *rep)
1727 struct drm_device *dev = file_priv->minor->dev;
1728 struct drm_buffer_object *bo;
1729 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1732 mutex_lock(&dev->struct_mutex);
1733 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1734 mutex_unlock(&dev->struct_mutex);
1739 mutex_lock(&bo->mutex);
1740 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
1744 drm_bo_fill_rep_arg(bo, rep);
1746 mutex_unlock(&bo->mutex);
1747 drm_bo_usage_deref_unlocked(&bo);
1751 int drm_buffer_object_create(struct drm_device *dev,
1753 enum drm_bo_type type,
1756 uint32_t page_alignment,
1757 unsigned long buffer_start,
1758 struct drm_buffer_object **buf_obj)
1760 struct drm_buffer_manager *bm = &dev->bm;
1761 struct drm_buffer_object *bo;
1763 unsigned long num_pages;
1765 size += buffer_start & ~PAGE_MASK;
1766 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1767 if (num_pages == 0) {
1768 DRM_ERROR("Illegal buffer object size %ld.\n", size);
1772 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1777 mutex_init(&bo->mutex);
1778 mutex_lock(&bo->mutex);
1780 atomic_set(&bo->usage, 1);
1781 atomic_set(&bo->mapped, 0);
1782 DRM_INIT_WAITQUEUE(&bo->event_queue);
1783 INIT_LIST_HEAD(&bo->lru);
1784 INIT_LIST_HEAD(&bo->pinned_lru);
1785 INIT_LIST_HEAD(&bo->ddestroy);
1786 #ifdef DRM_ODD_MM_COMPAT
1787 INIT_LIST_HEAD(&bo->p_mm_list);
1788 INIT_LIST_HEAD(&bo->vma_list);
1792 bo->num_pages = num_pages;
1793 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1794 bo->mem.num_pages = bo->num_pages;
1795 bo->mem.mm_node = NULL;
1796 bo->mem.page_alignment = page_alignment;
1797 bo->buffer_start = buffer_start & PAGE_MASK;
1799 bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1800 DRM_BO_FLAG_MAPPABLE);
1801 bo->mem.proposed_flags = 0;
1802 atomic_inc(&bm->count);
1804 * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1806 ret = drm_bo_modify_proposed_flags (bo, flags, flags);
1811 * For drm_bo_type_device buffers, allocate
1812 * address space from the device so that applications
1813 * can mmap the buffer from there
1815 if (bo->type == drm_bo_type_device) {
1816 mutex_lock(&dev->struct_mutex);
1817 ret = drm_bo_setup_vm_locked(bo);
1818 mutex_unlock(&dev->struct_mutex);
1823 mutex_unlock(&bo->mutex);
1824 ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
1827 goto out_err_unlocked;
1833 mutex_unlock(&bo->mutex);
1835 drm_bo_usage_deref_unlocked(&bo);
1838 EXPORT_SYMBOL(drm_buffer_object_create);
1841 static int drm_bo_add_user_object(struct drm_file *file_priv,
1842 struct drm_buffer_object *bo, int shareable)
1844 struct drm_device *dev = file_priv->minor->dev;
1847 mutex_lock(&dev->struct_mutex);
1848 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1852 bo->base.remove = drm_bo_base_deref_locked;
1853 bo->base.type = drm_buffer_type;
1854 bo->base.ref_struct_locked = NULL;
1855 bo->base.unref = drm_buffer_user_object_unmap;
1858 mutex_unlock(&dev->struct_mutex);
1862 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1864 struct drm_bo_create_arg *arg = data;
1865 struct drm_bo_create_req *req = &arg->d.req;
1866 struct drm_bo_info_rep *rep = &arg->d.rep;
1867 struct drm_buffer_object *entry;
1868 enum drm_bo_type bo_type;
1871 DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1872 (int)(req->size / 1024), req->page_alignment * 4);
1874 if (!dev->bm.initialized) {
1875 DRM_ERROR("Buffer object manager is not initialized.\n");
1880 * If the buffer creation request comes in with a starting address,
1881 * it points at the desired user pages to map. Otherwise, create
1882 * a drm_bo_type_device buffer, which uses pages allocated from the kernel
1884 bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1887 * User buffers cannot be shared
1889 if (bo_type == drm_bo_type_user)
1890 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1892 ret = drm_buffer_object_create(file_priv->minor->dev,
1893 req->size, bo_type, req->flags,
1894 req->hint, req->page_alignment,
1895 req->buffer_start, &entry);
1899 ret = drm_bo_add_user_object(file_priv, entry,
1900 req->flags & DRM_BO_FLAG_SHAREABLE);
1902 drm_bo_usage_deref_unlocked(&entry);
1906 mutex_lock(&entry->mutex);
1907 drm_bo_fill_rep_arg(entry, rep);
1908 mutex_unlock(&entry->mutex);
1914 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1915 void *data, struct drm_file *file_priv)
1917 struct drm_bo_map_wait_idle_arg *arg = data;
1918 struct drm_bo_info_req *req = &arg->d.req;
1919 struct drm_bo_info_rep *rep = &arg->d.rep;
1920 struct drm_buffer_object *bo;
1923 if (!dev->bm.initialized) {
1924 DRM_ERROR("Buffer object manager is not initialized.\n");
1928 ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
1932 mutex_lock(&dev->struct_mutex);
1933 bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
1934 mutex_unlock(&dev->struct_mutex);
1939 if (bo->base.owner != file_priv)
1940 req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1942 ret = drm_bo_do_validate(bo, req->flags, req->mask,
1943 req->hint | DRM_BO_HINT_DONT_FENCE,
1944 bo->fence_class, rep);
1946 drm_bo_usage_deref_unlocked(&bo);
1948 (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1953 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1955 struct drm_bo_map_wait_idle_arg *arg = data;
1956 struct drm_bo_info_req *req = &arg->d.req;
1957 struct drm_bo_info_rep *rep = &arg->d.rep;
1959 if (!dev->bm.initialized) {
1960 DRM_ERROR("Buffer object manager is not initialized.\n");
1964 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1972 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1974 struct drm_bo_handle_arg *arg = data;
1976 if (!dev->bm.initialized) {
1977 DRM_ERROR("Buffer object manager is not initialized.\n");
1981 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1986 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1988 struct drm_bo_reference_info_arg *arg = data;
1989 struct drm_bo_handle_arg *req = &arg->d.req;
1990 struct drm_bo_info_rep *rep = &arg->d.rep;
1991 struct drm_user_object *uo;
1994 if (!dev->bm.initialized) {
1995 DRM_ERROR("Buffer object manager is not initialized.\n");
1999 ret = drm_user_object_ref(file_priv, req->handle,
2000 drm_buffer_type, &uo);
2004 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2011 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2013 struct drm_bo_handle_arg *arg = data;
2016 if (!dev->bm.initialized) {
2017 DRM_ERROR("Buffer object manager is not initialized.\n");
2021 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2025 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2027 struct drm_bo_reference_info_arg *arg = data;
2028 struct drm_bo_handle_arg *req = &arg->d.req;
2029 struct drm_bo_info_rep *rep = &arg->d.rep;
2032 if (!dev->bm.initialized) {
2033 DRM_ERROR("Buffer object manager is not initialized.\n");
2037 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2044 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2046 struct drm_bo_map_wait_idle_arg *arg = data;
2047 struct drm_bo_info_req *req = &arg->d.req;
2048 struct drm_bo_info_rep *rep = &arg->d.rep;
2050 if (!dev->bm.initialized) {
2051 DRM_ERROR("Buffer object manager is not initialized.\n");
2055 ret = drm_bo_handle_wait(file_priv, req->handle,
2063 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2068 struct drm_device *dev = bo->dev;
2071 mutex_lock(&bo->mutex);
2073 ret = drm_bo_expire_fence(bo, allow_errors);
2078 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2079 mutex_lock(&dev->struct_mutex);
2080 list_del_init(&bo->pinned_lru);
2081 if (bo->pinned_node == bo->mem.mm_node)
2082 bo->pinned_node = NULL;
2083 if (bo->pinned_node != NULL) {
2084 drm_mm_put_block(bo->pinned_node);
2085 bo->pinned_node = NULL;
2087 mutex_unlock(&dev->struct_mutex);
2090 if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2091 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
2092 "cleanup. Removing flag and evicting.\n");
2093 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2094 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2097 if (bo->mem.mem_type == mem_type)
2098 ret = drm_bo_evict(bo, mem_type, 0);
2105 DRM_ERROR("Cleanup eviction failed\n");
2110 mutex_unlock(&bo->mutex);
2115 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2119 return list_entry(list, struct drm_buffer_object, pinned_lru);
2121 return list_entry(list, struct drm_buffer_object, lru);
2125 * dev->struct_mutex locked.
2128 static int drm_bo_force_list_clean(struct drm_device *dev,
2129 struct list_head *head,
2135 struct list_head *list, *next, *prev;
2136 struct drm_buffer_object *entry, *nentry;
2141 * The list traversal is a bit odd here, because an item may
2142 * disappear from the list when we release the struct_mutex or
2143 * when we decrease the usage count. Also we're not guaranteed
2144 * to drain pinned lists, so we can't always restart.
2149 list_for_each_safe(list, next, head) {
2152 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2153 atomic_inc(&entry->usage);
2155 atomic_dec(&nentry->usage);
2160 * Protect the next item from destruction, so we can check
2161 * its list pointers later on.
2165 nentry = drm_bo_entry(next, pinned_list);
2166 atomic_inc(&nentry->usage);
2168 mutex_unlock(&dev->struct_mutex);
2170 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2172 mutex_lock(&dev->struct_mutex);
2174 drm_bo_usage_deref_locked(&entry);
2179 * Has the next item disappeared from the list?
2182 do_restart = ((next->prev != list) && (next->prev != prev));
2184 if (nentry != NULL && do_restart)
2185 drm_bo_usage_deref_locked(&nentry);
2193 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
2195 struct drm_buffer_manager *bm = &dev->bm;
2196 struct drm_mem_type_manager *man = &bm->man[mem_type];
2199 if (mem_type >= DRM_BO_MEM_TYPES) {
2200 DRM_ERROR("Illegal memory type %d\n", mem_type);
2204 if (!man->has_type) {
2205 DRM_ERROR("Trying to take down uninitialized "
2206 "memory manager type %u\n", mem_type);
2210 if ((man->kern_init_type) && (kern_clean == 0)) {
2211 DRM_ERROR("Trying to take down kernel initialized "
2212 "memory manager type %u\n", mem_type);
2221 BUG_ON(!list_empty(&bm->unfenced));
2222 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2223 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2225 if (drm_mm_clean(&man->manager)) {
2226 drm_mm_takedown(&man->manager);
2234 EXPORT_SYMBOL(drm_bo_clean_mm);
2237 * Evict all buffers of a particular mem_type, but leave memory manager
2238 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2239 * point since we have the hardware lock.
2242 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2245 struct drm_buffer_manager *bm = &dev->bm;
2246 struct drm_mem_type_manager *man = &bm->man[mem_type];
2248 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2249 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2253 if (!man->has_type) {
2254 DRM_ERROR("Memory type %u has not been initialized.\n",
2259 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2262 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2267 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
2268 unsigned long p_offset, unsigned long p_size,
2271 struct drm_buffer_manager *bm = &dev->bm;
2273 struct drm_mem_type_manager *man;
2275 if (type >= DRM_BO_MEM_TYPES) {
2276 DRM_ERROR("Illegal memory type %d\n", type);
2280 man = &bm->man[type];
2281 if (man->has_type) {
2282 DRM_ERROR("Memory manager already initialized for type %d\n",
2287 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2292 if (type != DRM_BO_MEM_LOCAL) {
2294 DRM_ERROR("Zero size memory manager type %d\n", type);
2297 ret = drm_mm_init(&man->manager, p_offset, p_size);
2303 man->kern_init_type = kern_init;
2306 INIT_LIST_HEAD(&man->lru);
2307 INIT_LIST_HEAD(&man->pinned);
2311 EXPORT_SYMBOL(drm_bo_init_mm);
2314 * This function is intended to be called on drm driver unload.
2315 * If you decide to call it from lastclose, you must protect the call
2316 * from a potentially racing drm_bo_driver_init in firstopen.
2317 * (This may happen on X server restart).
2320 int drm_bo_driver_finish(struct drm_device *dev)
2322 struct drm_buffer_manager *bm = &dev->bm;
2324 unsigned i = DRM_BO_MEM_TYPES;
2325 struct drm_mem_type_manager *man;
2327 mutex_lock(&dev->struct_mutex);
2329 if (!bm->initialized)
2331 bm->initialized = 0;
2335 if (man->has_type) {
2337 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
2339 DRM_ERROR("DRM memory manager type %d "
2340 "is not clean.\n", i);
2345 mutex_unlock(&dev->struct_mutex);
2347 if (!cancel_delayed_work(&bm->wq))
2348 flush_scheduled_work();
2350 mutex_lock(&dev->struct_mutex);
2351 drm_bo_delayed_delete(dev, 1);
2352 if (list_empty(&bm->ddestroy))
2353 DRM_DEBUG("Delayed destroy list was clean\n");
2355 if (list_empty(&bm->man[0].lru))
2356 DRM_DEBUG("Swap list was clean\n");
2358 if (list_empty(&bm->man[0].pinned))
2359 DRM_DEBUG("NO_MOVE list was clean\n");
2361 if (list_empty(&bm->unfenced))
2362 DRM_DEBUG("Unfenced list was clean\n");
2364 if (bm->dummy_read_page) {
2365 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2366 ClearPageReserved(bm->dummy_read_page);
2368 __free_page(bm->dummy_read_page);
2372 mutex_unlock(&dev->struct_mutex);
2375 EXPORT_SYMBOL(drm_bo_driver_finish);
2378 * This function is intended to be called on drm driver load.
2379 * If you decide to call it from firstopen, you must protect the call
2380 * from a potentially racing drm_bo_driver_finish in lastclose.
2381 * (This may happen on X server restart).
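 *
 * A driver load hook would typically do something like the following
 * (illustrative sketch; the TT/VRAM page counts are made up and some
 * drivers instead initialize those managers from user space through
 * drm_mm_init_ioctl()):
 *
 *	ret = drm_bo_driver_init(dev);
 *	if (!ret)
 *		ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages, 1);
 *	if (!ret)
 *		ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_pages, 1);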
2384 int drm_bo_driver_init(struct drm_device *dev)
2386 struct drm_bo_driver *driver = dev->driver->bo_driver;
2387 struct drm_buffer_manager *bm = &dev->bm;
2390 bm->dummy_read_page = NULL;
2391 drm_bo_init_lock(&bm->bm_lock);
2392 mutex_lock(&dev->struct_mutex);
2396 bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2397 if (!bm->dummy_read_page) {
2402 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2403 SetPageReserved(bm->dummy_read_page);
2407 * Initialize the system memory buffer type.
2408 * Other types need to be driver / IOCTL initialized.
2410 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
2412 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2413 ClearPageReserved(bm->dummy_read_page);
2415 __free_page(bm->dummy_read_page);
2416 bm->dummy_read_page = NULL;
2420 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2421 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2423 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2425 bm->initialized = 1;
2427 atomic_set(&bm->count, 0);
2429 INIT_LIST_HEAD(&bm->unfenced);
2430 INIT_LIST_HEAD(&bm->ddestroy);
2432 mutex_unlock(&dev->struct_mutex);
2435 EXPORT_SYMBOL(drm_bo_driver_init);
2437 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2439 struct drm_mm_init_arg *arg = data;
2440 struct drm_buffer_manager *bm = &dev->bm;
2441 struct drm_bo_driver *driver = dev->driver->bo_driver;
2445 DRM_ERROR("Buffer objects are not supported by this driver\n");
2449 ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2454 if (arg->magic != DRM_BO_INIT_MAGIC) {
2455 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2456 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2459 if (arg->major != DRM_BO_INIT_MAJOR) {
2460 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2461 "\tversion don't match. Got %d, expected %d.\n",
2462 arg->major, DRM_BO_INIT_MAJOR);
2466 mutex_lock(&dev->struct_mutex);
2467 if (!bm->initialized) {
2468 DRM_ERROR("DRM memory manager was not initialized.\n");
2471 if (arg->mem_type == 0) {
2472 DRM_ERROR("System memory buffers already initialized.\n");
2475 ret = drm_bo_init_mm(dev, arg->mem_type,
2476 arg->p_offset, arg->p_size, 0);
2479 mutex_unlock(&dev->struct_mutex);
2480 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2488 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2490 struct drm_mm_type_arg *arg = data;
2491 struct drm_buffer_manager *bm = &dev->bm;
2492 struct drm_bo_driver *driver = dev->driver->bo_driver;
2496 DRM_ERROR("Buffer objects are not supported by this driver\n");
2500 ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
2504 mutex_lock(&dev->struct_mutex);
2506 if (!bm->initialized) {
2507 DRM_ERROR("DRM memory manager was not initialized\n");
2510 if (arg->mem_type == 0) {
2511 DRM_ERROR("No takedown for System memory buffers.\n");
2515 if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
2517 DRM_ERROR("Memory manager type %d not clean. "
2518 "Delaying takedown\n", arg->mem_type);
2522 mutex_unlock(&dev->struct_mutex);
2523 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2531 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2533 struct drm_mm_type_arg *arg = data;
2534 struct drm_bo_driver *driver = dev->driver->bo_driver;
2538 DRM_ERROR("Buffer objects are not supported by this driver\n");
2542 if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2543 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2547 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2548 ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
2553 mutex_lock(&dev->struct_mutex);
2554 ret = drm_bo_lock_mm(dev, arg->mem_type);
2555 mutex_unlock(&dev->struct_mutex);
2557 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2564 int drm_mm_unlock_ioctl(struct drm_device *dev,
2566 struct drm_file *file_priv)
2568 struct drm_mm_type_arg *arg = data;
2569 struct drm_bo_driver *driver = dev->driver->bo_driver;
2573 DRM_ERROR("Buffer objects are not supported by this driver\n");
2577 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2578 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2586 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2588 struct drm_mm_info_arg *arg = data;
2589 struct drm_buffer_manager *bm = &dev->bm;
2590 struct drm_bo_driver *driver = dev->driver->bo_driver;
2591 struct drm_mem_type_manager *man;
2593 int mem_type = arg->mem_type;
2596 DRM_ERROR("Buffer objects are not supported by this driver\n");
2600 if (mem_type >= DRM_BO_MEM_TYPES) {
2601 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2605 mutex_lock(&dev->struct_mutex);
2606 if (!bm->initialized) {
2607 DRM_ERROR("DRM memory manager was not initialized\n");
2613 man = &bm->man[arg->mem_type];
2615 arg->p_size = man->size;
2618 mutex_unlock(&dev->struct_mutex);
2623 * buffer object vm functions.
2626 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2628 struct drm_buffer_manager *bm = &dev->bm;
2629 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2631 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2632 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2635 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2638 if (mem->flags & DRM_BO_FLAG_CACHED)
2643 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2646 * \c Get the PCI offset for the buffer object memory.
2648 * \param bo The buffer object.
2649 * \param bus_base On return the base of the PCI region
2650 * \param bus_offset On return the byte offset into the PCI region
2651 * \param bus_size On return the byte size of the buffer object or zero if
2652 * the buffer object memory is not accessible through a PCI region.
2653 * \return Failure indication.
2655 * Returns -EINVAL if the buffer object is currently not mappable.
2656 * Otherwise returns zero.
2659 int drm_bo_pci_offset(struct drm_device *dev,
2660 struct drm_bo_mem_reg *mem,
2661 unsigned long *bus_base,
2662 unsigned long *bus_offset, unsigned long *bus_size)
2664 struct drm_buffer_manager *bm = &dev->bm;
2665 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2668 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2671 if (drm_mem_reg_is_pci(dev, mem)) {
2672 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2673 *bus_size = mem->num_pages << PAGE_SHIFT;
2674 *bus_base = man->io_offset;
2681 * \c Kill all user-space virtual mappings of this buffer object.
2683 * \param bo The buffer object.
2685 * Call bo->mutex locked.
2688 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2690 struct drm_device *dev = bo->dev;
2691 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2692 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2694 if (!dev->dev_mapping)
2697 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2701 * drm_bo_takedown_vm_locked:
2703 * @bo: the buffer object to remove any drm device mapping
2705 * Remove any associated vm mapping on the drm device node that
2706 * would have been created for a drm_bo_type_device buffer
2708 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2710 struct drm_map_list *list;
2711 drm_local_map_t *map;
2712 struct drm_device *dev = bo->dev;
2714 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2715 if (bo->type != drm_bo_type_device)
2718 list = &bo->map_list;
2719 if (list->user_token) {
2720 drm_ht_remove_item(&dev->map_hash, &list->hash);
2721 list->user_token = 0;
2723 if (list->file_offset_node) {
2724 drm_mm_put_block(list->file_offset_node);
2725 list->file_offset_node = NULL;
2732 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2734 list->user_token = 0ULL;
2735 drm_bo_usage_deref_locked(&bo);
2739 * drm_bo_setup_vm_locked:
2741 * @bo: the buffer to allocate address space for
2743 * Allocate address space in the drm device so that applications
2744 * can mmap the buffer and access the contents. This only
2745 * applies to drm_bo_type_device objects as others are not
2746 * placed in the drm device address space.
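 *
 * The start of the allocated range becomes list->user_token (in bytes);
 * drm_bo_fill_rep_arg() hands that token back to the application, which
 * can then mmap() the drm device node at that offset to reach the buffer.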
2748 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2750 struct drm_map_list *list = &bo->map_list;
2751 drm_local_map_t *map;
2752 struct drm_device *dev = bo->dev;
2754 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2755 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2761 map->type = _DRM_TTM;
2762 map->flags = _DRM_REMOVABLE;
2763 map->size = bo->mem.num_pages * PAGE_SIZE;
2764 atomic_inc(&bo->usage);
2765 map->handle = (void *)bo;
2767 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2768 bo->mem.num_pages, 0, 0);
2770 if (unlikely(!list->file_offset_node)) {
2771 drm_bo_takedown_vm_locked(bo);
2775 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2776 bo->mem.num_pages, 0);
2778 if (unlikely(!list->file_offset_node)) {
2779 drm_bo_takedown_vm_locked(bo);
2783 list->hash.key = list->file_offset_node->start;
2784 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2785 drm_bo_takedown_vm_locked(bo);
2789 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2794 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2795 struct drm_file *file_priv)
2797 struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2799 arg->major = DRM_BO_INIT_MAJOR;
2800 arg->minor = DRM_BO_INIT_MINOR;
2801 arg->patchlevel = DRM_BO_INIT_PATCH;