1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex also protects the buffer list heads, so to manipulate those we need
44 * both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48 * traversal will, in general, need to be restarted.
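 *
 * A minimal sketch of that restart pattern (illustration only, not a
 * function in this file), assuming "head" is a dev->struct_mutex protected
 * list linking buffer objects through their lru member:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	while (!list_empty(head)) {
 *		entry = list_entry(head->next, struct drm_buffer_object, lru);
 *		atomic_inc(&entry->usage);
 *		mutex_unlock(&dev->struct_mutex);
 *
 *		mutex_lock(&entry->mutex);	(lock order: bo, then struct_mutex)
 *		mutex_lock(&dev->struct_mutex);
 *
 *		... operate on entry, typically taking it off the list ...
 *
 *		mutex_unlock(&entry->mutex);
 *		drm_bo_usage_deref_locked(&entry);
 *		(struct_mutex is still held; the list may have changed, so restart)
 *	}
 *	mutex_unlock(&dev->struct_mutex);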
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
57 static inline uint32_t drm_bo_type_flags(unsigned type)
59 return (1 << (24 + type));
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
83 if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
84 man = &bo->dev->bm.man[bo->mem.mem_type];
85 list_add_tail(&bo->lru, &man->lru);
87 INIT_LIST_HEAD(&bo->lru);
91 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
93 #ifdef DRM_ODD_MM_COMPAT
96 if (!bo->map_list.map)
99 ret = drm_bo_lock_kmm(bo);
102 drm_bo_unmap_virtual(bo);
104 drm_bo_finish_unmap(bo);
106 if (!bo->map_list.map)
109 drm_bo_unmap_virtual(bo);
114 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
116 #ifdef DRM_ODD_MM_COMPAT
119 if (!bo->map_list.map)
122 ret = drm_bo_remap_bound(bo);
124 DRM_ERROR("Failed to remap a bound buffer object.\n"
125 "\tThis might cause a sigbus later.\n");
127 drm_bo_unlock_kmm(bo);
132 * Call bo->mutex locked.
135 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
137 struct drm_device *dev = bo->dev;
141 DRM_ASSERT_LOCKED(&bo->mutex);
145 case drm_bo_type_kernel:
146 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
150 case drm_bo_type_user:
151 case drm_bo_type_fake:
154 DRM_ERROR("Illegal buffer object type\n");
162 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
163 struct drm_bo_mem_reg * mem,
164 int evict, int no_wait)
166 struct drm_device *dev = bo->dev;
167 struct drm_buffer_manager *bm = &dev->bm;
168 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
169 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
170 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
171 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
174 if (old_is_pci || new_is_pci ||
175 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
176 ret = drm_bo_vm_pre_move(bo, old_is_pci);
181 * Create and bind a ttm if required.
184 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
185 ret = drm_bo_add_ttm(bo);
189 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
190 ret = drm_bind_ttm(bo->ttm, mem);
196 if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
198 struct drm_bo_mem_reg *old_mem = &bo->mem;
199 uint64_t save_flags = old_mem->flags;
200 uint64_t save_mask = old_mem->mask;
204 old_mem->mask = save_mask;
205 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
207 } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
208 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
210 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
212 } else if (dev->driver->bo_driver->move) {
213 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
217 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
224 if (old_is_pci || new_is_pci)
225 drm_bo_vm_post_move(bo);
227 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
229 dev->driver->bo_driver->invalidate_caches(dev,
232 DRM_ERROR("Cannot flush read caches\n");
235 DRM_FLAG_MASKED(bo->priv_flags,
236 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
237 _DRM_BO_FLAG_EVICTED);
240 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
241 bm->man[bo->mem.mem_type].gpu_offset;
247 if (old_is_pci || new_is_pci)
248 drm_bo_vm_post_move(bo);
250 new_man = &bm->man[bo->mem.mem_type];
251 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
252 drm_ttm_unbind(bo->ttm);
253 drm_destroy_ttm(bo->ttm);
261 * Call bo->mutex locked.
262 * Wait until the buffer is idle.
265 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
270 DRM_ASSERT_LOCKED(&bo->mutex);
273 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
274 drm_fence_usage_deref_unlocked(&bo->fence);
281 drm_fence_object_wait(bo->fence, lazy, ignore_signals,
286 drm_fence_usage_deref_unlocked(&bo->fence);
290 EXPORT_SYMBOL(drm_bo_wait);
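/*
 * Illustration only: waiting for a buffer to become idle, as the eviction
 * and move paths below do (lazy=0, ignore_signals=0):
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 0, no_wait);
 *	mutex_unlock(&bo->mutex);
 */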
292 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
294 struct drm_device *dev = bo->dev;
295 struct drm_buffer_manager *bm = &dev->bm;
299 unsigned long _end = jiffies + 3 * DRM_HZ;
302 ret = drm_bo_wait(bo, 0, 1, 0);
303 if (ret && allow_errors)
306 } while (ret && !time_after_eq(jiffies, _end));
310 DRM_ERROR("Detected GPU lockup or "
311 "fence driver was taken down. "
312 "Evicting buffer.\n");
316 drm_fence_usage_deref_unlocked(&bo->fence);
322 * Call dev->struct_mutex locked.
323 * Attempts to remove all private references to a buffer by expiring its
324 * fence object and removing from lru lists and memory managers.
327 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
329 struct drm_device *dev = bo->dev;
330 struct drm_buffer_manager *bm = &dev->bm;
332 DRM_ASSERT_LOCKED(&dev->struct_mutex);
334 atomic_inc(&bo->usage);
335 mutex_unlock(&dev->struct_mutex);
336 mutex_lock(&bo->mutex);
338 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
340 if (bo->fence && drm_fence_object_signaled(bo->fence,
342 drm_fence_usage_deref_unlocked(&bo->fence);
344 if (bo->fence && remove_all)
345 (void)drm_bo_expire_fence(bo, 0);
347 mutex_lock(&dev->struct_mutex);
349 if (!atomic_dec_and_test(&bo->usage)) {
354 list_del_init(&bo->lru);
355 if (bo->mem.mm_node) {
356 drm_mm_put_block(bo->mem.mm_node);
357 if (bo->pinned_node == bo->mem.mm_node)
358 bo->pinned_node = NULL;
359 bo->mem.mm_node = NULL;
361 list_del_init(&bo->pinned_lru);
362 if (bo->pinned_node) {
363 drm_mm_put_block(bo->pinned_node);
364 bo->pinned_node = NULL;
366 list_del_init(&bo->ddestroy);
367 mutex_unlock(&bo->mutex);
368 drm_bo_destroy_locked(bo);
372 if (list_empty(&bo->ddestroy)) {
373 drm_fence_object_flush(bo->fence, bo->fence_type);
374 list_add_tail(&bo->ddestroy, &bm->ddestroy);
375 schedule_delayed_work(&bm->wq,
376 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
380 mutex_unlock(&bo->mutex);
385 * Verify that refcount is 0 and that there are no internal references
386 * to the buffer object. Then destroy it.
389 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
391 struct drm_device *dev = bo->dev;
392 struct drm_buffer_manager *bm = &dev->bm;
394 DRM_ASSERT_LOCKED(&dev->struct_mutex);
396 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
397 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
398 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
399 if (bo->fence != NULL) {
400 DRM_ERROR("Fence was non-zero.\n");
401 drm_bo_cleanup_refs(bo, 0);
405 #ifdef DRM_ODD_MM_COMPAT
406 BUG_ON(!list_empty(&bo->vma_list));
407 BUG_ON(!list_empty(&bo->p_mm_list));
411 drm_ttm_unbind(bo->ttm);
412 drm_destroy_ttm(bo->ttm);
416 atomic_dec(&bm->count);
418 // BUG_ON(!list_empty(&bo->base.list));
419 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
425 * Something is still holding references to the buffer object.
426 * Get rid of those references.
429 drm_bo_cleanup_refs(bo, 0);
435 * Call dev->struct_mutex locked.
438 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
440 struct drm_buffer_manager *bm = &dev->bm;
442 struct drm_buffer_object *entry, *nentry;
443 struct list_head *list, *next;
445 list_for_each_safe(list, next, &bm->ddestroy) {
446 entry = list_entry(list, struct drm_buffer_object, ddestroy);
449 if (next != &bm->ddestroy) {
450 nentry = list_entry(next, struct drm_buffer_object,
452 atomic_inc(&nentry->usage);
455 drm_bo_cleanup_refs(entry, remove_all);
458 atomic_dec(&nentry->usage);
463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
464 static void drm_bo_delayed_workqueue(void *data)
466 static void drm_bo_delayed_workqueue(struct work_struct *work)
469 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
470 struct drm_device *dev = (struct drm_device *) data;
471 struct drm_buffer_manager *bm = &dev->bm;
473 struct drm_buffer_manager *bm =
474 container_of(work, struct drm_buffer_manager, wq.work);
475 struct drm_device *dev = container_of(bm, struct drm_device, bm);
478 DRM_DEBUG("Delayed delete Worker\n");
480 mutex_lock(&dev->struct_mutex);
481 if (!bm->initialized) {
482 mutex_unlock(&dev->struct_mutex);
485 drm_bo_delayed_delete(dev, 0);
486 if (bm->initialized && !list_empty(&bm->ddestroy)) {
487 schedule_delayed_work(&bm->wq,
488 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
490 mutex_unlock(&dev->struct_mutex);
493 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
495 struct drm_buffer_object *tmp_bo = *bo;
498 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
500 if (atomic_dec_and_test(&tmp_bo->usage)) {
501 drm_bo_destroy_locked(tmp_bo);
504 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
506 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
507 struct drm_user_object * uo)
509 struct drm_buffer_object *bo =
510 drm_user_object_entry(uo, struct drm_buffer_object, base);
512 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
514 drm_bo_takedown_vm_locked(bo);
515 drm_bo_usage_deref_locked(&bo);
518 void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
520 struct drm_buffer_object *tmp_bo = *bo;
521 struct drm_device *dev = tmp_bo->dev;
524 if (atomic_dec_and_test(&tmp_bo->usage)) {
525 mutex_lock(&dev->struct_mutex);
526 if (atomic_read(&tmp_bo->usage) == 0)
527 drm_bo_destroy_locked(tmp_bo);
528 mutex_unlock(&dev->struct_mutex);
531 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
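/*
 * Illustration only: the reference pattern the ioctl handlers below follow.
 * drm_lookup_buffer_object() takes a usage reference under dev->struct_mutex;
 * the caller drops it again with drm_bo_usage_deref_unlocked(). The handle
 * value is whatever user space passed in.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!bo)
 *		return -EINVAL;
 *
 *	mutex_lock(&bo->mutex);
 *	... inspect or modify the buffer ...
 *	mutex_unlock(&bo->mutex);
 *	drm_bo_usage_deref_unlocked(&bo);
 */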
533 void drm_putback_buffer_objects(struct drm_device *dev)
535 struct drm_buffer_manager *bm = &dev->bm;
536 struct list_head *list = &bm->unfenced;
537 struct drm_buffer_object *entry, *next;
539 mutex_lock(&dev->struct_mutex);
540 list_for_each_entry_safe(entry, next, list, lru) {
541 atomic_inc(&entry->usage);
542 mutex_unlock(&dev->struct_mutex);
544 mutex_lock(&entry->mutex);
545 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
546 mutex_lock(&dev->struct_mutex);
548 list_del_init(&entry->lru);
549 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
550 DRM_WAKEUP(&entry->event_queue);
553 * FIXME: Might want to put back on head of list
554 * instead of tail here.
557 drm_bo_add_to_lru(entry);
558 mutex_unlock(&entry->mutex);
559 drm_bo_usage_deref_locked(&entry);
561 mutex_unlock(&dev->struct_mutex);
563 EXPORT_SYMBOL(drm_putback_buffer_objects);
567 * Note: the caller has to register (if applicable)
568 * and deregister fence object usage.
571 int drm_fence_buffer_objects(struct drm_device *dev,
572 struct list_head *list,
573 uint32_t fence_flags,
574 struct drm_fence_object * fence,
575 struct drm_fence_object ** used_fence)
577 struct drm_buffer_manager *bm = &dev->bm;
578 struct drm_buffer_object *entry;
579 uint32_t fence_type = 0;
580 uint32_t fence_class = ~0;
585 mutex_lock(&dev->struct_mutex);
588 list = &bm->unfenced;
591 fence_class = fence->class;
593 list_for_each_entry(entry, list, lru) {
594 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
595 fence_type |= entry->new_fence_type;
596 if (fence_class == ~0)
597 fence_class = entry->new_fence_class;
598 else if (entry->new_fence_class != fence_class) {
599 DRM_ERROR("Mismatched fence classes on unfenced list: "
602 entry->new_fence_class);
615 if ((fence_type & fence->type) != fence_type) {
616 DRM_ERROR("Given fence doesn't match buffers "
617 "on unfenced list.\n");
622 mutex_unlock(&dev->struct_mutex);
623 ret = drm_fence_object_create(dev, fence_class, fence_type,
624 fence_flags | DRM_FENCE_FLAG_EMIT,
626 mutex_lock(&dev->struct_mutex);
635 entry = list_entry(l, struct drm_buffer_object, lru);
636 atomic_inc(&entry->usage);
637 mutex_unlock(&dev->struct_mutex);
638 mutex_lock(&entry->mutex);
639 mutex_lock(&dev->struct_mutex);
641 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
644 drm_fence_usage_deref_locked(&entry->fence);
645 entry->fence = drm_fence_reference_locked(fence);
646 entry->fence_class = entry->new_fence_class;
647 entry->fence_type = entry->new_fence_type;
648 DRM_FLAG_MASKED(entry->priv_flags, 0,
649 _DRM_BO_FLAG_UNFENCED);
650 DRM_WAKEUP(&entry->event_queue);
651 drm_bo_add_to_lru(entry);
653 mutex_unlock(&entry->mutex);
654 drm_bo_usage_deref_locked(&entry);
657 DRM_DEBUG("Fenced %d buffers\n", count);
659 mutex_unlock(&dev->struct_mutex);
663 EXPORT_SYMBOL(drm_fence_buffer_objects);
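/*
 * Illustration only: the intended flow around the unfenced list, seen from a
 * driver's command submission path. The command emission step is driver
 * specific and only hinted at; drm_bo_clean_unfenced() below uses the same
 * NULL-list / NULL-fence calling convention.
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	... validate the buffers; they end up on bm->unfenced ...
 *	... emit the commands that reference them ...
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (!ret)
 *		drm_fence_usage_deref_unlocked(&fence);
 */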
669 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
673 struct drm_device *dev = bo->dev;
674 struct drm_bo_mem_reg evict_mem;
677 * Someone might have modified the buffer before we took the buffer mutex.
680 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
682 if (bo->mem.mem_type != mem_type)
685 ret = drm_bo_wait(bo, 0, 0, no_wait);
687 if (ret && ret != -EAGAIN) {
688 DRM_ERROR("Failed to expire fence before "
689 "buffer eviction.\n");
694 evict_mem.mm_node = NULL;
696 if (bo->type == drm_bo_type_fake) {
697 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
698 bo->mem.mm_node = NULL;
703 evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
704 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
708 DRM_ERROR("Failed to find memory space for "
709 "buffer 0x%p eviction.\n", bo);
713 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
717 DRM_ERROR("Buffer eviction failed\n");
722 mutex_lock(&dev->struct_mutex);
723 if (evict_mem.mm_node) {
724 if (evict_mem.mm_node != bo->pinned_node)
725 drm_mm_put_block(evict_mem.mm_node);
726 evict_mem.mm_node = NULL;
729 drm_bo_add_to_lru(bo);
730 mutex_unlock(&dev->struct_mutex);
732 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
733 _DRM_BO_FLAG_EVICTED);
740 * Repeatedly evict memory from the LRU for @mem_type until we create enough
741 * space, or we've evicted everything and there isn't enough space.
743 static int drm_bo_mem_force_space(struct drm_device * dev,
744 struct drm_bo_mem_reg * mem,
745 uint32_t mem_type, int no_wait)
747 struct drm_mm_node *node;
748 struct drm_buffer_manager *bm = &dev->bm;
749 struct drm_buffer_object *entry;
750 struct drm_mem_type_manager *man = &bm->man[mem_type];
751 struct list_head *lru;
752 unsigned long num_pages = mem->num_pages;
755 mutex_lock(&dev->struct_mutex);
757 node = drm_mm_search_free(&man->manager, num_pages,
758 mem->page_alignment, 1);
763 if (lru->next == lru)
766 entry = list_entry(lru->next, struct drm_buffer_object, lru);
767 atomic_inc(&entry->usage);
768 mutex_unlock(&dev->struct_mutex);
769 mutex_lock(&entry->mutex);
770 BUG_ON(entry->pinned);
772 ret = drm_bo_evict(entry, mem_type, no_wait);
773 mutex_unlock(&entry->mutex);
774 drm_bo_usage_deref_unlocked(&entry);
777 mutex_lock(&dev->struct_mutex);
781 mutex_unlock(&dev->struct_mutex);
785 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
786 mutex_unlock(&dev->struct_mutex);
788 mem->mem_type = mem_type;
792 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
794 uint32_t mask, uint32_t * res_mask)
796 uint32_t cur_flags = drm_bo_type_flags(mem_type);
799 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
800 cur_flags |= DRM_BO_FLAG_CACHED;
801 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
802 cur_flags |= DRM_BO_FLAG_MAPPABLE;
803 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
804 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
806 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
809 if (mem_type == DRM_BO_MEM_LOCAL) {
810 *res_mask = cur_flags;
814 flag_diff = (mask ^ cur_flags);
815 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
816 (!(mask & DRM_BO_FLAG_CACHED) ||
817 (mask & DRM_BO_FLAG_FORCE_CACHING)))
820 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
821 ((mask & DRM_BO_FLAG_MAPPABLE) ||
822 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
825 *res_mask = cur_flags;
830 * Creates space for memory region @mem according to its type.
832 * This function first searches for free space in compatible memory types in
833 * the priority order defined by the driver. If free space isn't found, then
834 * drm_bo_mem_force_space is attempted in priority order to evict and find
837 int drm_bo_mem_space(struct drm_buffer_object * bo,
838 struct drm_bo_mem_reg * mem, int no_wait)
840 struct drm_device *dev = bo->dev;
841 struct drm_buffer_manager *bm = &dev->bm;
842 struct drm_mem_type_manager *man;
844 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
845 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
847 uint32_t mem_type = DRM_BO_MEM_LOCAL;
852 struct drm_mm_node *node = NULL;
856 for (i = 0; i < num_prios; ++i) {
858 man = &bm->man[mem_type];
860 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
866 if (mem_type == DRM_BO_MEM_LOCAL)
869 if ((mem_type == bo->pinned_mem_type) &&
870 (bo->pinned_node != NULL)) {
871 node = bo->pinned_node;
875 mutex_lock(&dev->struct_mutex);
876 if (man->has_type && man->use_type) {
878 node = drm_mm_search_free(&man->manager, mem->num_pages,
879 mem->page_alignment, 1);
881 node = drm_mm_get_block(node, mem->num_pages,
882 mem->page_alignment);
884 mutex_unlock(&dev->struct_mutex);
889 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
891 mem->mem_type = mem_type;
892 mem->flags = cur_flags;
899 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
900 prios = dev->driver->bo_driver->mem_busy_prio;
902 for (i = 0; i < num_prios; ++i) {
904 man = &bm->man[mem_type];
909 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
912 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
915 mem->flags = cur_flags;
923 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
927 EXPORT_SYMBOL(drm_bo_mem_space);
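/*
 * Illustration only: drm_bo_mem_space() tries memory types in the order the
 * driver lists them in its struct drm_bo_driver. A hypothetical driver using
 * only the two types referenced elsewhere in this file could set up:
 *
 *	static const uint32_t example_prio[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
 *
 *	.mem_type_prio = example_prio,
 *	.num_mem_type_prio = ARRAY_SIZE(example_prio),
 *	.mem_busy_prio = example_prio,
 *	.num_mem_busy_prio = ARRAY_SIZE(example_prio),
 */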
929 static int drm_bo_new_mask(struct drm_buffer_object * bo,
930 uint64_t new_mask, uint32_t hint)
934 if (bo->type == drm_bo_type_user) {
935 DRM_ERROR("User buffers are not supported yet\n");
939 new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
943 DRM_ERROR("Invalid buffer object rwx properties\n");
947 bo->mem.mask = new_mask;
952 * Call dev->struct_mutex locked.
955 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
956 uint32_t handle, int check_owner)
958 struct drm_user_object *uo;
959 struct drm_buffer_object *bo;
961 uo = drm_lookup_user_object(file_priv, handle);
963 if (!uo || (uo->type != drm_buffer_type)) {
964 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
968 if (check_owner && file_priv != uo->owner) {
969 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
973 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
974 atomic_inc(&bo->usage);
977 EXPORT_SYMBOL(drm_lookup_buffer_object);
980 * Call bo->mutex locked.
981 * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
982 * Unlike the drm_bo_busy function, it doesn't do any fence flushing.
985 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
987 struct drm_fence_object *fence = bo->fence;
989 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
991 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
992 drm_fence_usage_deref_unlocked(&bo->fence);
1001 * Call bo->mutex locked.
1002 * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
1005 static int drm_bo_busy(struct drm_buffer_object * bo)
1007 struct drm_fence_object *fence = bo->fence;
1009 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1011 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1012 drm_fence_usage_deref_unlocked(&bo->fence);
1015 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1016 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1017 drm_fence_usage_deref_unlocked(&bo->fence);
1025 static int drm_bo_read_cached(struct drm_buffer_object * bo)
1029 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1030 if (bo->mem.mm_node)
1031 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1036 * Wait until a buffer is unmapped.
1039 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1043 if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1046 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1047 atomic_read(&bo->mapped) == -1);
1055 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1059 mutex_lock(&bo->mutex);
1060 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1061 mutex_unlock(&bo->mutex);
1066 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1067 * Until then, we cannot really do anything with it except delete it.
1068 * The unfenced list is a PITA, and the operations
1070 * 2) submitting commands
1072 * Should really be an atomic operation.
1073 * We now "solve" this problem by keeping
1074 * the buffer "unfenced" after validating, but before fencing.
1077 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1080 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1088 mutex_unlock(&bo->mutex);
1089 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1090 !drm_bo_check_unfenced(bo));
1091 mutex_lock(&bo->mutex);
1094 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1096 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1106 * Fill in the ioctl reply argument with buffer info.
1110 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1111 struct drm_bo_info_rep *rep)
1116 rep->handle = bo->base.hash.key;
1117 rep->flags = bo->mem.flags;
1118 rep->size = bo->num_pages * PAGE_SIZE;
1119 rep->offset = bo->offset;
1120 rep->arg_handle = bo->map_list.user_token;
1121 rep->mask = bo->mem.mask;
1122 rep->buffer_start = bo->buffer_start;
1123 rep->fence_flags = bo->fence_type;
1125 rep->page_alignment = bo->mem.page_alignment;
1127 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1128 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1134 * Wait for buffer idle and register that we've mapped the buffer.
1135 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1136 * so that if the client dies, the mapping is automatically
1140 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1141 uint32_t map_flags, unsigned hint,
1142 struct drm_bo_info_rep *rep)
1144 struct drm_buffer_object *bo;
1145 struct drm_device *dev = file_priv->head->dev;
1147 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1149 mutex_lock(&dev->struct_mutex);
1150 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1151 mutex_unlock(&dev->struct_mutex);
1156 mutex_lock(&bo->mutex);
1157 if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1158 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1164 * If this returns true, we are currently unmapped.
1165 * We need to do this test, because unmapping can
1166 * be done without the bo->mutex held.
1170 if (atomic_inc_and_test(&bo->mapped)) {
1171 if (no_wait && drm_bo_busy(bo)) {
1172 atomic_dec(&bo->mapped);
1176 ret = drm_bo_wait(bo, 0, 0, no_wait);
1178 atomic_dec(&bo->mapped);
1182 if ((map_flags & DRM_BO_FLAG_READ) &&
1183 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1184 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1185 drm_bo_read_cached(bo);
1188 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1189 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1190 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1193 * We are already mapped with different flags.
1194 * We need to wait for unmap.
1197 ret = drm_bo_wait_unmapped(bo, no_wait);
1206 mutex_lock(&dev->struct_mutex);
1207 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1208 mutex_unlock(&dev->struct_mutex);
1210 if (atomic_add_negative(-1, &bo->mapped))
1211 DRM_WAKEUP(&bo->event_queue);
1214 drm_bo_fill_rep_arg(bo, rep);
1216 mutex_unlock(&bo->mutex);
1217 drm_bo_usage_deref_unlocked(&bo);
1221 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1223 struct drm_device *dev = file_priv->head->dev;
1224 struct drm_buffer_object *bo;
1225 struct drm_ref_object *ro;
1228 mutex_lock(&dev->struct_mutex);
1230 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1236 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1242 drm_remove_ref_object(file_priv, ro);
1243 drm_bo_usage_deref_locked(&bo);
1245 mutex_unlock(&dev->struct_mutex);
1250 * Call dev->struct_mutex locked.
1253 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1254 struct drm_user_object * uo,
1255 enum drm_ref_type action)
1257 struct drm_buffer_object *bo =
1258 drm_user_object_entry(uo, struct drm_buffer_object, base);
1261 * We DON'T want to take the bo->lock here, because we want to
1262 * hold it when we wait for unmapped buffer.
1265 BUG_ON(action != _DRM_REF_TYPE1);
1267 if (atomic_add_negative(-1, &bo->mapped))
1268 DRM_WAKEUP(&bo->event_queue);
1273 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1276 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1277 int no_wait, int move_unfenced)
1279 struct drm_device *dev = bo->dev;
1280 struct drm_buffer_manager *bm = &dev->bm;
1282 struct drm_bo_mem_reg mem;
1284 * Flush outstanding fences.
1290 * Wait for outstanding fences.
1293 ret = drm_bo_wait(bo, 0, 0, no_wait);
1297 mem.num_pages = bo->num_pages;
1298 mem.size = mem.num_pages << PAGE_SHIFT;
1299 mem.mask = new_mem_flags;
1300 mem.page_alignment = bo->mem.page_alignment;
1302 mutex_lock(&bm->evict_mutex);
1303 mutex_lock(&dev->struct_mutex);
1305 list_add_tail(&bo->lru, &bm->unfenced);
1306 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1307 _DRM_BO_FLAG_UNFENCED);
1308 mutex_unlock(&dev->struct_mutex);
1311 * Determine where to move the buffer.
1313 ret = drm_bo_mem_space(bo, &mem, no_wait);
1317 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1320 if (ret || !move_unfenced) {
1321 mutex_lock(&dev->struct_mutex);
1323 if (mem.mm_node != bo->pinned_node)
1324 drm_mm_put_block(mem.mm_node);
1327 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1328 DRM_WAKEUP(&bo->event_queue);
1330 drm_bo_add_to_lru(bo);
1331 mutex_unlock(&dev->struct_mutex);
1334 mutex_unlock(&bm->evict_mutex);
1338 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1340 uint32_t flag_diff = (mem->mask ^ mem->flags);
1342 if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1344 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1345 (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
1346 (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1349 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1350 ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1351 (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1356 static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
1358 struct drm_buffer_manager *bm = &dev->bm;
1359 struct drm_mem_type_manager *man;
1360 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1361 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1364 uint32_t mem_type = 0;
1367 if (drm_bo_mem_compat(mem))
1370 BUG_ON(mem->mm_node);
1372 for (i = 0; i < num_prios; ++i) {
1373 mem_type = prios[i];
1374 man = &bm->man[mem_type];
1375 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1382 mem->mm_node = NULL;
1383 mem->mem_type = mem_type;
1384 mem->flags = cur_flags;
1385 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1389 DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1390 (unsigned long long) mem->mask);
1398 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1399 uint32_t fence_class,
1400 int move_unfenced, int no_wait)
1402 struct drm_device *dev = bo->dev;
1403 struct drm_buffer_manager *bm = &dev->bm;
1404 struct drm_bo_driver *driver = dev->driver->bo_driver;
1408 DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1409 (unsigned long long) bo->mem.mask,
1410 (unsigned long long) bo->mem.flags);
1412 ret = driver->fence_type(bo, &fence_class, &ftype);
1415 DRM_ERROR("Driver did not support given buffer permissions\n");
1419 if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
1420 DRM_ERROR("Attempt to validate pinned buffer into different memory "
1426 * We're switching command submission mechanism,
1427 * or cannot simply rely on the hardware serializing for us.
1429 * Wait for buffer idle.
1432 if ((fence_class != bo->fence_class) ||
1433 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1435 ret = drm_bo_wait(bo, 0, 0, no_wait);
1442 bo->new_fence_class = fence_class;
1443 bo->new_fence_type = ftype;
1445 ret = drm_bo_wait_unmapped(bo, no_wait);
1447 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1450 if (bo->type == drm_bo_type_fake) {
1451 ret = drm_bo_check_fake(dev, &bo->mem);
1457 * Check whether we need to move buffer.
1460 if (!drm_bo_mem_compat(&bo->mem)) {
1461 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1465 DRM_ERROR("Failed moving buffer.\n");
1471 * We might need to add a TTM.
1474 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1475 ret = drm_bo_add_ttm(bo);
1479 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1482 * Finally, adjust lru to be sure.
1485 mutex_lock(&dev->struct_mutex);
1487 if (move_unfenced) {
1488 list_add_tail(&bo->lru, &bm->unfenced);
1489 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1490 _DRM_BO_FLAG_UNFENCED);
1492 drm_bo_add_to_lru(bo);
1493 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1494 DRM_WAKEUP(&bo->event_queue);
1495 DRM_FLAG_MASKED(bo->priv_flags, 0,
1496 _DRM_BO_FLAG_UNFENCED);
1499 mutex_unlock(&dev->struct_mutex);
1504 int drm_bo_do_validate(struct drm_buffer_object *bo,
1505 uint64_t flags, uint64_t mask, uint32_t hint,
1506 uint32_t fence_class,
1508 struct drm_bo_info_rep *rep)
1512 mutex_lock(&bo->mutex);
1513 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1519 DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1520 ret = drm_bo_new_mask(bo, flags, hint);
1524 ret = drm_buffer_object_validate(bo,
1526 !(hint & DRM_BO_HINT_DONT_FENCE),
1530 drm_bo_fill_rep_arg(bo, rep);
1532 mutex_unlock(&bo->mutex);
1535 EXPORT_SYMBOL(drm_bo_do_validate);
1538 int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
1539 uint32_t fence_class,
1540 uint64_t flags, uint64_t mask, uint32_t hint,
1541 struct drm_bo_info_rep * rep,
1542 struct drm_buffer_object **bo_rep)
1544 struct drm_device *dev = file_priv->head->dev;
1545 struct drm_buffer_object *bo;
1547 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1549 mutex_lock(&dev->struct_mutex);
1550 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1551 mutex_unlock(&dev->struct_mutex);
1557 ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1563 drm_bo_usage_deref_unlocked(&bo);
1567 EXPORT_SYMBOL(drm_bo_handle_validate);
1570 * Fills out the generic buffer object ioctl reply with the information for
1571 * the BO with id of handle.
1573 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1574 struct drm_bo_info_rep *rep)
1576 struct drm_device *dev = file_priv->head->dev;
1577 struct drm_buffer_object *bo;
1579 mutex_lock(&dev->struct_mutex);
1580 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1581 mutex_unlock(&dev->struct_mutex);
1586 mutex_lock(&bo->mutex);
1587 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1588 (void)drm_bo_busy(bo);
1589 drm_bo_fill_rep_arg(bo, rep);
1590 mutex_unlock(&bo->mutex);
1591 drm_bo_usage_deref_unlocked(&bo);
1595 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1597 struct drm_bo_info_rep *rep)
1599 struct drm_device *dev = file_priv->head->dev;
1600 struct drm_buffer_object *bo;
1601 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1604 mutex_lock(&dev->struct_mutex);
1605 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1606 mutex_unlock(&dev->struct_mutex);
1612 mutex_lock(&bo->mutex);
1613 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1616 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1620 drm_bo_fill_rep_arg(bo, rep);
1623 mutex_unlock(&bo->mutex);
1624 drm_bo_usage_deref_unlocked(&bo);
1628 int drm_buffer_object_create(struct drm_device *dev,
1630 enum drm_bo_type type,
1633 uint32_t page_alignment,
1634 unsigned long buffer_start,
1635 struct drm_buffer_object ** buf_obj)
1637 struct drm_buffer_manager *bm = &dev->bm;
1638 struct drm_buffer_object *bo;
1639 struct drm_bo_driver *driver = dev->driver->bo_driver;
1641 unsigned long num_pages;
1643 if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1644 DRM_ERROR("Invalid buffer object start.\n");
1647 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1648 if (num_pages == 0) {
1649 DRM_ERROR("Illegal buffer object size.\n");
1653 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1658 mutex_init(&bo->mutex);
1659 mutex_lock(&bo->mutex);
1661 atomic_set(&bo->usage, 1);
1662 atomic_set(&bo->mapped, -1);
1663 DRM_INIT_WAITQUEUE(&bo->event_queue);
1664 INIT_LIST_HEAD(&bo->lru);
1665 INIT_LIST_HEAD(&bo->pinned_lru);
1666 INIT_LIST_HEAD(&bo->ddestroy);
1667 #ifdef DRM_ODD_MM_COMPAT
1668 INIT_LIST_HEAD(&bo->p_mm_list);
1669 INIT_LIST_HEAD(&bo->vma_list);
1673 bo->num_pages = num_pages;
1674 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1675 bo->mem.num_pages = bo->num_pages;
1676 bo->mem.mm_node = NULL;
1677 bo->mem.page_alignment = page_alignment;
1678 if (bo->type == drm_bo_type_fake) {
1679 bo->offset = buffer_start;
1680 bo->buffer_start = 0;
1682 bo->buffer_start = buffer_start;
1685 bo->mem.flags = 0ULL;
1686 bo->mem.mask = 0ULL;
1687 atomic_inc(&bm->count);
1688 ret = drm_bo_new_mask(bo, mask, hint);
1693 if (bo->type == drm_bo_type_dc) {
1694 mutex_lock(&dev->struct_mutex);
1695 ret = drm_bo_setup_vm_locked(bo);
1696 mutex_unlock(&dev->struct_mutex);
1701 bo->fence_class = 0;
1702 ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
1704 DRM_ERROR("Driver did not support given buffer permissions\n");
1708 if (bo->type == drm_bo_type_fake) {
1709 ret = drm_bo_check_fake(dev, &bo->mem);
1714 ret = drm_bo_add_ttm(bo);
1718 mutex_lock(&dev->struct_mutex);
1719 drm_bo_add_to_lru(bo);
1720 mutex_unlock(&dev->struct_mutex);
1722 mutex_unlock(&bo->mutex);
1727 mutex_unlock(&bo->mutex);
1729 drm_bo_usage_deref_unlocked(&bo);
1732 EXPORT_SYMBOL(drm_buffer_object_create);
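/*
 * Illustration only: an in-kernel caller creating a 64 KiB kernel buffer in
 * cached system memory. The flag combination is hypothetical; which masks are
 * valid depends on the driver.
 *
 *	struct drm_buffer_object *bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, 64 * 1024, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED,
 *				       0, 0, 0, &bo);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_bo_usage_deref_unlocked(&bo);
 */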
1734 int drm_bo_add_user_object(struct drm_file *file_priv,
1735 struct drm_buffer_object *bo, int shareable)
1737 struct drm_device *dev = file_priv->head->dev;
1740 mutex_lock(&dev->struct_mutex);
1741 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1745 bo->base.remove = drm_bo_base_deref_locked;
1746 bo->base.type = drm_buffer_type;
1747 bo->base.ref_struct_locked = NULL;
1748 bo->base.unref = drm_buffer_user_object_unmap;
1751 mutex_unlock(&dev->struct_mutex);
1754 EXPORT_SYMBOL(drm_bo_add_user_object);
1756 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
1758 LOCK_TEST_WITH_RETURN(dev, file_priv);
1762 int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1764 struct drm_bo_op_arg curarg;
1765 struct drm_bo_op_arg *arg = data;
1766 struct drm_bo_op_req *req = &arg->d.req;
1767 struct drm_bo_info_rep rep;
1768 struct drm_buffer_object *dummy;
1769 unsigned long next = 0;
1770 void __user *curuserarg = NULL;
1773 DRM_DEBUG("drm_bo_op_ioctl\n");
1775 if (!dev->bm.initialized) {
1776 DRM_ERROR("Buffer object manager is not initialized.\n");
1782 curuserarg = (void __user *)next;
1783 if (copy_from_user(&curarg, curuserarg,
1784 sizeof(curarg)) != 0)
1796 case drm_bo_validate:
1797 ret = drm_bo_lock_test(dev, file_priv);
1800 ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
1801 req->bo_req.fence_class,
1809 DRM_ERROR("Function is not implemented yet.\n");
1811 case drm_bo_ref_fence:
1813 DRM_ERROR("Function is not implemented yet.\n");
1821 * A signal interrupted us. Make sure the ioctl is restartable.
1828 arg->d.rep.ret = ret;
1829 arg->d.rep.bo_info = rep;
1831 if (copy_to_user(curuserarg, &curarg,
1832 sizeof(curarg)) != 0)
1835 } while (next != 0);
1839 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1841 struct drm_bo_create_arg *arg = data;
1842 struct drm_bo_create_req *req = &arg->d.req;
1843 struct drm_bo_info_rep *rep = &arg->d.rep;
1844 struct drm_buffer_object *entry;
1847 DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
1848 (int)(req->size / 1024), req->page_alignment * 4, req->type);
1850 if (!dev->bm.initialized) {
1851 DRM_ERROR("Buffer object manager is not initialized.\n");
1854 if (req->type == drm_bo_type_fake)
1855 LOCK_TEST_WITH_RETURN(dev, file_priv);
1857 ret = drm_buffer_object_create(file_priv->head->dev,
1858 req->size, req->type, req->mask,
1859 req->hint, req->page_alignment,
1860 req->buffer_start, &entry);
1864 ret = drm_bo_add_user_object(file_priv, entry,
1865 req->mask & DRM_BO_FLAG_SHAREABLE);
1867 drm_bo_usage_deref_unlocked(&entry);
1871 mutex_lock(&entry->mutex);
1872 drm_bo_fill_rep_arg(entry, rep);
1873 mutex_unlock(&entry->mutex);
1880 int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1882 struct drm_bo_handle_arg *arg = data;
1883 struct drm_user_object *uo;
1886 DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
1888 if (!dev->bm.initialized) {
1889 DRM_ERROR("Buffer object manager is not initialized.\n");
1893 mutex_lock(&dev->struct_mutex);
1894 uo = drm_lookup_user_object(file_priv, arg->handle);
1895 if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
1896 mutex_unlock(&dev->struct_mutex);
1899 ret = drm_remove_user_object(file_priv, uo);
1900 mutex_unlock(&dev->struct_mutex);
1905 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1907 struct drm_bo_map_wait_idle_arg *arg = data;
1908 struct drm_bo_info_req *req = &arg->d.req;
1909 struct drm_bo_info_rep *rep = &arg->d.rep;
1912 DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);
1914 if (!dev->bm.initialized) {
1915 DRM_ERROR("Buffer object manager is not initialized.\n");
1919 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1927 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1929 struct drm_bo_handle_arg *arg = data;
1932 DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);
1934 if (!dev->bm.initialized) {
1935 DRM_ERROR("Buffer object manager is not initialized.\n");
1939 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1944 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1946 struct drm_bo_reference_info_arg *arg = data;
1947 struct drm_bo_handle_arg *req = &arg->d.req;
1948 struct drm_bo_info_rep *rep = &arg->d.rep;
1949 struct drm_user_object *uo;
1952 DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);
1954 if (!dev->bm.initialized) {
1955 DRM_ERROR("Buffer object manager is not initialized.\n");
1959 ret = drm_user_object_ref(file_priv, req->handle,
1960 drm_buffer_type, &uo);
1964 ret = drm_bo_handle_info(file_priv, req->handle, rep);
1971 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1973 struct drm_bo_handle_arg *arg = data;
1976 DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);
1978 if (!dev->bm.initialized) {
1979 DRM_ERROR("Buffer object manager is not initialized.\n");
1983 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1987 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1989 struct drm_bo_reference_info_arg *arg = data;
1990 struct drm_bo_handle_arg *req = &arg->d.req;
1991 struct drm_bo_info_rep *rep = &arg->d.rep;
1994 DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);
1996 if (!dev->bm.initialized) {
1997 DRM_ERROR("Buffer object manager is not initialized.\n");
2001 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2008 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2010 struct drm_bo_map_wait_idle_arg *arg = data;
2011 struct drm_bo_info_req *req = &arg->d.req;
2012 struct drm_bo_info_rep *rep = &arg->d.rep;
2015 DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);
2017 if (!dev->bm.initialized) {
2018 DRM_ERROR("Buffer object manager is not initialized.\n");
2022 ret = drm_bo_handle_wait(file_priv, req->handle,
2031 * Pins or unpins the given buffer object in the given memory area.
2033 * Pinned buffers will not be evicted from or move within their memory area.
2034 * Must be called with the hardware lock held for pinning.
2037 drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
2042 mutex_lock(&bo->mutex);
2043 if (bo->pinned == pin) {
2044 mutex_unlock(&bo->mutex);
2049 ret = drm_bo_wait_unfenced(bo, 0, 0);
2051 mutex_unlock(&bo->mutex);
2055 /* Validate the buffer into its pinned location, with no
2058 ret = drm_buffer_object_validate(bo, 0, 0, 0);
2060 mutex_unlock(&bo->mutex);
2064 /* Pull the buffer off of the LRU and add it to the pinned
2067 bo->pinned_mem_type = bo->mem.mem_type;
2068 mutex_lock(&dev->struct_mutex);
2069 list_del_init(&bo->lru);
2070 list_del_init(&bo->pinned_lru);
2071 drm_bo_add_to_pinned_lru(bo);
2073 if (bo->pinned_node != bo->mem.mm_node) {
2074 if (bo->pinned_node != NULL)
2075 drm_mm_put_block(bo->pinned_node);
2076 bo->pinned_node = bo->mem.mm_node;
2080 mutex_unlock(&dev->struct_mutex);
2083 mutex_lock(&dev->struct_mutex);
2085 /* Remove our buffer from the pinned list */
2086 if (bo->pinned_node != bo->mem.mm_node)
2087 drm_mm_put_block(bo->pinned_node);
2089 list_del_init(&bo->pinned_lru);
2090 bo->pinned_node = NULL;
2092 mutex_unlock(&dev->struct_mutex);
2094 mutex_unlock(&bo->mutex);
2098 int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
2099 struct drm_file *file_priv)
2101 struct drm_bo_set_pin_arg *arg = data;
2102 struct drm_bo_set_pin_req *req = &arg->d.req;
2103 struct drm_bo_info_rep *rep = &arg->d.rep;
2104 struct drm_buffer_object *bo;
2107 DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
2108 req->handle, req->pin);
2110 if (!dev->bm.initialized) {
2111 DRM_ERROR("Buffer object manager is not initialized.\n");
2115 if (req->pin < 0 || req->pin > 1) {
2116 DRM_ERROR("Bad arguments to set_pin\n");
2121 LOCK_TEST_WITH_RETURN(dev, file_priv);
2123 mutex_lock(&dev->struct_mutex);
2124 bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
2125 mutex_unlock(&dev->struct_mutex);
2130 ret = drm_bo_set_pin(dev, bo, req->pin);
2132 drm_bo_usage_deref_unlocked(&bo);
2136 drm_bo_fill_rep_arg(bo, rep);
2137 drm_bo_usage_deref_unlocked(&bo);
2144 * Clean the unfenced list and put the buffers on the regular LRU.
2145 * This is part of the memory manager cleanup and should only be
2146 * called with the DRI lock held.
2147 * Call dev->struct_mutex locked.
2150 static void drm_bo_clean_unfenced(struct drm_device *dev)
2152 struct drm_buffer_manager *bm = &dev->bm;
2153 struct list_head *head, *list;
2154 struct drm_buffer_object *entry;
2155 struct drm_fence_object *fence;
2157 head = &bm->unfenced;
2159 if (list_empty(head))
2162 DRM_ERROR("Clean unfenced\n");
2164 if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
2167 * Fixme: Should really wait here.
2172 drm_fence_usage_deref_locked(&fence);
2174 if (list_empty(head))
2177 DRM_ERROR("Really clean unfenced\n");
2180 while(list != head) {
2181 prefetch(list->next);
2182 entry = list_entry(list, struct drm_buffer_object, lru);
2184 atomic_inc(&entry->usage);
2185 mutex_unlock(&dev->struct_mutex);
2186 mutex_lock(&entry->mutex);
2187 mutex_lock(&dev->struct_mutex);
2189 list_del(&entry->lru);
2190 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
2191 drm_bo_add_to_lru(entry);
2192 mutex_unlock(&entry->mutex);
2197 static int drm_bo_leave_list(struct drm_buffer_object * bo,
2199 int free_pinned, int allow_errors)
2201 struct drm_device *dev = bo->dev;
2204 mutex_lock(&bo->mutex);
2206 ret = drm_bo_expire_fence(bo, allow_errors);
2211 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2212 mutex_lock(&dev->struct_mutex);
2213 list_del_init(&bo->pinned_lru);
2214 if (bo->pinned_node == bo->mem.mm_node)
2215 bo->pinned_node = NULL;
2216 if (bo->pinned_node != NULL) {
2217 drm_mm_put_block(bo->pinned_node);
2218 bo->pinned_node = NULL;
2220 mutex_unlock(&dev->struct_mutex);
2224 DRM_ERROR("A pinned buffer was present at "
2225 "cleanup. Removing flag and evicting.\n");
2229 if (bo->mem.mem_type == mem_type)
2230 ret = drm_bo_evict(bo, mem_type, 0);
2237 DRM_ERROR("Cleanup eviction failed\n");
2242 mutex_unlock(&bo->mutex);
2247 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2251 return list_entry(list, struct drm_buffer_object, pinned_lru);
2253 return list_entry(list, struct drm_buffer_object, lru);
2257 * dev->struct_mutex locked.
2260 static int drm_bo_force_list_clean(struct drm_device * dev,
2261 struct list_head *head,
2267 struct list_head *list, *next, *prev;
2268 struct drm_buffer_object *entry, *nentry;
2273 * The list traversal is a bit odd here, because an item may
2274 * disappear from the list when we release the struct_mutex or
2275 * when we decrease the usage count. Also we're not guaranteed
2276 * to drain pinned lists, so we can't always restart.
2281 list_for_each_safe(list, next, head) {
2284 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2285 atomic_inc(&entry->usage);
2287 atomic_dec(&nentry->usage);
2292 * Protect the next item from destruction, so we can check
2293 * its list pointers later on.
2297 nentry = drm_bo_entry(next, pinned_list);
2298 atomic_inc(&nentry->usage);
2300 mutex_unlock(&dev->struct_mutex);
2302 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2304 mutex_lock(&dev->struct_mutex);
2306 drm_bo_usage_deref_locked(&entry);
2311 * Has the next item disappeared from the list?
2314 do_restart = ((next->prev != list) && (next->prev != prev));
2316 if (nentry != NULL && do_restart)
2317 drm_bo_usage_deref_locked(&nentry);
2325 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2327 struct drm_buffer_manager *bm = &dev->bm;
2328 struct drm_mem_type_manager *man = &bm->man[mem_type];
2331 if (mem_type >= DRM_BO_MEM_TYPES) {
2332 DRM_ERROR("Illegal memory type %d\n", mem_type);
2336 if (!man->has_type) {
2337 DRM_ERROR("Trying to take down uninitialized "
2338 "memory manager type %u\n", mem_type);
2347 drm_bo_clean_unfenced(dev);
2348 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2349 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2351 if (drm_mm_clean(&man->manager)) {
2352 drm_mm_takedown(&man->manager);
2360 EXPORT_SYMBOL(drm_bo_clean_mm);
2363 * Evict all buffers of a particular mem_type, but leave memory manager
2364 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2365 * point since we have the hardware lock.
2368 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2371 struct drm_buffer_manager *bm = &dev->bm;
2372 struct drm_mem_type_manager *man = &bm->man[mem_type];
2374 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2375 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2379 if (!man->has_type) {
2380 DRM_ERROR("Memory type %u has not been initialized.\n",
2385 drm_bo_clean_unfenced(dev);
2386 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2389 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2394 int drm_bo_init_mm(struct drm_device * dev,
2396 unsigned long p_offset, unsigned long p_size)
2398 struct drm_buffer_manager *bm = &dev->bm;
2400 struct drm_mem_type_manager *man;
2402 if (type >= DRM_BO_MEM_TYPES) {
2403 DRM_ERROR("Illegal memory type %d\n", type);
2407 man = &bm->man[type];
2408 if (man->has_type) {
2409 DRM_ERROR("Memory manager already initialized for type %d\n",
2414 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2419 if (type != DRM_BO_MEM_LOCAL) {
2421 DRM_ERROR("Zero size memory manager type %d\n", type);
2424 ret = drm_mm_init(&man->manager, p_offset, p_size);
2431 INIT_LIST_HEAD(&man->lru);
2432 INIT_LIST_HEAD(&man->pinned);
2436 EXPORT_SYMBOL(drm_bo_init_mm);
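/*
 * Illustration only: DRM_BO_MEM_LOCAL is brought up by drm_bo_driver_init()
 * below with zero offset and size; other types are typically initialized
 * from the drm_mm_init_ioctl() path with a real offset and a size in pages,
 * e.g. (hypothetical numbers):
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, aperture_size >> PAGE_SHIFT);
 */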
2439 * This is called from lastclose, so we don't need to worry about
2440 * any clients still running when we set the initialized flag to zero.
2443 int drm_bo_driver_finish(struct drm_device * dev)
2445 struct drm_buffer_manager *bm = &dev->bm;
2447 unsigned i = DRM_BO_MEM_TYPES;
2448 struct drm_mem_type_manager *man;
2450 mutex_lock(&dev->bm.init_mutex);
2451 mutex_lock(&dev->struct_mutex);
2453 if (!bm->initialized)
2455 bm->initialized = 0;
2459 if (man->has_type) {
2461 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2463 DRM_ERROR("DRM memory manager type %d "
2464 "is not clean.\n", i);
2469 mutex_unlock(&dev->struct_mutex);
2471 if (!cancel_delayed_work(&bm->wq)) {
2472 flush_scheduled_work();
2474 mutex_lock(&dev->struct_mutex);
2475 drm_bo_delayed_delete(dev, 1);
2476 if (list_empty(&bm->ddestroy)) {
2477 DRM_DEBUG("Delayed destroy list was clean\n");
2479 if (list_empty(&bm->man[0].lru)) {
2480 DRM_DEBUG("Swap list was clean\n");
2482 if (list_empty(&bm->man[0].pinned)) {
2483 DRM_DEBUG("NO_MOVE list was clean\n");
2485 if (list_empty(&bm->unfenced)) {
2486 DRM_DEBUG("Unfenced list was clean\n");
2489 mutex_unlock(&dev->struct_mutex);
2490 mutex_unlock(&dev->bm.init_mutex);
2494 int drm_bo_driver_init(struct drm_device * dev)
2496 struct drm_bo_driver *driver = dev->driver->bo_driver;
2497 struct drm_buffer_manager *bm = &dev->bm;
2500 mutex_lock(&dev->bm.init_mutex);
2501 mutex_lock(&dev->struct_mutex);
2506 * Initialize the system memory buffer type.
2507 * Other types need to be driver / IOCTL initialized.
2509 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2513 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2514 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2516 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2518 bm->initialized = 1;
2520 atomic_set(&bm->count, 0);
2522 INIT_LIST_HEAD(&bm->unfenced);
2523 INIT_LIST_HEAD(&bm->ddestroy);
2525 mutex_unlock(&dev->struct_mutex);
2526 mutex_unlock(&dev->bm.init_mutex);
2530 EXPORT_SYMBOL(drm_bo_driver_init);
2532 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2534 struct drm_mm_init_arg *arg = data;
2535 struct drm_buffer_manager *bm = &dev->bm;
2536 struct drm_bo_driver *driver = dev->driver->bo_driver;
2539 DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
2540 arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));
2543 DRM_ERROR("Buffer objects are not supported by this driver\n");
2548 if (arg->magic != DRM_BO_INIT_MAGIC) {
2549 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2550 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2553 if (arg->major != DRM_BO_INIT_MAJOR) {
2554 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2555 "\tversions don't match. Got %d, expected %d.\n",
2556 arg->major, DRM_BO_INIT_MAJOR);
2559 if (arg->minor > DRM_BO_INIT_MINOR) {
2560 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2561 "\tlibdrm buffer object interface version is %d.%d.\n"
2562 "\tkernel DRM buffer object interface version is %d.%d\n",
2563 arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2567 mutex_lock(&dev->bm.init_mutex);
2568 mutex_lock(&dev->struct_mutex);
2569 if (!bm->initialized) {
2570 DRM_ERROR("DRM memory manager was not initialized.\n");
2573 if (arg->mem_type == 0) {
2574 DRM_ERROR("System memory buffers already initialized.\n");
2577 ret = drm_bo_init_mm(dev, arg->mem_type,
2578 arg->p_offset, arg->p_size);
2581 mutex_unlock(&dev->struct_mutex);
2582 mutex_unlock(&dev->bm.init_mutex);
2589 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2591 struct drm_mm_type_arg *arg = data;
2592 struct drm_buffer_manager *bm = &dev->bm;
2593 struct drm_bo_driver *driver = dev->driver->bo_driver;
2596 DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);
2599 DRM_ERROR("Buffer objects are not supported by this driver\n");
2603 LOCK_TEST_WITH_RETURN(dev, file_priv);
2604 mutex_lock(&dev->bm.init_mutex);
2605 mutex_lock(&dev->struct_mutex);
2607 if (!bm->initialized) {
2608 DRM_ERROR("DRM memory manager was not initialized\n");
2611 if (arg->mem_type == 0) {
2612 DRM_ERROR("No takedown for System memory buffers.\n");
2616 if (drm_bo_clean_mm(dev, arg->mem_type)) {
2617 DRM_ERROR("Memory manager type %d not clean. "
2618 "Delaying takedown\n", arg->mem_type);
2621 mutex_unlock(&dev->struct_mutex);
2622 mutex_unlock(&dev->bm.init_mutex);
2629 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2631 struct drm_mm_type_arg *arg = data;
2632 struct drm_bo_driver *driver = dev->driver->bo_driver;
2635 DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);
2638 DRM_ERROR("Buffer objects are not supported by this driver\n");
2642 LOCK_TEST_WITH_RETURN(dev, file_priv);
2643 mutex_lock(&dev->bm.init_mutex);
2644 mutex_lock(&dev->struct_mutex);
2645 ret = drm_bo_lock_mm(dev, arg->mem_type);
2646 mutex_unlock(&dev->struct_mutex);
2647 mutex_unlock(&dev->bm.init_mutex);
2654 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2656 struct drm_bo_driver *driver = dev->driver->bo_driver;
2659 DRM_DEBUG("drm_mm_unlock_ioctl\n");
2662 DRM_ERROR("Buffer objects are not supported by this driver\n");
2666 LOCK_TEST_WITH_RETURN(dev, file_priv);
2667 mutex_lock(&dev->bm.init_mutex);
2668 mutex_lock(&dev->struct_mutex);
2671 mutex_unlock(&dev->struct_mutex);
2672 mutex_unlock(&dev->bm.init_mutex);
2680 * buffer object vm functions.
2683 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2685 struct drm_buffer_manager *bm = &dev->bm;
2686 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2688 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2689 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2692 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2695 if (mem->flags & DRM_BO_FLAG_CACHED)
2701 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2704 * Get the PCI offset for the buffer object memory.
2706 * \param bo The buffer object.
2707 * \param bus_base On return the base of the PCI region
2708 * \param bus_offset On return the byte offset into the PCI region
2709 * \param bus_size On return the byte size of the buffer object or zero if
2710 * the buffer object memory is not accessible through a PCI region.
2711 * \return Failure indication.
2713 * Returns -EINVAL if the buffer object is currently not mappable.
2714 * Otherwise returns zero.
2717 int drm_bo_pci_offset(struct drm_device *dev,
2718 struct drm_bo_mem_reg *mem,
2719 unsigned long *bus_base,
2720 unsigned long *bus_offset, unsigned long *bus_size)
2722 struct drm_buffer_manager *bm = &dev->bm;
2723 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2726 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2729 if (drm_mem_reg_is_pci(dev, mem)) {
2730 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2731 *bus_size = mem->num_pages << PAGE_SHIFT;
2732 *bus_base = man->io_offset;
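/*
 * Illustration only: a caller wanting a CPU mapping of a PCI-accessible
 * buffer combines the values filled in above roughly like this (error
 * handling and the ioremap variant are driver dependent):
 *
 *	unsigned long bus_base, bus_offset, bus_size;
 *
 *	if (!drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, &bus_size)
 *	    && bus_size != 0)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);
 */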
2739 * Kill all user-space virtual mappings of this buffer object.
2741 * \param bo The buffer object.
2743 * Call bo->mutex locked.
2746 void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2748 struct drm_device *dev = bo->dev;
2749 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2750 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2752 if (!dev->dev_mapping)
2755 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2758 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2760 struct drm_map_list *list = &bo->map_list;
2761 drm_local_map_t *map;
2762 struct drm_device *dev = bo->dev;
2764 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2765 if (list->user_token) {
2766 drm_ht_remove_item(&dev->map_hash, &list->hash);
2767 list->user_token = 0;
2769 if (list->file_offset_node) {
2770 drm_mm_put_block(list->file_offset_node);
2771 list->file_offset_node = NULL;
2778 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2780 list->user_token = 0ULL;
2781 drm_bo_usage_deref_locked(&bo);
2784 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2786 struct drm_map_list *list = &bo->map_list;
2787 drm_local_map_t *map;
2788 struct drm_device *dev = bo->dev;
2790 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2791 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2797 map->type = _DRM_TTM;
2798 map->flags = _DRM_REMOVABLE;
2799 map->size = bo->mem.num_pages * PAGE_SIZE;
2800 atomic_inc(&bo->usage);
2801 map->handle = (void *)bo;
2803 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2804 bo->mem.num_pages, 0, 0);
2806 if (!list->file_offset_node) {
2807 drm_bo_takedown_vm_locked(bo);
2811 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2812 bo->mem.num_pages, 0);
2814 list->hash.key = list->file_offset_node->start;
2815 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2816 drm_bo_takedown_vm_locked(bo);
2820 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;