1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads, as well as the hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44 * both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48 * traversal will, in general, need to be restarted.
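 *
 * A minimal sketch of the resulting pattern (illustrative only; "bo" and
 * "dev" stand for any buffer object and its device):
 *
 *	mutex_lock(&bo->mutex);
 *	mutex_lock(&dev->struct_mutex);
 *	... manipulate list heads, hash entries, the usage count ...
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_unlock(&bo->mutex);
 *
 * When a list is walked under dev->struct_mutex and an entry's bo->mutex is
 * needed, dev->struct_mutex must be dropped first and the walk restarted
 * afterwards.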
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
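
/*
 * Map a buffer memory type index to its corresponding placement flag bit.
 * The memory-type flags are assumed to occupy bit 24 and up (see the
 * DRM_BO_MASK_MEM flag space), so type n maps to bit (24 + n).
 */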
57 static inline uint32_t drm_bo_type_flags(unsigned type)
59 return (1 << (24 + type));
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
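
/*
 * Put the buffer on the LRU list of its current memory type.
 * Buffers pinned in their pinned memory type are not evictable and are
 * therefore kept off the LRU; their lru list head is just reinitialized.
 * Call with dev->struct_mutex held.
 */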
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
83 if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
84 man = &bo->dev->bm.man[bo->mem.mem_type];
85 list_add_tail(&bo->lru, &man->lru);
87 INIT_LIST_HEAD(&bo->lru);
91 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
93 #ifdef DRM_ODD_MM_COMPAT
96 if (!bo->map_list.map)
99 ret = drm_bo_lock_kmm(bo);
102 drm_bo_unmap_virtual(bo);
104 drm_bo_finish_unmap(bo);
106 if (!bo->map_list.map)
109 drm_bo_unmap_virtual(bo);
114 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
116 #ifdef DRM_ODD_MM_COMPAT
119 if (!bo->map_list.map)
122 ret = drm_bo_remap_bound(bo);
124 DRM_ERROR("Failed to remap a bound buffer object.\n"
125 "\tThis might cause a sigbus later.\n");
127 drm_bo_unlock_kmm(bo);
132 * Call bo->mutex locked.
135 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
137 struct drm_device *dev = bo->dev;
141 DRM_ASSERT_LOCKED(&bo->mutex);
145 case drm_bo_type_kernel:
146 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
150 case drm_bo_type_user:
151 case drm_bo_type_fake:
154 DRM_ERROR("Illegal buffer object type\n");
162 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
163 struct drm_bo_mem_reg * mem,
164 int evict, int no_wait)
166 struct drm_device *dev = bo->dev;
167 struct drm_buffer_manager *bm = &dev->bm;
168 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
169 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
170 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
171 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
174 if (old_is_pci || new_is_pci ||
175 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
176 ret = drm_bo_vm_pre_move(bo, old_is_pci);
181 * Create and bind a ttm if required.
184 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
185 ret = drm_bo_add_ttm(bo);
189 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
190 ret = drm_bind_ttm(bo->ttm, mem);
196 if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
198 struct drm_bo_mem_reg *old_mem = &bo->mem;
199 uint64_t save_flags = old_mem->flags;
200 uint64_t save_mask = old_mem->mask;
204 old_mem->mask = save_mask;
205 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
207 } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
208 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
210 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
212 } else if (dev->driver->bo_driver->move) {
213 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
217 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
224 if (old_is_pci || new_is_pci)
225 drm_bo_vm_post_move(bo);
227 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
229 dev->driver->bo_driver->invalidate_caches(dev,
232 DRM_ERROR("Can not flush read caches\n");
235 DRM_FLAG_MASKED(bo->priv_flags,
236 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
237 _DRM_BO_FLAG_EVICTED);
240 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
241 bm->man[bo->mem.mem_type].gpu_offset;
247 if (old_is_pci || new_is_pci)
248 drm_bo_vm_post_move(bo);
250 new_man = &bm->man[bo->mem.mem_type];
251 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
252 drm_ttm_unbind(bo->ttm);
253 drm_destroy_ttm(bo->ttm);
261 * Call bo->mutex locked.
262 * Wait until the buffer is idle.
265 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
270 DRM_ASSERT_LOCKED(&bo->mutex);
273 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
274 drm_fence_usage_deref_unlocked(&bo->fence);
281 drm_fence_object_wait(bo->fence, lazy, ignore_signals,
286 drm_fence_usage_deref_unlocked(&bo->fence);
290 EXPORT_SYMBOL(drm_bo_wait);
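
/*
 * Typical use (sketch): idle a buffer before touching its contents, with
 * bo->mutex held by the caller; a nonzero "no_wait" makes the call fail
 * immediately instead of sleeping on the fence:
 *
 *	ret = drm_bo_wait(bo, 0, 0, no_wait);
 *	if (ret)
 *		return ret;
 */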
292 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
294 struct drm_device *dev = bo->dev;
295 struct drm_buffer_manager *bm = &dev->bm;
299 unsigned long _end = jiffies + 3 * DRM_HZ;
302 ret = drm_bo_wait(bo, 0, 1, 0);
303 if (ret && allow_errors)
306 } while (ret && !time_after_eq(jiffies, _end));
310 DRM_ERROR("Detected GPU lockup or "
311 "fence driver was taken down. "
312 "Evicting buffer.\n");
316 drm_fence_usage_deref_unlocked(&bo->fence);
322 * Call dev->struct_mutex locked.
323 * Attempts to remove all private references to a buffer by expiring its
324 * fence object and removing from lru lists and memory managers.
327 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
329 struct drm_device *dev = bo->dev;
330 struct drm_buffer_manager *bm = &dev->bm;
332 DRM_ASSERT_LOCKED(&dev->struct_mutex);
334 atomic_inc(&bo->usage);
335 mutex_unlock(&dev->struct_mutex);
336 mutex_lock(&bo->mutex);
338 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
340 if (bo->fence && drm_fence_object_signaled(bo->fence,
342 drm_fence_usage_deref_unlocked(&bo->fence);
344 if (bo->fence && remove_all)
345 (void)drm_bo_expire_fence(bo, 0);
347 mutex_lock(&dev->struct_mutex);
349 if (!atomic_dec_and_test(&bo->usage)) {
354 list_del_init(&bo->lru);
355 if (bo->mem.mm_node) {
356 drm_mm_put_block(bo->mem.mm_node);
357 if (bo->pinned_node == bo->mem.mm_node)
358 bo->pinned_node = NULL;
359 bo->mem.mm_node = NULL;
361 list_del_init(&bo->pinned_lru);
362 if (bo->pinned_node) {
363 drm_mm_put_block(bo->pinned_node);
364 bo->pinned_node = NULL;
366 list_del_init(&bo->ddestroy);
367 mutex_unlock(&bo->mutex);
368 drm_bo_destroy_locked(bo);
372 if (list_empty(&bo->ddestroy)) {
373 drm_fence_object_flush(bo->fence, bo->fence_type);
374 list_add_tail(&bo->ddestroy, &bm->ddestroy);
375 schedule_delayed_work(&bm->wq,
376 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
380 mutex_unlock(&bo->mutex);
385 * Verify that refcount is 0 and that there are no internal references
386 * to the buffer object. Then destroy it.
389 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
391 struct drm_device *dev = bo->dev;
392 struct drm_buffer_manager *bm = &dev->bm;
394 DRM_ASSERT_LOCKED(&dev->struct_mutex);
396 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
397 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
398 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
399 if (bo->fence != NULL) {
400 DRM_ERROR("Fence was non-zero.\n");
401 drm_bo_cleanup_refs(bo, 0);
405 #ifdef DRM_ODD_MM_COMPAT
406 BUG_ON(!list_empty(&bo->vma_list));
407 BUG_ON(!list_empty(&bo->p_mm_list));
411 drm_ttm_unbind(bo->ttm);
412 drm_destroy_ttm(bo->ttm);
416 atomic_dec(&bm->count);
418 // BUG_ON(!list_empty(&bo->base.list));
419 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
425 * Some stuff is still trying to reference the buffer object.
426 * Get rid of those references.
429 drm_bo_cleanup_refs(bo, 0);
435 * Call dev->struct_mutex locked.
438 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
440 struct drm_buffer_manager *bm = &dev->bm;
442 struct drm_buffer_object *entry, *nentry;
443 struct list_head *list, *next;
445 list_for_each_safe(list, next, &bm->ddestroy) {
446 entry = list_entry(list, struct drm_buffer_object, ddestroy);
449 if (next != &bm->ddestroy) {
450 nentry = list_entry(next, struct drm_buffer_object,
452 atomic_inc(&nentry->usage);
455 drm_bo_cleanup_refs(entry, remove_all);
458 atomic_dec(&nentry->usage);
463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
464 static void drm_bo_delayed_workqueue(void *data)
466 static void drm_bo_delayed_workqueue(struct work_struct *work)
469 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
470 struct drm_device *dev = (struct drm_device *) data;
471 struct drm_buffer_manager *bm = &dev->bm;
473 struct drm_buffer_manager *bm =
474 container_of(work, struct drm_buffer_manager, wq.work);
475 struct drm_device *dev = container_of(bm, struct drm_device, bm);
478 DRM_DEBUG("Delayed delete Worker\n");
480 mutex_lock(&dev->struct_mutex);
481 if (!bm->initialized) {
482 mutex_unlock(&dev->struct_mutex);
485 drm_bo_delayed_delete(dev, 0);
486 if (bm->initialized && !list_empty(&bm->ddestroy)) {
487 schedule_delayed_work(&bm->wq,
488 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
490 mutex_unlock(&dev->struct_mutex);
493 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
495 struct drm_buffer_object *tmp_bo = *bo;
498 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
500 if (atomic_dec_and_test(&tmp_bo->usage)) {
501 drm_bo_destroy_locked(tmp_bo);
504 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
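
/*
 * Note on the usage count: drm_bo_usage_deref_locked() must be called with
 * dev->struct_mutex held, whereas drm_bo_usage_deref_unlocked() below takes
 * the mutex itself once the count has dropped to zero, before destroying
 * the object.
 */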
506 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
507 struct drm_user_object * uo)
509 struct drm_buffer_object *bo =
510 drm_user_object_entry(uo, struct drm_buffer_object, base);
512 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
514 drm_bo_takedown_vm_locked(bo);
515 drm_bo_usage_deref_locked(&bo);
518 void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
520 struct drm_buffer_object *tmp_bo = *bo;
521 struct drm_device *dev = tmp_bo->dev;
524 if (atomic_dec_and_test(&tmp_bo->usage)) {
525 mutex_lock(&dev->struct_mutex);
526 if (atomic_read(&tmp_bo->usage) == 0)
527 drm_bo_destroy_locked(tmp_bo);
528 mutex_unlock(&dev->struct_mutex);
531 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
533 void drm_putback_buffer_objects(struct drm_device *dev)
535 struct drm_buffer_manager *bm = &dev->bm;
536 struct list_head *list = &bm->unfenced;
537 struct drm_buffer_object *entry, *next;
539 mutex_lock(&dev->struct_mutex);
540 list_for_each_entry_safe(entry, next, list, lru) {
541 atomic_inc(&entry->usage);
542 mutex_unlock(&dev->struct_mutex);
544 mutex_lock(&entry->mutex);
545 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
546 mutex_lock(&dev->struct_mutex);
548 list_del_init(&entry->lru);
549 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
550 DRM_WAKEUP(&entry->event_queue);
553 * FIXME: Might want to put back on head of list
554 * instead of tail here.
557 drm_bo_add_to_lru(entry);
558 mutex_unlock(&entry->mutex);
559 drm_bo_usage_deref_locked(&entry);
561 mutex_unlock(&dev->struct_mutex);
563 EXPORT_SYMBOL(drm_putback_buffer_objects);
567 * Note. The caller has to register (if applicable)
568 * and deregister fence object usage.
571 int drm_fence_buffer_objects(struct drm_device *dev,
572 struct list_head *list,
573 uint32_t fence_flags,
574 struct drm_fence_object * fence,
575 struct drm_fence_object ** used_fence)
577 struct drm_buffer_manager *bm = &dev->bm;
578 struct drm_buffer_object *entry;
579 uint32_t fence_type = 0;
580 uint32_t fence_class = ~0;
585 mutex_lock(&dev->struct_mutex);
588 list = &bm->unfenced;
591 fence_class = fence->class;
593 list_for_each_entry(entry, list, lru) {
594 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
595 fence_type |= entry->new_fence_type;
596 if (fence_class == ~0)
597 fence_class = entry->new_fence_class;
598 else if (entry->new_fence_class != fence_class) {
599 DRM_ERROR("Unmatching fence classes on unfenced list: "
602 entry->new_fence_class);
615 if ((fence_type & fence->type) != fence_type) {
616 DRM_ERROR("Given fence doesn't match buffers "
617 "on unfenced list.\n");
622 mutex_unlock(&dev->struct_mutex);
623 ret = drm_fence_object_create(dev, fence_class, fence_type,
624 fence_flags | DRM_FENCE_FLAG_EMIT,
626 mutex_lock(&dev->struct_mutex);
635 entry = list_entry(l, struct drm_buffer_object, lru);
636 atomic_inc(&entry->usage);
637 mutex_unlock(&dev->struct_mutex);
638 mutex_lock(&entry->mutex);
639 mutex_lock(&dev->struct_mutex);
641 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
644 drm_fence_usage_deref_locked(&entry->fence);
645 entry->fence = drm_fence_reference_locked(fence);
646 entry->fence_class = entry->new_fence_class;
647 entry->fence_type = entry->new_fence_type;
648 DRM_FLAG_MASKED(entry->priv_flags, 0,
649 _DRM_BO_FLAG_UNFENCED);
650 DRM_WAKEUP(&entry->event_queue);
651 drm_bo_add_to_lru(entry);
653 mutex_unlock(&entry->mutex);
654 drm_bo_usage_deref_locked(&entry);
657 DRM_DEBUG("Fenced %d buffers\n", count);
659 mutex_unlock(&dev->struct_mutex);
663 EXPORT_SYMBOL(drm_fence_buffer_objects);
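
/*
 * Typical use (sketch): after submitting a batch that references the buffers
 * currently on the unfenced list, fence them in one go and drop the extra
 * reference returned in "fence" (compare drm_bo_clean_unfenced() below):
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (!ret)
 *		drm_fence_usage_deref_unlocked(&fence);
 */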
669 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
673 struct drm_device *dev = bo->dev;
674 struct drm_bo_mem_reg evict_mem;
677 * Someone might have modified the buffer before we took the buffer mutex.
680 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
682 if (bo->mem.mem_type != mem_type)
685 ret = drm_bo_wait(bo, 0, 0, no_wait);
687 if (ret && ret != -EAGAIN) {
688 DRM_ERROR("Failed to expire fence before "
689 "buffer eviction.\n");
694 evict_mem.mm_node = NULL;
696 if (bo->type == drm_bo_type_fake) {
697 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
698 bo->mem.mm_node = NULL;
703 evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
704 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
708 DRM_ERROR("Failed to find memory space for "
709 "buffer 0x%p eviction.\n", bo);
713 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
717 DRM_ERROR("Buffer eviction failed\n");
722 mutex_lock(&dev->struct_mutex);
723 if (evict_mem.mm_node) {
724 if (evict_mem.mm_node != bo->pinned_node)
725 drm_mm_put_block(evict_mem.mm_node);
726 evict_mem.mm_node = NULL;
729 drm_bo_add_to_lru(bo);
730 mutex_unlock(&dev->struct_mutex);
732 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
733 _DRM_BO_FLAG_EVICTED);
740 * Repeatedly evict memory from the LRU for @mem_type until we create enough
741 * space, or we've evicted everything and there isn't enough space.
743 static int drm_bo_mem_force_space(struct drm_device * dev,
744 struct drm_bo_mem_reg * mem,
745 uint32_t mem_type, int no_wait)
747 struct drm_mm_node *node;
748 struct drm_buffer_manager *bm = &dev->bm;
749 struct drm_buffer_object *entry;
750 struct drm_mem_type_manager *man = &bm->man[mem_type];
751 struct list_head *lru;
752 unsigned long num_pages = mem->num_pages;
755 mutex_lock(&dev->struct_mutex);
757 node = drm_mm_search_free(&man->manager, num_pages,
758 mem->page_alignment, 1);
763 if (lru->next == lru)
766 entry = list_entry(lru->next, struct drm_buffer_object, lru);
767 atomic_inc(&entry->usage);
768 mutex_unlock(&dev->struct_mutex);
769 mutex_lock(&entry->mutex);
770 BUG_ON(entry->pinned);
772 ret = drm_bo_evict(entry, mem_type, no_wait);
773 mutex_unlock(&entry->mutex);
774 drm_bo_usage_deref_unlocked(&entry);
777 mutex_lock(&dev->struct_mutex);
781 mutex_unlock(&dev->struct_mutex);
785 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
786 mutex_unlock(&dev->struct_mutex);
788 mem->mem_type = mem_type;
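
/*
 * Check whether the memory type @mem_type can back a placement with the
 * flags in @mask. Returns nonzero if it can, and stores the flags the
 * placement would actually get (memory-type bit plus CACHED / MAPPABLE as
 * supported by the manager) in @res_mask.
 */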
792 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
794 uint32_t mask, uint32_t * res_mask)
796 uint32_t cur_flags = drm_bo_type_flags(mem_type);
799 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
800 cur_flags |= DRM_BO_FLAG_CACHED;
801 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
802 cur_flags |= DRM_BO_FLAG_MAPPABLE;
803 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
804 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
806 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
809 if (mem_type == DRM_BO_MEM_LOCAL) {
810 *res_mask = cur_flags;
814 flag_diff = (mask ^ cur_flags);
815 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
816 (!(mask & DRM_BO_FLAG_CACHED) ||
817 (mask & DRM_BO_FLAG_FORCE_CACHING)))
820 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
821 ((mask & DRM_BO_FLAG_MAPPABLE) ||
822 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
825 *res_mask = cur_flags;
830 * Creates space for memory region @mem according to its type.
832 * This function first searches for free space in compatible memory types in
833 * the priority order defined by the driver. If free space isn't found, then
834 * drm_bo_mem_force_space is attempted in priority order to evict and find
837 int drm_bo_mem_space(struct drm_buffer_object * bo,
838 struct drm_bo_mem_reg * mem, int no_wait)
840 struct drm_device *dev = bo->dev;
841 struct drm_buffer_manager *bm = &dev->bm;
842 struct drm_mem_type_manager *man;
844 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
845 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
847 uint32_t mem_type = DRM_BO_MEM_LOCAL;
852 struct drm_mm_node *node = NULL;
856 for (i = 0; i < num_prios; ++i) {
858 man = &bm->man[mem_type];
860 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
866 if (mem_type == DRM_BO_MEM_LOCAL)
869 if ((mem_type == bo->pinned_mem_type) &&
870 (bo->pinned_node != NULL)) {
871 node = bo->pinned_node;
875 mutex_lock(&dev->struct_mutex);
876 if (man->has_type && man->use_type) {
878 node = drm_mm_search_free(&man->manager, mem->num_pages,
879 mem->page_alignment, 1);
881 node = drm_mm_get_block(node, mem->num_pages,
882 mem->page_alignment);
884 mutex_unlock(&dev->struct_mutex);
889 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
891 mem->mem_type = mem_type;
892 mem->flags = cur_flags;
899 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
900 prios = dev->driver->bo_driver->mem_busy_prio;
902 for (i = 0; i < num_prios; ++i) {
904 man = &bm->man[mem_type];
909 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
912 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
915 mem->flags = cur_flags;
923 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
927 EXPORT_SYMBOL(drm_bo_mem_space);
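
/*
 * Typical use (sketch): fill in a drm_bo_mem_reg with size, alignment and
 * the requested placement mask and let drm_bo_mem_space() pick and reserve
 * a region; see drm_bo_move_buffer() below for the real caller:
 *
 *	struct drm_bo_mem_reg mem;
 *
 *	mem.num_pages = bo->num_pages;
 *	mem.size = mem.num_pages << PAGE_SHIFT;
 *	mem.mask = new_mem_flags;
 *	mem.page_alignment = bo->mem.page_alignment;
 *	ret = drm_bo_mem_space(bo, &mem, no_wait);
 */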
929 static int drm_bo_new_mask(struct drm_buffer_object * bo,
930 uint64_t new_mask, uint32_t hint)
934 if (bo->type == drm_bo_type_user) {
935 DRM_ERROR("User buffers are not supported yet\n");
939 new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
943 DRM_ERROR("Invalid buffer object rwx properties\n");
947 bo->mem.mask = new_mask;
952 * Call dev->struct_mutex locked.
955 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
956 uint32_t handle, int check_owner)
958 struct drm_user_object *uo;
959 struct drm_buffer_object *bo;
961 uo = drm_lookup_user_object(file_priv, handle);
963 if (!uo || (uo->type != drm_buffer_type)) {
964 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
968 if (check_owner && file_priv != uo->owner) {
969 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
973 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
974 atomic_inc(&bo->usage);
977 EXPORT_SYMBOL(drm_lookup_buffer_object);
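
/*
 * Note: a successful lookup takes a usage reference on the returned buffer
 * object, so every caller must balance it with drm_bo_usage_deref_locked()
 * or drm_bo_usage_deref_unlocked(), as the ioctl helpers below do.
 */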
980 * Call bo->mutex locked.
981 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
982 * Unlike drm_bo_busy(), this does not do any fence flushing.
985 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
987 struct drm_fence_object *fence = bo->fence;
989 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
991 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
992 drm_fence_usage_deref_unlocked(&bo->fence);
1001 * Call bo->mutex locked.
1002 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1005 static int drm_bo_busy(struct drm_buffer_object * bo)
1007 struct drm_fence_object *fence = bo->fence;
1009 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1011 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1012 drm_fence_usage_deref_unlocked(&bo->fence);
1015 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1016 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1017 drm_fence_usage_deref_unlocked(&bo->fence);
1025 static int drm_bo_read_cached(struct drm_buffer_object * bo)
1029 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1030 if (bo->mem.mm_node)
1031 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1036 * Wait until a buffer is unmapped.
1039 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1043 if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1046 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1047 atomic_read(&bo->mapped) == -1);
1055 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1059 mutex_lock(&bo->mutex);
1060 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1061 mutex_unlock(&bo->mutex);
1066 * Wait until a buffer scheduled to be fenced moves off the unfenced list.
1067 * Until then, we cannot really do anything with it except delete it.
1068 * The unfenced list is a PITA, and the operations
1070 * 1) validating, 2) submitting commands, and 3) fencing
1072 * Should really be an atomic operation.
1073 * We now "solve" this problem by keeping
1074 * the buffer "unfenced" after validating, but before fencing.
1077 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1080 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1088 mutex_unlock(&bo->mutex);
1089 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1090 !drm_bo_check_unfenced(bo));
1091 mutex_lock(&bo->mutex);
1094 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1096 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1106 * Fill in the ioctl reply argument with buffer info.
1110 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1111 struct drm_bo_info_rep *rep)
1116 rep->handle = bo->base.hash.key;
1117 rep->flags = bo->mem.flags;
1118 rep->size = bo->num_pages * PAGE_SIZE;
1119 rep->offset = bo->offset;
1120 rep->arg_handle = bo->map_list.user_token;
1121 rep->mask = bo->mem.mask;
1122 rep->buffer_start = bo->buffer_start;
1123 rep->fence_flags = bo->fence_type;
1125 rep->page_alignment = bo->mem.page_alignment;
1127 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1128 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1134 * Wait for buffer idle and register that we've mapped the buffer.
1135 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1136 * so that if the client dies, the mapping is automatically removed.
1140 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1141 uint32_t map_flags, unsigned hint,
1142 struct drm_bo_info_rep *rep)
1144 struct drm_buffer_object *bo;
1145 struct drm_device *dev = file_priv->head->dev;
1147 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1149 mutex_lock(&dev->struct_mutex);
1150 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1151 mutex_unlock(&dev->struct_mutex);
1156 mutex_lock(&bo->mutex);
1157 if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1158 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1164 * If this returns true, we are currently unmapped.
1165 * We need to do this test, because unmapping can
1166 * be done without the bo->mutex held.
1170 if (atomic_inc_and_test(&bo->mapped)) {
1171 if (no_wait && drm_bo_busy(bo)) {
1172 atomic_dec(&bo->mapped);
1176 ret = drm_bo_wait(bo, 0, 0, no_wait);
1178 atomic_dec(&bo->mapped);
1182 if ((map_flags & DRM_BO_FLAG_READ) &&
1183 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1184 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1185 drm_bo_read_cached(bo);
1188 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1189 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1190 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1193 * We are already mapped with different flags.
1194 * We need to wait for the buffer to be unmapped.
1197 ret = drm_bo_wait_unmapped(bo, no_wait);
1206 mutex_lock(&dev->struct_mutex);
1207 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1208 mutex_unlock(&dev->struct_mutex);
1210 if (atomic_add_negative(-1, &bo->mapped))
1211 DRM_WAKEUP(&bo->event_queue);
1214 drm_bo_fill_rep_arg(bo, rep);
1216 mutex_unlock(&bo->mutex);
1217 drm_bo_usage_deref_unlocked(&bo);
1221 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1223 struct drm_device *dev = file_priv->head->dev;
1224 struct drm_buffer_object *bo;
1225 struct drm_ref_object *ro;
1228 mutex_lock(&dev->struct_mutex);
1230 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1236 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1242 drm_remove_ref_object(file_priv, ro);
1243 drm_bo_usage_deref_locked(&bo);
1245 mutex_unlock(&dev->struct_mutex);
1250 * Call dev->struct_mutex locked.
1253 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1254 struct drm_user_object * uo,
1255 enum drm_ref_type action)
1257 struct drm_buffer_object *bo =
1258 drm_user_object_entry(uo, struct drm_buffer_object, base);
1261 * We DON'T take bo->mutex here, because drm_bo_wait_unmapped() holds it
1262 * while waiting for the buffer to become unmapped.
1265 BUG_ON(action != _DRM_REF_TYPE1);
1267 if (atomic_add_negative(-1, &bo->mapped))
1268 DRM_WAKEUP(&bo->event_queue);
1273 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1276 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1277 int no_wait, int move_unfenced)
1279 struct drm_device *dev = bo->dev;
1280 struct drm_buffer_manager *bm = &dev->bm;
1282 struct drm_bo_mem_reg mem;
1284 * Flush outstanding fences.
1290 * Wait for outstanding fences.
1293 ret = drm_bo_wait(bo, 0, 0, no_wait);
1297 mem.num_pages = bo->num_pages;
1298 mem.size = mem.num_pages << PAGE_SHIFT;
1299 mem.mask = new_mem_flags;
1300 mem.page_alignment = bo->mem.page_alignment;
1302 mutex_lock(&bm->evict_mutex);
1303 mutex_lock(&dev->struct_mutex);
1305 list_add_tail(&bo->lru, &bm->unfenced);
1306 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1307 _DRM_BO_FLAG_UNFENCED);
1308 mutex_unlock(&dev->struct_mutex);
1311 * Determine where to move the buffer.
1313 ret = drm_bo_mem_space(bo, &mem, no_wait);
1317 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1320 if (ret || !move_unfenced) {
1321 mutex_lock(&dev->struct_mutex);
1323 if (mem.mm_node != bo->pinned_node)
1324 drm_mm_put_block(mem.mm_node);
1327 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1328 DRM_WAKEUP(&bo->event_queue);
1330 drm_bo_add_to_lru(bo);
1331 mutex_unlock(&dev->struct_mutex);
1334 mutex_unlock(&bm->evict_mutex);
1338 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1340 uint32_t flag_diff = (mem->mask ^ mem->flags);
1342 if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1344 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1345 (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
1346 (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1349 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1350 ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1351 (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1356 static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
1358 struct drm_buffer_manager *bm = &dev->bm;
1359 struct drm_mem_type_manager *man;
1360 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1361 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1364 uint32_t mem_type = 0;
1367 if (drm_bo_mem_compat(mem))
1370 BUG_ON(mem->mm_node);
1372 for (i = 0; i < num_prios; ++i) {
1373 mem_type = prios[i];
1374 man = &bm->man[mem_type];
1375 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1382 mem->mm_node = NULL;
1383 mem->mem_type = mem_type;
1384 mem->flags = cur_flags;
1385 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1389 DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1390 (unsigned long long) mem->mask);
1398 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1399 uint32_t fence_class,
1400 int move_unfenced, int no_wait)
1402 struct drm_device *dev = bo->dev;
1403 struct drm_buffer_manager *bm = &dev->bm;
1404 struct drm_bo_driver *driver = dev->driver->bo_driver;
1408 DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1409 (unsigned long long) bo->mem.mask,
1410 (unsigned long long) bo->mem.flags);
1412 ret = driver->fence_type(bo, &fence_class, &ftype);
1415 DRM_ERROR("Driver did not support given buffer permissions\n");
1419 if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
1420 DRM_ERROR("Attempt to validate pinned buffer into different memory "
1426 * We're switching command submission mechanism,
1427 * or cannot simply rely on the hardware serializing for us.
1429 * Wait for buffer idle.
1432 if ((fence_class != bo->fence_class) ||
1433 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1435 ret = drm_bo_wait(bo, 0, 0, no_wait);
1442 bo->new_fence_class = fence_class;
1443 bo->new_fence_type = ftype;
1445 ret = drm_bo_wait_unmapped(bo, no_wait);
1447 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1450 if (bo->type == drm_bo_type_fake) {
1451 ret = drm_bo_check_fake(dev, &bo->mem);
1457 * Check whether we need to move the buffer.
1460 if (!drm_bo_mem_compat(&bo->mem)) {
1461 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1465 DRM_ERROR("Failed moving buffer.\n");
1471 * We might need to add a TTM.
1474 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1475 ret = drm_bo_add_ttm(bo);
1479 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1482 * Finally, adjust lru to be sure.
1485 mutex_lock(&dev->struct_mutex);
1487 if (move_unfenced) {
1488 list_add_tail(&bo->lru, &bm->unfenced);
1489 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1490 _DRM_BO_FLAG_UNFENCED);
1492 drm_bo_add_to_lru(bo);
1493 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1494 DRM_WAKEUP(&bo->event_queue);
1495 DRM_FLAG_MASKED(bo->priv_flags, 0,
1496 _DRM_BO_FLAG_UNFENCED);
1499 mutex_unlock(&dev->struct_mutex);
1504 int drm_bo_do_validate(struct drm_buffer_object *bo,
1505 uint64_t flags, uint64_t mask, uint32_t hint,
1506 uint32_t fence_class,
1508 struct drm_bo_info_rep *rep)
1512 mutex_lock(&bo->mutex);
1513 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1518 if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1520 ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
1526 DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1527 ret = drm_bo_new_mask(bo, flags, hint);
1531 ret = drm_buffer_object_validate(bo,
1533 !(hint & DRM_BO_HINT_DONT_FENCE),
1537 drm_bo_fill_rep_arg(bo, rep);
1539 mutex_unlock(&bo->mutex);
1542 EXPORT_SYMBOL(drm_bo_do_validate);
1545 int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
1546 uint32_t fence_class,
1547 uint64_t flags, uint64_t mask, uint32_t hint,
1548 struct drm_bo_info_rep * rep,
1549 struct drm_buffer_object **bo_rep)
1551 struct drm_device *dev = file_priv->head->dev;
1552 struct drm_buffer_object *bo;
1554 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1556 mutex_lock(&dev->struct_mutex);
1557 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1558 mutex_unlock(&dev->struct_mutex);
1564 ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1570 drm_bo_usage_deref_unlocked(&bo);
1574 EXPORT_SYMBOL(drm_bo_handle_validate);
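
/*
 * Example (sketch, assuming the DRM_BO_FLAG_MEM_TT placement flag): validate
 * a buffer handle into TT memory without touching its other flags; with no
 * DRM_BO_HINT_DONT_FENCE hint the buffer then sits on the unfenced list
 * until drm_fence_buffer_objects() is called:
 *
 *	ret = drm_bo_handle_validate(file_priv, handle, 0,
 *				     DRM_BO_FLAG_MEM_TT, DRM_BO_FLAG_MEM_TT,
 *				     0, &rep, NULL);
 */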
1577 * Fills out the generic buffer object ioctl reply with the information for
1578 * the BO with the given handle.
1580 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1581 struct drm_bo_info_rep *rep)
1583 struct drm_device *dev = file_priv->head->dev;
1584 struct drm_buffer_object *bo;
1586 mutex_lock(&dev->struct_mutex);
1587 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1588 mutex_unlock(&dev->struct_mutex);
1593 mutex_lock(&bo->mutex);
1594 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1595 (void)drm_bo_busy(bo);
1596 drm_bo_fill_rep_arg(bo, rep);
1597 mutex_unlock(&bo->mutex);
1598 drm_bo_usage_deref_unlocked(&bo);
1602 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1604 struct drm_bo_info_rep *rep)
1606 struct drm_device *dev = file_priv->head->dev;
1607 struct drm_buffer_object *bo;
1608 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1611 mutex_lock(&dev->struct_mutex);
1612 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1613 mutex_unlock(&dev->struct_mutex);
1619 mutex_lock(&bo->mutex);
1620 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1623 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1627 drm_bo_fill_rep_arg(bo, rep);
1630 mutex_unlock(&bo->mutex);
1631 drm_bo_usage_deref_unlocked(&bo);
1635 int drm_buffer_object_create(struct drm_device *dev,
1637 enum drm_bo_type type,
1640 uint32_t page_alignment,
1641 unsigned long buffer_start,
1642 struct drm_buffer_object ** buf_obj)
1644 struct drm_buffer_manager *bm = &dev->bm;
1645 struct drm_buffer_object *bo;
1646 struct drm_bo_driver *driver = dev->driver->bo_driver;
1648 unsigned long num_pages;
1650 if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1651 DRM_ERROR("Invalid buffer object start.\n");
1654 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1655 if (num_pages == 0) {
1656 DRM_ERROR("Illegal buffer object size.\n");
1660 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1665 mutex_init(&bo->mutex);
1666 mutex_lock(&bo->mutex);
1668 atomic_set(&bo->usage, 1);
1669 atomic_set(&bo->mapped, -1);
1670 DRM_INIT_WAITQUEUE(&bo->event_queue);
1671 INIT_LIST_HEAD(&bo->lru);
1672 INIT_LIST_HEAD(&bo->pinned_lru);
1673 INIT_LIST_HEAD(&bo->ddestroy);
1674 #ifdef DRM_ODD_MM_COMPAT
1675 INIT_LIST_HEAD(&bo->p_mm_list);
1676 INIT_LIST_HEAD(&bo->vma_list);
1680 bo->num_pages = num_pages;
1681 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1682 bo->mem.num_pages = bo->num_pages;
1683 bo->mem.mm_node = NULL;
1684 bo->mem.page_alignment = page_alignment;
1685 if (bo->type == drm_bo_type_fake) {
1686 bo->offset = buffer_start;
1687 bo->buffer_start = 0;
1689 bo->buffer_start = buffer_start;
1692 bo->mem.flags = 0ULL;
1693 bo->mem.mask = 0ULL;
1694 atomic_inc(&bm->count);
1695 ret = drm_bo_new_mask(bo, mask, hint);
1700 if (bo->type == drm_bo_type_dc) {
1701 mutex_lock(&dev->struct_mutex);
1702 ret = drm_bo_setup_vm_locked(bo);
1703 mutex_unlock(&dev->struct_mutex);
1708 bo->fence_class = 0;
1709 ret = driver->fence_type(bo, &bo->fence_type);
1711 DRM_ERROR("Driver did not support given buffer permissions\n");
1715 if (bo->type == drm_bo_type_fake) {
1716 ret = drm_bo_check_fake(dev, &bo->mem);
1721 ret = drm_bo_add_ttm(bo);
1725 mutex_lock(&dev->struct_mutex);
1726 drm_bo_add_to_lru(bo);
1727 mutex_unlock(&dev->struct_mutex);
1729 mutex_unlock(&bo->mutex);
1734 mutex_unlock(&bo->mutex);
1736 drm_bo_usage_deref_unlocked(&bo);
1739 EXPORT_SYMBOL(drm_buffer_object_create);
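
/*
 * Example (sketch): create an anonymous kernel buffer object of "size"
 * bytes; the placement mask below is purely illustrative:
 *
 *	struct drm_buffer_object *bo;
 *
 *	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_LOCAL,
 *				       0, 0, 0, &bo);
 *	if (!ret)
 *		... use bo, then drop it with drm_bo_usage_deref_unlocked(&bo) ...
 */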
1741 int drm_bo_add_user_object(struct drm_file *file_priv,
1742 struct drm_buffer_object *bo, int shareable)
1744 struct drm_device *dev = file_priv->head->dev;
1747 mutex_lock(&dev->struct_mutex);
1748 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1752 bo->base.remove = drm_bo_base_deref_locked;
1753 bo->base.type = drm_buffer_type;
1754 bo->base.ref_struct_locked = NULL;
1755 bo->base.unref = drm_buffer_user_object_unmap;
1758 mutex_unlock(&dev->struct_mutex);
1761 EXPORT_SYMBOL(drm_bo_add_user_object);
1763 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
1765 LOCK_TEST_WITH_RETURN(dev, file_priv);
1769 int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1771 struct drm_bo_op_arg curarg;
1772 struct drm_bo_op_arg *arg = data;
1773 struct drm_bo_op_req *req = &arg->d.req;
1774 struct drm_bo_info_rep rep;
1775 struct drm_buffer_object *dummy;
1776 unsigned long next = 0;
1777 void __user *curuserarg = NULL;
1780 DRM_DEBUG("drm_bo_op_ioctl\n");
1782 if (!dev->bm.initialized) {
1783 DRM_ERROR("Buffer object manager is not initialized.\n");
1789 curuserarg = (void __user *)next;
1790 if (copy_from_user(&curarg, curuserarg,
1791 sizeof(curarg)) != 0)
1803 case drm_bo_validate:
1804 ret = drm_bo_lock_test(dev, file_priv);
1807 ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
1808 req->bo_req.fence_class,
1816 DRM_ERROR("Function is not implemented yet.\n");
1818 case drm_bo_ref_fence:
1820 DRM_ERROR("Function is not implemented yet.\n");
1828 * A signal interrupted us. Make sure the ioctl is restartable.
1835 arg->d.rep.ret = ret;
1836 arg->d.rep.bo_info = rep;
1838 if (copy_to_user(curuserarg, &curarg,
1839 sizeof(curarg)) != 0)
1842 } while (next != 0);
1846 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1848 struct drm_bo_create_arg *arg = data;
1849 struct drm_bo_create_req *req = &arg->d.req;
1850 struct drm_bo_info_rep *rep = &arg->d.rep;
1851 struct drm_buffer_object *entry;
1854 DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
1855 (int)(req->size / 1024), req->page_alignment * 4, req->type);
1857 if (!dev->bm.initialized) {
1858 DRM_ERROR("Buffer object manager is not initialized.\n");
1861 if (req->type == drm_bo_type_fake)
1862 LOCK_TEST_WITH_RETURN(dev, file_priv);
1864 ret = drm_buffer_object_create(file_priv->head->dev,
1865 req->size, req->type, req->mask,
1866 req->hint, req->page_alignment,
1867 req->buffer_start, &entry);
1871 ret = drm_bo_add_user_object(file_priv, entry,
1872 req->mask & DRM_BO_FLAG_SHAREABLE);
1874 drm_bo_usage_deref_unlocked(&entry);
1878 mutex_lock(&entry->mutex);
1879 drm_bo_fill_rep_arg(entry, rep);
1880 mutex_unlock(&entry->mutex);
1887 int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1889 struct drm_bo_handle_arg *arg = data;
1890 struct drm_user_object *uo;
1893 DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
1895 if (!dev->bm.initialized) {
1896 DRM_ERROR("Buffer object manager is not initialized.\n");
1900 mutex_lock(&dev->struct_mutex);
1901 uo = drm_lookup_user_object(file_priv, arg->handle);
1902 if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
1903 mutex_unlock(&dev->struct_mutex);
1906 ret = drm_remove_user_object(file_priv, uo);
1907 mutex_unlock(&dev->struct_mutex);
1912 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1914 struct drm_bo_map_wait_idle_arg *arg = data;
1915 struct drm_bo_info_req *req = &arg->d.req;
1916 struct drm_bo_info_rep *rep = &arg->d.rep;
1919 DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);
1921 if (!dev->bm.initialized) {
1922 DRM_ERROR("Buffer object manager is not initialized.\n");
1926 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1934 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1936 struct drm_bo_handle_arg *arg = data;
1939 DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);
1941 if (!dev->bm.initialized) {
1942 DRM_ERROR("Buffer object manager is not initialized.\n");
1946 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1951 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1953 struct drm_bo_reference_info_arg *arg = data;
1954 struct drm_bo_handle_arg *req = &arg->d.req;
1955 struct drm_bo_info_rep *rep = &arg->d.rep;
1956 struct drm_user_object *uo;
1959 DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);
1961 if (!dev->bm.initialized) {
1962 DRM_ERROR("Buffer object manager is not initialized.\n");
1966 ret = drm_user_object_ref(file_priv, req->handle,
1967 drm_buffer_type, &uo);
1971 ret = drm_bo_handle_info(file_priv, req->handle, rep);
1978 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1980 struct drm_bo_handle_arg *arg = data;
1983 DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);
1985 if (!dev->bm.initialized) {
1986 DRM_ERROR("Buffer object manager is not initialized.\n");
1990 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1994 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1996 struct drm_bo_reference_info_arg *arg = data;
1997 struct drm_bo_handle_arg *req = &arg->d.req;
1998 struct drm_bo_info_rep *rep = &arg->d.rep;
2001 DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);
2003 if (!dev->bm.initialized) {
2004 DRM_ERROR("Buffer object manager is not initialized.\n");
2008 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2015 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2017 struct drm_bo_map_wait_idle_arg *arg = data;
2018 struct drm_bo_info_req *req = &arg->d.req;
2019 struct drm_bo_info_rep *rep = &arg->d.rep;
2022 DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);
2024 if (!dev->bm.initialized) {
2025 DRM_ERROR("Buffer object manager is not initialized.\n");
2029 ret = drm_bo_handle_wait(file_priv, req->handle,
2038 * Pins or unpins the given buffer object in the given memory area.
2040 * Pinned buffers will not be evicted from or moved within their memory area.
2041 * Must be called with the hardware lock held for pinning.
2044 drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
2049 mutex_lock(&bo->mutex);
2050 if (bo->pinned == pin) {
2051 mutex_unlock(&bo->mutex);
2056 ret = drm_bo_wait_unfenced(bo, 0, 0);
2058 mutex_unlock(&bo->mutex);
2062 /* Validate the buffer into its pinned location, with no
2065 ret = drm_buffer_object_validate(bo, 0, 0, 0);
2067 mutex_unlock(&bo->mutex);
2071 /* Pull the buffer off of the LRU and add it to the pinned
2074 bo->pinned_mem_type = bo->mem.mem_type;
2075 mutex_lock(&dev->struct_mutex);
2076 list_del_init(&bo->lru);
2077 list_del_init(&bo->pinned_lru);
2078 drm_bo_add_to_pinned_lru(bo);
2080 if (bo->pinned_node != bo->mem.mm_node) {
2081 if (bo->pinned_node != NULL)
2082 drm_mm_put_block(bo->pinned_node);
2083 bo->pinned_node = bo->mem.mm_node;
2087 mutex_unlock(&dev->struct_mutex);
2090 mutex_lock(&dev->struct_mutex);
2092 /* Remove our buffer from the pinned list */
2093 if (bo->pinned_node != bo->mem.mm_node)
2094 drm_mm_put_block(bo->pinned_node);
2096 list_del_init(&bo->pinned_lru);
2097 bo->pinned_node = NULL;
2099 mutex_unlock(&dev->struct_mutex);
2101 mutex_unlock(&bo->mutex);
2105 int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
2106 struct drm_file *file_priv)
2108 struct drm_bo_set_pin_arg *arg = data;
2109 struct drm_bo_set_pin_req *req = &arg->d.req;
2110 struct drm_bo_info_rep *rep = &arg->d.rep;
2111 struct drm_buffer_object *bo;
2114 DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
2115 req->handle, req->pin);
2117 if (!dev->bm.initialized) {
2118 DRM_ERROR("Buffer object manager is not initialized.\n");
2122 if (req->pin < 0 || req->pin > 1) {
2123 DRM_ERROR("Bad arguments to set_pin\n");
2128 LOCK_TEST_WITH_RETURN(dev, file_priv);
2130 mutex_lock(&dev->struct_mutex);
2131 bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
2132 mutex_unlock(&dev->struct_mutex);
2137 ret = drm_bo_set_pin(dev, bo, req->pin);
2139 drm_bo_usage_deref_unlocked(&bo);
2143 drm_bo_fill_rep_arg(bo, rep);
2144 drm_bo_usage_deref_unlocked(&bo);
2151 * Clean the unfenced list and put the buffers on the regular LRU.
2152 * This is part of the memory manager cleanup and should only be
2153 * called with the DRI lock held.
2154 * Call dev->struct_mutex locked.
2157 static void drm_bo_clean_unfenced(struct drm_device *dev)
2159 struct drm_buffer_manager *bm = &dev->bm;
2160 struct list_head *head, *list;
2161 struct drm_buffer_object *entry;
2162 struct drm_fence_object *fence;
2164 head = &bm->unfenced;
2166 if (list_empty(head))
2169 DRM_ERROR("Clean unfenced\n");
2171 if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
2174 * Fixme: Should really wait here.
2179 drm_fence_usage_deref_locked(&fence);
2181 if (list_empty(head))
2184 DRM_ERROR("Really clean unfenced\n");
2187 while(list != head) {
2188 prefetch(list->next);
2189 entry = list_entry(list, struct drm_buffer_object, lru);
2191 atomic_inc(&entry->usage);
2192 mutex_unlock(&dev->struct_mutex);
2193 mutex_lock(&entry->mutex);
2194 mutex_lock(&dev->struct_mutex);
2196 list_del(&entry->lru);
2197 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
2198 drm_bo_add_to_lru(entry);
2199 mutex_unlock(&entry->mutex);
2204 static int drm_bo_leave_list(struct drm_buffer_object * bo,
2206 int free_pinned, int allow_errors)
2208 struct drm_device *dev = bo->dev;
2211 mutex_lock(&bo->mutex);
2213 ret = drm_bo_expire_fence(bo, allow_errors);
2218 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2219 mutex_lock(&dev->struct_mutex);
2220 list_del_init(&bo->pinned_lru);
2221 if (bo->pinned_node == bo->mem.mm_node)
2222 bo->pinned_node = NULL;
2223 if (bo->pinned_node != NULL) {
2224 drm_mm_put_block(bo->pinned_node);
2225 bo->pinned_node = NULL;
2227 mutex_unlock(&dev->struct_mutex);
2231 DRM_ERROR("A pinned buffer was present at "
2232 "cleanup. Removing flag and evicting.\n");
2236 if (bo->mem.mem_type == mem_type)
2237 ret = drm_bo_evict(bo, mem_type, 0);
2244 DRM_ERROR("Cleanup eviction failed\n");
2249 mutex_unlock(&bo->mutex);
2254 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2258 return list_entry(list, struct drm_buffer_object, pinned_lru);
2260 return list_entry(list, struct drm_buffer_object, lru);
2264 * dev->struct_mutex locked.
2267 static int drm_bo_force_list_clean(struct drm_device * dev,
2268 struct list_head *head,
2274 struct list_head *list, *next, *prev;
2275 struct drm_buffer_object *entry, *nentry;
2280 * The list traversal is a bit odd here, because an item may
2281 * disappear from the list when we release the struct_mutex or
2282 * when we decrease the usage count. Also we're not guaranteed
2283 * to drain pinned lists, so we can't always restart.
2288 list_for_each_safe(list, next, head) {
2291 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2292 atomic_inc(&entry->usage);
2294 atomic_dec(&nentry->usage);
2299 * Protect the next item from destruction, so we can check
2300 * its list pointers later on.
2304 nentry = drm_bo_entry(next, pinned_list);
2305 atomic_inc(&nentry->usage);
2307 mutex_unlock(&dev->struct_mutex);
2309 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2311 mutex_lock(&dev->struct_mutex);
2313 drm_bo_usage_deref_locked(&entry);
2318 * Has the next item disappeared from the list?
2321 do_restart = ((next->prev != list) && (next->prev != prev));
2323 if (nentry != NULL && do_restart)
2324 drm_bo_usage_deref_locked(&nentry);
2332 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2334 struct drm_buffer_manager *bm = &dev->bm;
2335 struct drm_mem_type_manager *man = &bm->man[mem_type];
2338 if (mem_type >= DRM_BO_MEM_TYPES) {
2339 DRM_ERROR("Illegal memory type %d\n", mem_type);
2343 if (!man->has_type) {
2344 DRM_ERROR("Trying to take down uninitialized "
2345 "memory manager type %u\n", mem_type);
2354 drm_bo_clean_unfenced(dev);
2355 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2356 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2358 if (drm_mm_clean(&man->manager)) {
2359 drm_mm_takedown(&man->manager);
2367 EXPORT_SYMBOL(drm_bo_clean_mm);
2370 * Evict all buffers of a particular mem_type, but leave memory manager
2371 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2372 * point since we have the hardware lock.
2375 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2378 struct drm_buffer_manager *bm = &dev->bm;
2379 struct drm_mem_type_manager *man = &bm->man[mem_type];
2381 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2382 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2386 if (!man->has_type) {
2387 DRM_ERROR("Memory type %u has not been initialized.\n",
2392 drm_bo_clean_unfenced(dev);
2393 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2396 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2401 int drm_bo_init_mm(struct drm_device * dev,
2403 unsigned long p_offset, unsigned long p_size)
2405 struct drm_buffer_manager *bm = &dev->bm;
2407 struct drm_mem_type_manager *man;
2409 if (type >= DRM_BO_MEM_TYPES) {
2410 DRM_ERROR("Illegal memory type %d\n", type);
2414 man = &bm->man[type];
2415 if (man->has_type) {
2416 DRM_ERROR("Memory manager already initialized for type %d\n",
2421 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2426 if (type != DRM_BO_MEM_LOCAL) {
2428 DRM_ERROR("Zero size memory manager type %d\n", type);
2431 ret = drm_mm_init(&man->manager, p_offset, p_size);
2438 INIT_LIST_HEAD(&man->lru);
2439 INIT_LIST_HEAD(&man->pinned);
2443 EXPORT_SYMBOL(drm_bo_init_mm);
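
/*
 * Memory types other than DRM_BO_MEM_LOCAL are set up either by the driver
 * or through drm_mm_init_ioctl(); offset and size are given in pages. A
 * sketch for a TT aperture ("aperture_size" is a hypothetical byte size):
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
 *			     aperture_size >> PAGE_SHIFT);
 */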
2446 * This is called from lastclose, so we don't need to bother about
2447 * any clients still running when we set the initialized flag to zero.
2450 int drm_bo_driver_finish(struct drm_device * dev)
2452 struct drm_buffer_manager *bm = &dev->bm;
2454 unsigned i = DRM_BO_MEM_TYPES;
2455 struct drm_mem_type_manager *man;
2457 mutex_lock(&dev->bm.init_mutex);
2458 mutex_lock(&dev->struct_mutex);
2460 if (!bm->initialized)
2462 bm->initialized = 0;
2466 if (man->has_type) {
2468 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2470 DRM_ERROR("DRM memory manager type %d "
2471 "is not clean.\n", i);
2476 mutex_unlock(&dev->struct_mutex);
2478 if (!cancel_delayed_work(&bm->wq)) {
2479 flush_scheduled_work();
2481 mutex_lock(&dev->struct_mutex);
2482 drm_bo_delayed_delete(dev, 1);
2483 if (list_empty(&bm->ddestroy)) {
2484 DRM_DEBUG("Delayed destroy list was clean\n");
2486 if (list_empty(&bm->man[0].lru)) {
2487 DRM_DEBUG("Swap list was clean\n");
2489 if (list_empty(&bm->man[0].pinned)) {
2490 DRM_DEBUG("NO_MOVE list was clean\n");
2492 if (list_empty(&bm->unfenced)) {
2493 DRM_DEBUG("Unfenced list was clean\n");
2496 mutex_unlock(&dev->struct_mutex);
2497 mutex_unlock(&dev->bm.init_mutex);
2501 int drm_bo_driver_init(struct drm_device * dev)
2503 struct drm_bo_driver *driver = dev->driver->bo_driver;
2504 struct drm_buffer_manager *bm = &dev->bm;
2507 mutex_lock(&dev->bm.init_mutex);
2508 mutex_lock(&dev->struct_mutex);
2513 * Initialize the system memory buffer type.
2514 * Other types need to be driver / IOCTL initialized.
2516 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2520 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2521 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2523 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2525 bm->initialized = 1;
2527 atomic_set(&bm->count, 0);
2529 INIT_LIST_HEAD(&bm->unfenced);
2530 INIT_LIST_HEAD(&bm->ddestroy);
2532 mutex_unlock(&dev->struct_mutex);
2533 mutex_unlock(&dev->bm.init_mutex);
2537 EXPORT_SYMBOL(drm_bo_driver_init);
2539 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2541 struct drm_mm_init_arg *arg = data;
2542 struct drm_buffer_manager *bm = &dev->bm;
2543 struct drm_bo_driver *driver = dev->driver->bo_driver;
2546 DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
2547 arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));
2550 DRM_ERROR("Buffer objects are not supported by this driver\n");
2555 if (arg->magic != DRM_BO_INIT_MAGIC) {
2556 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2557 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2560 if (arg->major != DRM_BO_INIT_MAJOR) {
2561 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2562 "\tversion don't match. Got %d, expected %d,\n",
2563 arg->major, DRM_BO_INIT_MAJOR);
2566 if (arg->minor > DRM_BO_INIT_MINOR) {
2567 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2568 "\tlibdrm buffer object interface version is %d.%d.\n"
2569 "\tkernel DRM buffer object interface version is %d.%d\n",
2570 arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2574 mutex_lock(&dev->bm.init_mutex);
2575 mutex_lock(&dev->struct_mutex);
2576 if (!bm->initialized) {
2577 DRM_ERROR("DRM memory manager was not initialized.\n");
2580 if (arg->mem_type == 0) {
2581 DRM_ERROR("System memory buffers already initialized.\n");
2584 ret = drm_bo_init_mm(dev, arg->mem_type,
2585 arg->p_offset, arg->p_size);
2588 mutex_unlock(&dev->struct_mutex);
2589 mutex_unlock(&dev->bm.init_mutex);
2596 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2598 struct drm_mm_type_arg *arg = data;
2599 struct drm_buffer_manager *bm = &dev->bm;
2600 struct drm_bo_driver *driver = dev->driver->bo_driver;
2603 DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);
2606 DRM_ERROR("Buffer objects are not supported by this driver\n");
2610 LOCK_TEST_WITH_RETURN(dev, file_priv);
2611 mutex_lock(&dev->bm.init_mutex);
2612 mutex_lock(&dev->struct_mutex);
2614 if (!bm->initialized) {
2615 DRM_ERROR("DRM memory manager was not initialized\n");
2618 if (arg->mem_type == 0) {
2619 DRM_ERROR("No takedown for System memory buffers.\n");
2623 if (drm_bo_clean_mm(dev, arg->mem_type)) {
2624 DRM_ERROR("Memory manager type %d not clean. "
2625 "Delaying takedown\n", arg->mem_type);
2628 mutex_unlock(&dev->struct_mutex);
2629 mutex_unlock(&dev->bm.init_mutex);
2636 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2638 struct drm_mm_type_arg *arg = data;
2639 struct drm_bo_driver *driver = dev->driver->bo_driver;
2642 DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);
2645 DRM_ERROR("Buffer objects are not supported by this driver\n");
2649 LOCK_TEST_WITH_RETURN(dev, file_priv);
2650 mutex_lock(&dev->bm.init_mutex);
2651 mutex_lock(&dev->struct_mutex);
2652 ret = drm_bo_lock_mm(dev, arg->mem_type);
2653 mutex_unlock(&dev->struct_mutex);
2654 mutex_unlock(&dev->bm.init_mutex);
2661 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2663 struct drm_bo_driver *driver = dev->driver->bo_driver;
2666 DRM_DEBUG("drm_mm_unlock_ioctl\n");
2669 DRM_ERROR("Buffer objects are not supported by this driver\n");
2673 LOCK_TEST_WITH_RETURN(dev, file_priv);
2674 mutex_lock(&dev->bm.init_mutex);
2675 mutex_lock(&dev->struct_mutex);
2678 mutex_unlock(&dev->struct_mutex);
2679 mutex_unlock(&dev->bm.init_mutex);
2687 * buffer object vm functions.
2690 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2692 struct drm_buffer_manager *bm = &dev->bm;
2693 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2695 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2696 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2699 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2702 if (mem->flags & DRM_BO_FLAG_CACHED)
2708 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2711 * \c Get the PCI offset for the buffer object memory.
2713 * \param bo The buffer object.
2714 * \param bus_base On return the base of the PCI region
2715 * \param bus_offset On return the byte offset into the PCI region
2716 * \param bus_size On return the byte size of the buffer object or zero if
2717 * the buffer object memory is not accessible through a PCI region.
2718 * \return Failure indication.
2720 * Returns -EINVAL if the buffer object is currently not mappable.
2721 * Otherwise returns zero.
2724 int drm_bo_pci_offset(struct drm_device *dev,
2725 struct drm_bo_mem_reg *mem,
2726 unsigned long *bus_base,
2727 unsigned long *bus_offset, unsigned long *bus_size)
2729 struct drm_buffer_manager *bm = &dev->bm;
2730 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2733 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2736 if (drm_mem_reg_is_pci(dev, mem)) {
2737 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2738 *bus_size = mem->num_pages << PAGE_SHIFT;
2739 *bus_base = man->io_offset;
2746 * \c Kill all user-space virtual mappings of this buffer object.
2748 * \param bo The buffer object.
2750 * Call bo->mutex locked.
2753 void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2755 struct drm_device *dev = bo->dev;
2756 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2757 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2759 if (!dev->dev_mapping)
2762 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2765 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2767 struct drm_map_list *list = &bo->map_list;
2768 drm_local_map_t *map;
2769 struct drm_device *dev = bo->dev;
2771 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2772 if (list->user_token) {
2773 drm_ht_remove_item(&dev->map_hash, &list->hash);
2774 list->user_token = 0;
2776 if (list->file_offset_node) {
2777 drm_mm_put_block(list->file_offset_node);
2778 list->file_offset_node = NULL;
2785 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2787 list->user_token = 0ULL;
2788 drm_bo_usage_deref_locked(&bo);
2791 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2793 struct drm_map_list *list = &bo->map_list;
2794 drm_local_map_t *map;
2795 struct drm_device *dev = bo->dev;
2797 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2798 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2804 map->type = _DRM_TTM;
2805 map->flags = _DRM_REMOVABLE;
2806 map->size = bo->mem.num_pages * PAGE_SIZE;
2807 atomic_inc(&bo->usage);
2808 map->handle = (void *)bo;
2810 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2811 bo->mem.num_pages, 0, 0);
2813 if (!list->file_offset_node) {
2814 drm_bo_takedown_vm_locked(bo);
2818 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2819 bo->mem.num_pages, 0);
2821 list->hash.key = list->file_offset_node->start;
2822 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2823 drm_bo_takedown_vm_locked(bo);
2827 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;