1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads,
40 * hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex also protects the buffer list heads, so to manipulate those,
44 * we need both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48 * the list traversal will, in general, need to be restarted.
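/*
 * Illustrative sketch of the rules above (not part of the original code):
 * to get from a list protected by dev->struct_mutex to a locked buffer
 * object, take a usage reference first, drop dev->struct_mutex, and only
 * then take bo->mutex, since the locking order is bo->mutex before
 * dev->struct_mutex. Dropping dev->struct_mutex is what may force the
 * list traversal to be restarted.
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&bo->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&bo->mutex);
 *	(operate on bo; dev->struct_mutex may be taken again here)
 *	mutex_unlock(&bo->mutex);
 *	drm_bo_usage_deref_unlocked(&bo);
 */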
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
56 static inline uint64_t drm_bo_type_flags(unsigned type)
58 return (1ULL << (24 + type));
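/*
 * For reference (assuming the DRM_BO_FLAG_MEM_* layout in this tree's
 * drm.h, where the memory-type flags start at bit 24), this maps a memory
 * type index onto its placement flag, e.g.:
 *
 *	drm_bo_type_flags(DRM_BO_MEM_LOCAL) == DRM_BO_FLAG_MEM_LOCAL
 *	drm_bo_type_flags(DRM_BO_MEM_TT)    == DRM_BO_FLAG_MEM_TT
 */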
62 * bo locked. dev->struct_mutex locked.
65 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
67 struct drm_mem_type_manager *man;
69 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
70 DRM_ASSERT_LOCKED(&bo->mutex);
72 man = &bo->dev->bm.man[bo->pinned_mem_type];
73 list_add_tail(&bo->pinned_lru, &man->pinned);
76 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
78 struct drm_mem_type_manager *man;
80 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82 if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
83 || bo->mem.mem_type != bo->pinned_mem_type) {
84 man = &bo->dev->bm.man[bo->mem.mem_type];
85 list_add_tail(&bo->lru, &man->lru);
87 INIT_LIST_HEAD(&bo->lru);
91 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
93 #ifdef DRM_ODD_MM_COMPAT
96 if (!bo->map_list.map)
99 ret = drm_bo_lock_kmm(bo);
102 drm_bo_unmap_virtual(bo);
104 drm_bo_finish_unmap(bo);
106 if (!bo->map_list.map)
109 drm_bo_unmap_virtual(bo);
114 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
116 #ifdef DRM_ODD_MM_COMPAT
119 if (!bo->map_list.map)
122 ret = drm_bo_remap_bound(bo);
124 DRM_ERROR("Failed to remap a bound buffer object.\n"
125 "\tThis might cause a sigbus later.\n");
127 drm_bo_unlock_kmm(bo);
132 * Call bo->mutex locked.
135 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
137 struct drm_device *dev = bo->dev;
139 uint32_t page_flags = 0;
141 DRM_ASSERT_LOCKED(&bo->mutex);
144 if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
145 page_flags |= DRM_TTM_PAGE_WRITE;
148 case drm_bo_type_device:
149 case drm_bo_type_kernel:
150 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
151 page_flags, dev->bm.dummy_read_page);
155 case drm_bo_type_user:
156 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
157 page_flags | DRM_TTM_PAGE_USER,
158 dev->bm.dummy_read_page);
162 ret = drm_ttm_set_user(bo->ttm, current,
170 DRM_ERROR("Illegal buffer object type\n");
178 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
179 struct drm_bo_mem_reg *mem,
180 int evict, int no_wait)
182 struct drm_device *dev = bo->dev;
183 struct drm_buffer_manager *bm = &dev->bm;
184 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
185 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
186 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
187 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
190 if (old_is_pci || new_is_pci ||
191 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
192 ret = drm_bo_vm_pre_move(bo, old_is_pci);
197 * Create and bind a ttm if required.
200 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
201 ret = drm_bo_add_ttm(bo);
205 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
206 ret = drm_ttm_bind(bo->ttm, mem);
211 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
213 struct drm_bo_mem_reg *old_mem = &bo->mem;
214 uint64_t save_flags = old_mem->flags;
215 uint64_t save_proposed_flags = old_mem->proposed_flags;
219 old_mem->proposed_flags = save_proposed_flags;
220 DRM_FLAG_MASKED(save_flags, mem->flags,
221 DRM_BO_MASK_MEMTYPE);
227 if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
228 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))
229 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
230 else if (dev->driver->bo_driver->move)
231 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
233 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
239 if (old_is_pci || new_is_pci)
240 drm_bo_vm_post_move(bo);
242 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
244 dev->driver->bo_driver->invalidate_caches(dev,
247 DRM_ERROR("Cannot flush read caches\n");
250 DRM_FLAG_MASKED(bo->priv_flags,
251 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
252 _DRM_BO_FLAG_EVICTED);
255 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
256 bm->man[bo->mem.mem_type].gpu_offset;
262 if (old_is_pci || new_is_pci)
263 drm_bo_vm_post_move(bo);
265 new_man = &bm->man[bo->mem.mem_type];
266 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
267 drm_ttm_unbind(bo->ttm);
268 drm_ttm_destroy(bo->ttm);
276 * Call bo->mutex locked.
277 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
280 static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
282 struct drm_fence_object *fence = bo->fence;
284 if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
288 if (drm_fence_object_signaled(fence, bo->fence_type)) {
289 drm_fence_usage_deref_unlocked(&bo->fence);
292 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
293 if (drm_fence_object_signaled(fence, bo->fence_type)) {
294 drm_fence_usage_deref_unlocked(&bo->fence);
302 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
306 mutex_lock(&bo->mutex);
307 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
308 mutex_unlock(&bo->mutex);
314 * Call bo->mutex locked.
315 * Wait until the buffer is idle.
318 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
319 int no_wait, int check_unfenced)
323 DRM_ASSERT_LOCKED(&bo->mutex);
324 while(unlikely(drm_bo_busy(bo, check_unfenced))) {
328 if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
329 mutex_unlock(&bo->mutex);
330 wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
331 mutex_lock(&bo->mutex);
332 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
336 struct drm_fence_object *fence;
337 uint32_t fence_type = bo->fence_type;
339 drm_fence_reference_unlocked(&fence, bo->fence);
340 mutex_unlock(&bo->mutex);
342 ret = drm_fence_object_wait(fence, lazy, !interruptible,
345 drm_fence_usage_deref_unlocked(&fence);
346 mutex_lock(&bo->mutex);
347 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
355 EXPORT_SYMBOL(drm_bo_wait);
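/*
 * Typical drm_bo_wait() call (sketch, not taken from a real caller):
 * idle a buffer before CPU access, with bo->mutex held. The arguments are
 * lazy = 0, interruptible = 1, no_wait = 0, check_unfenced = 0; the mutex
 * may be dropped and retaken internally.
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 1, 0, 0);
 *	mutex_unlock(&bo->mutex);
 *	if (ret)
 *		return ret;
 */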
357 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
359 struct drm_device *dev = bo->dev;
360 struct drm_buffer_manager *bm = &dev->bm;
364 unsigned long _end = jiffies + 3 * DRM_HZ;
367 ret = drm_bo_wait(bo, 0, 0, 0, 0);
368 if (ret && allow_errors)
371 } while (ret && !time_after_eq(jiffies, _end));
375 DRM_ERROR("Detected GPU lockup or "
376 "fence driver was taken down. "
377 "Evicting buffer.\n");
381 drm_fence_usage_deref_unlocked(&bo->fence);
387 * Call dev->struct_mutex locked.
388 * Attempts to remove all private references to a buffer by expiring its
389 * fence object and removing it from the LRU lists and memory managers.
392 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
394 struct drm_device *dev = bo->dev;
395 struct drm_buffer_manager *bm = &dev->bm;
397 DRM_ASSERT_LOCKED(&dev->struct_mutex);
399 atomic_inc(&bo->usage);
400 mutex_unlock(&dev->struct_mutex);
401 mutex_lock(&bo->mutex);
403 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
405 if (bo->fence && drm_fence_object_signaled(bo->fence,
407 drm_fence_usage_deref_unlocked(&bo->fence);
409 if (bo->fence && remove_all)
410 (void)drm_bo_expire_fence(bo, 0);
412 mutex_lock(&dev->struct_mutex);
414 if (!atomic_dec_and_test(&bo->usage))
418 list_del_init(&bo->lru);
419 if (bo->mem.mm_node) {
420 drm_mm_put_block(bo->mem.mm_node);
421 if (bo->pinned_node == bo->mem.mm_node)
422 bo->pinned_node = NULL;
423 bo->mem.mm_node = NULL;
425 list_del_init(&bo->pinned_lru);
426 if (bo->pinned_node) {
427 drm_mm_put_block(bo->pinned_node);
428 bo->pinned_node = NULL;
430 list_del_init(&bo->ddestroy);
431 mutex_unlock(&bo->mutex);
432 drm_bo_destroy_locked(bo);
436 if (list_empty(&bo->ddestroy)) {
437 drm_fence_object_flush(bo->fence, bo->fence_type);
438 list_add_tail(&bo->ddestroy, &bm->ddestroy);
439 schedule_delayed_work(&bm->wq,
440 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
444 mutex_unlock(&bo->mutex);
449 * Verify that refcount is 0 and that there are no internal references
450 * to the buffer object. Then destroy it.
453 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
455 struct drm_device *dev = bo->dev;
456 struct drm_buffer_manager *bm = &dev->bm;
458 DRM_ASSERT_LOCKED(&dev->struct_mutex);
460 DRM_DEBUG("freeing %p\n", bo);
461 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
462 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
463 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
464 if (bo->fence != NULL) {
465 DRM_ERROR("Fence was non-zero.\n");
466 drm_bo_cleanup_refs(bo, 0);
470 #ifdef DRM_ODD_MM_COMPAT
471 BUG_ON(!list_empty(&bo->vma_list));
472 BUG_ON(!list_empty(&bo->p_mm_list));
476 drm_ttm_unbind(bo->ttm);
477 drm_ttm_destroy(bo->ttm);
481 atomic_dec(&bm->count);
483 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
489 * Something still holds internal references to the buffer object
490 * (a fence or list membership). Drop those references.
493 drm_bo_cleanup_refs(bo, 0);
499 * Call dev->struct_mutex locked.
502 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
504 struct drm_buffer_manager *bm = &dev->bm;
506 struct drm_buffer_object *entry, *nentry;
507 struct list_head *list, *next;
509 list_for_each_safe(list, next, &bm->ddestroy) {
510 entry = list_entry(list, struct drm_buffer_object, ddestroy);
513 if (next != &bm->ddestroy) {
514 nentry = list_entry(next, struct drm_buffer_object,
516 atomic_inc(&nentry->usage);
519 drm_bo_cleanup_refs(entry, remove_all);
522 atomic_dec(&nentry->usage);
526 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
527 static void drm_bo_delayed_workqueue(void *data)
529 static void drm_bo_delayed_workqueue(struct work_struct *work)
532 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
533 struct drm_device *dev = (struct drm_device *) data;
534 struct drm_buffer_manager *bm = &dev->bm;
536 struct drm_buffer_manager *bm =
537 container_of(work, struct drm_buffer_manager, wq.work);
538 struct drm_device *dev = container_of(bm, struct drm_device, bm);
541 DRM_DEBUG("Delayed delete Worker\n");
543 mutex_lock(&dev->struct_mutex);
544 if (!bm->initialized) {
545 mutex_unlock(&dev->struct_mutex);
548 drm_bo_delayed_delete(dev, 0);
549 if (bm->initialized && !list_empty(&bm->ddestroy)) {
550 schedule_delayed_work(&bm->wq,
551 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
553 mutex_unlock(&dev->struct_mutex);
556 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
558 struct drm_buffer_object *tmp_bo = *bo;
561 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
563 if (atomic_dec_and_test(&tmp_bo->usage))
564 drm_bo_destroy_locked(tmp_bo);
566 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
568 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
570 struct drm_buffer_object *tmp_bo = *bo;
571 struct drm_device *dev = tmp_bo->dev;
574 if (atomic_dec_and_test(&tmp_bo->usage)) {
575 mutex_lock(&dev->struct_mutex);
576 if (atomic_read(&tmp_bo->usage) == 0)
577 drm_bo_destroy_locked(tmp_bo);
578 mutex_unlock(&dev->struct_mutex);
581 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
583 void drm_putback_buffer_objects(struct drm_device *dev)
585 struct drm_buffer_manager *bm = &dev->bm;
586 struct list_head *list = &bm->unfenced;
587 struct drm_buffer_object *entry, *next;
589 mutex_lock(&dev->struct_mutex);
590 list_for_each_entry_safe(entry, next, list, lru) {
591 atomic_inc(&entry->usage);
592 mutex_unlock(&dev->struct_mutex);
594 mutex_lock(&entry->mutex);
595 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
596 mutex_lock(&dev->struct_mutex);
598 list_del_init(&entry->lru);
599 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
600 wake_up_all(&entry->event_queue);
603 * FIXME: Might want to put back on head of list
604 * instead of tail here.
607 drm_bo_add_to_lru(entry);
608 mutex_unlock(&entry->mutex);
609 drm_bo_usage_deref_locked(&entry);
611 mutex_unlock(&dev->struct_mutex);
613 EXPORT_SYMBOL(drm_putback_buffer_objects);
616 * Note: the caller has to register (if applicable)
617 * and deregister fence object usage.
620 int drm_fence_buffer_objects(struct drm_device *dev,
621 struct list_head *list,
622 uint32_t fence_flags,
623 struct drm_fence_object *fence,
624 struct drm_fence_object **used_fence)
626 struct drm_buffer_manager *bm = &dev->bm;
627 struct drm_buffer_object *entry;
628 uint32_t fence_type = 0;
629 uint32_t fence_class = ~0;
634 mutex_lock(&dev->struct_mutex);
637 list = &bm->unfenced;
640 fence_class = fence->fence_class;
642 list_for_each_entry(entry, list, lru) {
643 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
644 fence_type |= entry->new_fence_type;
645 if (fence_class == ~0)
646 fence_class = entry->new_fence_class;
647 else if (entry->new_fence_class != fence_class) {
648 DRM_ERROR("Mismatched fence classes on unfenced list: "
651 entry->new_fence_class);
664 if ((fence_type & fence->type) != fence_type ||
665 (fence->fence_class != fence_class)) {
666 DRM_ERROR("Given fence doesn't match buffers "
667 "on unfenced list.\n");
672 mutex_unlock(&dev->struct_mutex);
673 ret = drm_fence_object_create(dev, fence_class, fence_type,
674 fence_flags | DRM_FENCE_FLAG_EMIT,
676 mutex_lock(&dev->struct_mutex);
685 entry = list_entry(l, struct drm_buffer_object, lru);
686 atomic_inc(&entry->usage);
687 mutex_unlock(&dev->struct_mutex);
688 mutex_lock(&entry->mutex);
689 mutex_lock(&dev->struct_mutex);
691 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
694 drm_fence_usage_deref_locked(&entry->fence);
695 entry->fence = drm_fence_reference_locked(fence);
696 entry->fence_class = entry->new_fence_class;
697 entry->fence_type = entry->new_fence_type;
698 DRM_FLAG_MASKED(entry->priv_flags, 0,
699 _DRM_BO_FLAG_UNFENCED);
700 wake_up_all(&entry->event_queue);
701 drm_bo_add_to_lru(entry);
703 mutex_unlock(&entry->mutex);
704 drm_bo_usage_deref_locked(&entry);
707 DRM_DEBUG("Fenced %d buffers\n", count);
709 mutex_unlock(&dev->struct_mutex);
713 EXPORT_SYMBOL(drm_fence_buffer_objects);
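/*
 * Sketch of how a driver might fence everything on the unfenced list after
 * command submission (illustrative only). A NULL list means bm->unfenced,
 * and a NULL fence makes the function create and emit one. The fence
 * returned in used_fence carries a usage the caller must drop when done
 * with it, per the note above.
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (!ret)
 *		drm_fence_usage_deref_unlocked(&fence);
 */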
719 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
723 struct drm_device *dev = bo->dev;
724 struct drm_bo_mem_reg evict_mem;
727 * Someone might have modified the buffer before we took the buffer mutex.
732 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
734 if (unlikely(bo->mem.flags &
735 (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
737 if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
739 if (unlikely(bo->mem.mem_type != mem_type))
741 ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
745 } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
748 evict_mem.mm_node = NULL;
751 evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
753 mutex_lock(&dev->struct_mutex);
754 list_del_init(&bo->lru);
755 mutex_unlock(&dev->struct_mutex);
757 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
761 DRM_ERROR("Failed to find memory space for "
762 "buffer 0x%p eviction.\n", bo);
766 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
770 DRM_ERROR("Buffer eviction failed\n");
774 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
775 _DRM_BO_FLAG_EVICTED);
778 mutex_lock(&dev->struct_mutex);
779 if (evict_mem.mm_node) {
780 if (evict_mem.mm_node != bo->pinned_node)
781 drm_mm_put_block(evict_mem.mm_node);
782 evict_mem.mm_node = NULL;
784 drm_bo_add_to_lru(bo);
785 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
787 mutex_unlock(&dev->struct_mutex);
793 * Repeatedly evict memory from the LRU for @mem_type until we create enough
794 * space, or we've evicted everything and there isn't enough space.
796 static int drm_bo_mem_force_space(struct drm_device *dev,
797 struct drm_bo_mem_reg *mem,
798 uint32_t mem_type, int no_wait)
800 struct drm_mm_node *node;
801 struct drm_buffer_manager *bm = &dev->bm;
802 struct drm_buffer_object *entry;
803 struct drm_mem_type_manager *man = &bm->man[mem_type];
804 struct list_head *lru;
805 unsigned long num_pages = mem->num_pages;
808 mutex_lock(&dev->struct_mutex);
810 node = drm_mm_search_free(&man->manager, num_pages,
811 mem->page_alignment, 1);
816 if (lru->next == lru)
819 entry = list_entry(lru->next, struct drm_buffer_object, lru);
820 atomic_inc(&entry->usage);
821 mutex_unlock(&dev->struct_mutex);
822 mutex_lock(&entry->mutex);
823 ret = drm_bo_evict(entry, mem_type, no_wait);
824 mutex_unlock(&entry->mutex);
825 drm_bo_usage_deref_unlocked(&entry);
828 mutex_lock(&dev->struct_mutex);
832 mutex_unlock(&dev->struct_mutex);
836 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
837 if (unlikely(!node)) {
838 mutex_unlock(&dev->struct_mutex);
842 mutex_unlock(&dev->struct_mutex);
844 mem->mem_type = mem_type;
848 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
851 uint64_t mask, uint32_t *res_mask)
853 uint64_t cur_flags = drm_bo_type_flags(mem_type);
856 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
858 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
859 cur_flags |= DRM_BO_FLAG_CACHED;
860 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
861 cur_flags |= DRM_BO_FLAG_MAPPABLE;
862 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
863 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
865 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
868 if (mem_type == DRM_BO_MEM_LOCAL) {
869 *res_mask = cur_flags;
873 flag_diff = (mask ^ cur_flags);
874 if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
875 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
877 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
878 (!(mask & DRM_BO_FLAG_CACHED) ||
879 (mask & DRM_BO_FLAG_FORCE_CACHING)))
882 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
883 ((mask & DRM_BO_FLAG_MAPPABLE) ||
884 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
887 *res_mask = cur_flags;
892 * Creates space for memory region @mem according to its type.
894 * This function first searches for free space in compatible memory types in
895 * the priority order defined by the driver. If free space isn't found, then
896 * drm_bo_mem_force_space is attempted in priority order to evict and find space.
899 int drm_bo_mem_space(struct drm_buffer_object *bo,
900 struct drm_bo_mem_reg *mem, int no_wait)
902 struct drm_device *dev = bo->dev;
903 struct drm_buffer_manager *bm = &dev->bm;
904 struct drm_mem_type_manager *man;
906 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
907 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
909 uint32_t mem_type = DRM_BO_MEM_LOCAL;
914 struct drm_mm_node *node = NULL;
918 for (i = 0; i < num_prios; ++i) {
920 man = &bm->man[mem_type];
922 type_ok = drm_bo_mt_compatible(man,
923 bo->type == drm_bo_type_user,
924 mem_type, mem->proposed_flags,
930 if (mem_type == DRM_BO_MEM_LOCAL)
933 if ((mem_type == bo->pinned_mem_type) &&
934 (bo->pinned_node != NULL)) {
935 node = bo->pinned_node;
939 mutex_lock(&dev->struct_mutex);
940 if (man->has_type && man->use_type) {
942 node = drm_mm_search_free(&man->manager, mem->num_pages,
943 mem->page_alignment, 1);
945 node = drm_mm_get_block(node, mem->num_pages,
946 mem->page_alignment);
948 mutex_unlock(&dev->struct_mutex);
953 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
955 mem->mem_type = mem_type;
956 mem->flags = cur_flags;
963 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
964 prios = dev->driver->bo_driver->mem_busy_prio;
966 for (i = 0; i < num_prios; ++i) {
968 man = &bm->man[mem_type];
973 if (!drm_bo_mt_compatible(man,
974 bo->type == drm_bo_type_user,
980 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
982 if (ret == 0 && mem->mm_node) {
983 mem->flags = cur_flags;
991 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
994 EXPORT_SYMBOL(drm_bo_mem_space);
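/*
 * The priority arrays consulted above come from the driver's
 * struct drm_bo_driver. A hypothetical driver preferring VRAM over TT
 * might set them up like this (the "foo_" names are made up):
 *
 *	static uint32_t foo_mem_prio[]  = { DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
 *	static uint32_t foo_busy_prio[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
 *
 *	.mem_type_prio     = foo_mem_prio,
 *	.num_mem_type_prio = ARRAY_SIZE(foo_mem_prio),
 *	.mem_busy_prio     = foo_busy_prio,
 *	.num_mem_busy_prio = ARRAY_SIZE(foo_busy_prio),
 */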
997 * drm_bo_modify_proposed_flags:
999 * @bo: the buffer object getting new flags
1001 * @new_flags: the new set of proposed flag bits
1003 * @new_mask: the mask of bits changed in new_flags
1005 * Modify the proposed_flags bits in @bo.
1007 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
1008 uint64_t new_flags, uint64_t new_mask)
1010 uint32_t new_access;
1012 /* Copy unchanging bits from existing proposed_flags */
1013 DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
1015 if (bo->type == drm_bo_type_user &&
1016 ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1017 (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1018 DRM_ERROR("User buffers require cache-coherent memory.\n");
1022 if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1023 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
1027 if (likely(new_mask & DRM_BO_MASK_MEM) &&
1028 (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1029 !DRM_SUSER(DRM_CURPROC)) {
1030 if (likely(bo->mem.flags & new_flags & new_mask &
1032 new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1033 (bo->mem.flags & DRM_BO_MASK_MEM);
1035 DRM_ERROR("Incompatible memory type specification "
1036 "for NO_EVICT buffer.\n");
1041 if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1042 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1046 new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1049 if (new_access == 0) {
1050 DRM_ERROR("Invalid buffer object rwx properties\n");
1054 bo->mem.proposed_flags = new_flags;
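/*
 * Example of the flags/mask convention used above (sketch only): ask for
 * TT or VRAM placement plus write access while leaving every other
 * proposed bit untouched; bits outside new_mask are copied from the
 * current proposed_flags.
 *
 *	ret = drm_bo_modify_proposed_flags(bo,
 *		DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_WRITE,
 *		DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE);
 */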
1059 * Call bo->mutex locked.
1060 * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
1061 * Unlike drm_bo_busy(), it does not do any fence flushing.
1064 int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
1066 struct drm_fence_object *fence = bo->fence;
1068 if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1072 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1073 drm_fence_usage_deref_unlocked(&bo->fence);
1081 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1085 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1086 if (bo->mem.mm_node)
1087 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1091 EXPORT_SYMBOL(drm_bo_evict_cached);
1093 * Wait until a buffer is unmapped.
1096 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1100 if (likely(atomic_read(&bo->mapped) == 0))
1103 if (unlikely(no_wait))
1107 mutex_unlock(&bo->mutex);
1108 ret = wait_event_interruptible(bo->event_queue,
1109 atomic_read(&bo->mapped) == 0);
1110 mutex_lock(&bo->mutex);
1111 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
1113 if (ret == -ERESTARTSYS)
1115 } while((ret == 0) && atomic_read(&bo->mapped) > 0);
1122 * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1125 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1126 int no_wait, int move_unfenced)
1128 struct drm_device *dev = bo->dev;
1129 struct drm_buffer_manager *bm = &dev->bm;
1131 struct drm_bo_mem_reg mem;
1133 BUG_ON(bo->fence != NULL);
1135 mem.num_pages = bo->num_pages;
1136 mem.size = mem.num_pages << PAGE_SHIFT;
1137 mem.proposed_flags = new_mem_flags;
1138 mem.page_alignment = bo->mem.page_alignment;
1140 mutex_lock(&bm->evict_mutex);
1141 mutex_lock(&dev->struct_mutex);
1142 list_del_init(&bo->lru);
1143 mutex_unlock(&dev->struct_mutex);
1146 * Determine where to move the buffer.
1148 ret = drm_bo_mem_space(bo, &mem, no_wait);
1152 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1155 mutex_lock(&dev->struct_mutex);
1156 if (ret || !move_unfenced) {
1158 if (mem.mm_node != bo->pinned_node)
1159 drm_mm_put_block(mem.mm_node);
1162 drm_bo_add_to_lru(bo);
1163 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1164 wake_up_all(&bo->event_queue);
1165 DRM_FLAG_MASKED(bo->priv_flags, 0,
1166 _DRM_BO_FLAG_UNFENCED);
1169 list_add_tail(&bo->lru, &bm->unfenced);
1170 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1171 _DRM_BO_FLAG_UNFENCED);
1173 mutex_unlock(&dev->struct_mutex);
1174 mutex_unlock(&bm->evict_mutex);
1178 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1180 uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1182 if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1184 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1185 (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1186 (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1189 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1190 ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1191 (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1197 * drm_buffer_object_validate:
1199 * @bo: the buffer object to modify
1201 * @fence_class: the new fence class covering this buffer
1203 * @move_unfenced: a boolean indicating whether switching the
1204 * memory space of this buffer should cause the buffer to
1205 * be placed on the unfenced list.
1207 * @no_wait: whether this function should return -EBUSY instead of waiting.
1210 * Change buffer access parameters. This can involve moving
1211 * the buffer to the correct memory type, pinning the buffer
1212 * or changing the class/type of fence covering this buffer
1214 * Must be called with bo locked.
1217 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1218 uint32_t fence_class,
1219 int move_unfenced, int no_wait,
1222 struct drm_device *dev = bo->dev;
1223 struct drm_buffer_manager *bm = &dev->bm;
1227 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1231 DRM_ERROR("Failed moving buffer.\n");
1233 DRM_ERROR("Out of aperture space or "
1234 "DRM memory quota.\n");
1243 if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1244 bo->pinned_mem_type = bo->mem.mem_type;
1245 mutex_lock(&dev->struct_mutex);
1246 list_del_init(&bo->pinned_lru);
1247 drm_bo_add_to_pinned_lru(bo);
1249 if (bo->pinned_node != bo->mem.mm_node) {
1250 if (bo->pinned_node != NULL)
1251 drm_mm_put_block(bo->pinned_node);
1252 bo->pinned_node = bo->mem.mm_node;
1255 mutex_unlock(&dev->struct_mutex);
1257 } else if (bo->pinned_node != NULL) {
1259 mutex_lock(&dev->struct_mutex);
1261 if (bo->pinned_node != bo->mem.mm_node)
1262 drm_mm_put_block(bo->pinned_node);
1264 list_del_init(&bo->pinned_lru);
1265 bo->pinned_node = NULL;
1266 mutex_unlock(&dev->struct_mutex);
1271 * We might need to add a TTM.
1274 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1275 ret = drm_bo_add_ttm(bo);
1280 * Validation has succeeded, move the access and other
1281 * non-mapping-related flag bits from the proposed flags to the active flags.
1285 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1288 * Finally, move the buffer to the correct LRU list.
1291 mutex_lock(&dev->struct_mutex);
1293 if (move_unfenced) {
1294 list_add_tail(&bo->lru, &bm->unfenced);
1295 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1296 _DRM_BO_FLAG_UNFENCED);
1298 drm_bo_add_to_lru(bo);
1299 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1300 wake_up_all(&bo->event_queue);
1301 DRM_FLAG_MASKED(bo->priv_flags, 0,
1302 _DRM_BO_FLAG_UNFENCED);
1305 mutex_unlock(&dev->struct_mutex);
1311 * This function is called with bo->mutex locked, but may release it
1312 * temporarily to wait for events.
1315 static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
1319 uint32_t fence_class,
1323 struct drm_device *dev = bo->dev;
1324 struct drm_bo_driver *driver = dev->driver->bo_driver;
1329 DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1330 (unsigned long long) bo->mem.proposed_flags,
1331 (unsigned long long) bo->mem.flags);
1333 ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1337 ret = drm_bo_wait_unmapped(bo, no_wait);
1341 ret = driver->fence_type(bo, &fence_class, &ftype);
1344 DRM_ERROR("Driver did not support given buffer permissions.\n");
1349 * We're switching command submission mechanism,
1350 * or cannot simply rely on the hardware serializing for us.
1351 * Insert a driver-dependent barrier or wait for buffer idle.
1354 if ((fence_class != bo->fence_class) ||
1355 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1358 if (driver->command_stream_barrier) {
1359 ret = driver->command_stream_barrier(bo,
1364 if (ret && ret != -EAGAIN)
1365 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1371 bo->new_fence_class = fence_class;
1372 bo->new_fence_type = ftype;
1375 * Check whether we need to move buffer.
1379 if (!drm_bo_mem_compat(&bo->mem)) {
1381 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1388 * drm_bo_do_validate:
1390 * @bo: the buffer object
1392 * @flags: access rights, mapping parameters and cacheability. See
1393 * the DRM_BO_FLAG_* values in drm.h
1395 * @mask: Which flag values to change; this allows callers to modify
1396 * things without knowing the current state of other flags.
1398 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1401 * @fence_class: a driver-specific way of doing fences. Presumably,
1402 * this would be used if the driver had more than one submission and
1403 * fencing mechanism. At this point, there isn't any use of this
1404 * from the user mode code.
1408 * 'validate' a buffer object. This changes where the buffer is
1409 * located, along with changing access modes.
1412 int drm_bo_do_validate(struct drm_buffer_object *bo,
1413 uint64_t flags, uint64_t mask, uint32_t hint,
1414 uint32_t fence_class)
1417 int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1420 mutex_lock(&bo->mutex);
1423 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1425 ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
1426 fence_class, no_wait,
1431 } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1433 ret = drm_buffer_object_validate(bo,
1435 !(hint & DRM_BO_HINT_DONT_FENCE),
1439 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
1441 mutex_unlock(&bo->mutex);
1445 EXPORT_SYMBOL(drm_bo_do_validate);
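/*
 * Usage sketch (not from a real caller): pin a buffer into VRAM and keep
 * it off the unfenced list. DRM_BO_FLAG_NO_EVICT needs a kernel buffer
 * object or sufficient privileges, as checked in
 * drm_bo_modify_proposed_flags().
 *
 *	ret = drm_bo_do_validate(bo,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_MASK_MEM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_HINT_DONT_FENCE, 0);
 */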
1447 int drm_buffer_object_create(struct drm_device *dev,
1449 enum drm_bo_type type,
1452 uint32_t page_alignment,
1453 unsigned long buffer_start,
1454 struct drm_buffer_object **buf_obj)
1456 struct drm_buffer_manager *bm = &dev->bm;
1457 struct drm_buffer_object *bo;
1459 unsigned long num_pages;
1461 size += buffer_start & ~PAGE_MASK;
1462 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1463 if (num_pages == 0) {
1464 DRM_ERROR("Illegal buffer object size %lu.\n", size);
1468 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1473 mutex_init(&bo->mutex);
1474 mutex_lock(&bo->mutex);
1476 atomic_set(&bo->usage, 1);
1477 atomic_set(&bo->mapped, 0);
1478 DRM_INIT_WAITQUEUE(&bo->event_queue);
1479 INIT_LIST_HEAD(&bo->lru);
1480 INIT_LIST_HEAD(&bo->pinned_lru);
1481 INIT_LIST_HEAD(&bo->ddestroy);
1482 #ifdef DRM_ODD_MM_COMPAT
1483 INIT_LIST_HEAD(&bo->p_mm_list);
1484 INIT_LIST_HEAD(&bo->vma_list);
1488 bo->num_pages = num_pages;
1489 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1490 bo->mem.num_pages = bo->num_pages;
1491 bo->mem.mm_node = NULL;
1492 bo->mem.page_alignment = page_alignment;
1493 bo->buffer_start = buffer_start & PAGE_MASK;
1495 bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1496 DRM_BO_FLAG_MAPPABLE);
1497 bo->mem.proposed_flags = 0;
1498 atomic_inc(&bm->count);
1500 * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1502 ret = drm_bo_modify_proposed_flags (bo, flags, flags);
1507 * For drm_bo_type_device buffers, allocate
1508 * address space from the device so that applications
1509 * can mmap the buffer through the drm device node.
1511 if (bo->type == drm_bo_type_device) {
1512 mutex_lock(&dev->struct_mutex);
1513 ret = drm_bo_setup_vm_locked(bo);
1514 mutex_unlock(&dev->struct_mutex);
1519 mutex_unlock(&bo->mutex);
1520 ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
1523 goto out_err_unlocked;
1529 mutex_unlock(&bo->mutex);
1531 drm_bo_usage_deref_unlocked(&bo);
1534 EXPORT_SYMBOL(drm_buffer_object_create);
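/*
 * Creation sketch (illustrative; error handling abbreviated): a kernel
 * buffer object, initially cached in system memory, readable and writable.
 * The hint, page_alignment and buffer_start arguments are all zero here.
 *
 *	struct drm_buffer_object *bo;
 *
 *	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED,
 *				       0, 0, 0, &bo);
 *	if (ret)
 *		return ret;
 */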
1536 static int drm_bo_leave_list(struct drm_buffer_object *bo,
1541 struct drm_device *dev = bo->dev;
1544 mutex_lock(&bo->mutex);
1546 ret = drm_bo_expire_fence(bo, allow_errors);
1551 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1552 mutex_lock(&dev->struct_mutex);
1553 list_del_init(&bo->pinned_lru);
1554 if (bo->pinned_node == bo->mem.mm_node)
1555 bo->pinned_node = NULL;
1556 if (bo->pinned_node != NULL) {
1557 drm_mm_put_block(bo->pinned_node);
1558 bo->pinned_node = NULL;
1560 mutex_unlock(&dev->struct_mutex);
1563 if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1564 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
1565 "cleanup. Removing flag and evicting.\n");
1566 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1567 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
1570 if (bo->mem.mem_type == mem_type)
1571 ret = drm_bo_evict(bo, mem_type, 0);
1578 DRM_ERROR("Cleanup eviction failed\n");
1583 mutex_unlock(&bo->mutex);
1588 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
1592 return list_entry(list, struct drm_buffer_object, pinned_lru);
1594 return list_entry(list, struct drm_buffer_object, lru);
1598 * dev->struct_mutex locked.
1601 static int drm_bo_force_list_clean(struct drm_device *dev,
1602 struct list_head *head,
1608 struct list_head *list, *next, *prev;
1609 struct drm_buffer_object *entry, *nentry;
1614 * The list traversal is a bit odd here, because an item may
1615 * disappear from the list when we release the struct_mutex or
1616 * when we decrease the usage count. Also we're not guaranteed
1617 * to drain pinned lists, so we can't always restart.
1622 list_for_each_safe(list, next, head) {
1625 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
1626 atomic_inc(&entry->usage);
1628 atomic_dec(&nentry->usage);
1633 * Protect the next item from destruction, so we can check
1634 * its list pointers later on.
1638 nentry = drm_bo_entry(next, pinned_list);
1639 atomic_inc(&nentry->usage);
1641 mutex_unlock(&dev->struct_mutex);
1643 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1645 mutex_lock(&dev->struct_mutex);
1647 drm_bo_usage_deref_locked(&entry);
1652 * Has the next item disappeared from the list?
1655 do_restart = ((next->prev != list) && (next->prev != prev));
1657 if (nentry != NULL && do_restart)
1658 drm_bo_usage_deref_locked(&nentry);
1666 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
1668 struct drm_buffer_manager *bm = &dev->bm;
1669 struct drm_mem_type_manager *man = &bm->man[mem_type];
1672 if (mem_type >= DRM_BO_MEM_TYPES) {
1673 DRM_ERROR("Illegal memory type %d\n", mem_type);
1677 if (!man->has_type) {
1678 DRM_ERROR("Trying to take down uninitialized "
1679 "memory manager type %u\n", mem_type);
1683 if ((man->kern_init_type) && (kern_clean == 0)) {
1684 DRM_ERROR("Trying to take down kernel initialized "
1685 "memory manager type %u\n", mem_type);
1694 BUG_ON(!list_empty(&bm->unfenced));
1695 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
1696 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
1698 if (drm_mm_clean(&man->manager)) {
1699 drm_mm_takedown(&man->manager);
1707 EXPORT_SYMBOL(drm_bo_clean_mm);
1710 * Evict all buffers of a particular mem_type, but leave memory manager
1711 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
1712 * point since we have the hardware lock.
1715 int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
1718 struct drm_buffer_manager *bm = &dev->bm;
1719 struct drm_mem_type_manager *man = &bm->man[mem_type];
1721 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1722 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1726 if (!man->has_type) {
1727 DRM_ERROR("Memory type %u has not been initialized.\n",
1732 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
1735 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
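/*
 * Sketch: with the hardware lock held, a driver can drain the TT aperture
 * before suspend or VT switch while leaving NO_MOVE regions in place:
 *
 *	ret = drm_bo_lock_mm(dev, DRM_BO_MEM_TT);
 */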
1740 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
1741 unsigned long p_offset, unsigned long p_size,
1744 struct drm_buffer_manager *bm = &dev->bm;
1746 struct drm_mem_type_manager *man;
1748 if (type >= DRM_BO_MEM_TYPES) {
1749 DRM_ERROR("Illegal memory type %d\n", type);
1753 man = &bm->man[type];
1754 if (man->has_type) {
1755 DRM_ERROR("Memory manager already initialized for type %d\n",
1760 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
1765 if (type != DRM_BO_MEM_LOCAL) {
1767 DRM_ERROR("Zero size memory manager type %d\n", type);
1770 ret = drm_mm_init(&man->manager, p_offset, p_size);
1776 man->kern_init_type = kern_init;
1779 INIT_LIST_HEAD(&man->lru);
1780 INIT_LIST_HEAD(&man->pinned);
1784 EXPORT_SYMBOL(drm_bo_init_mm);
1787 * This function is intended to be called on drm driver unload.
1788 * If you decide to call it from lastclose, you must protect the call
1789 * from a potentially racing drm_bo_driver_init in firstopen.
1790 * (This may happen on X server restart).
1793 int drm_bo_driver_finish(struct drm_device *dev)
1795 struct drm_buffer_manager *bm = &dev->bm;
1797 unsigned i = DRM_BO_MEM_TYPES;
1798 struct drm_mem_type_manager *man;
1800 mutex_lock(&dev->struct_mutex);
1802 if (!bm->initialized)
1804 bm->initialized = 0;
1808 if (man->has_type) {
1810 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
1812 DRM_ERROR("DRM memory manager type %d "
1813 "is not clean.\n", i);
1818 mutex_unlock(&dev->struct_mutex);
1820 if (!cancel_delayed_work(&bm->wq))
1821 flush_scheduled_work();
1823 mutex_lock(&dev->struct_mutex);
1824 drm_bo_delayed_delete(dev, 1);
1825 if (list_empty(&bm->ddestroy))
1826 DRM_DEBUG("Delayed destroy list was clean\n");
1828 if (list_empty(&bm->man[0].lru))
1829 DRM_DEBUG("Swap list was clean\n");
1831 if (list_empty(&bm->man[0].pinned))
1832 DRM_DEBUG("NO_MOVE list was clean\n");
1834 if (list_empty(&bm->unfenced))
1835 DRM_DEBUG("Unfenced list was clean\n");
1837 if (bm->dummy_read_page) {
1838 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1839 ClearPageReserved(bm->dummy_read_page);
1841 __free_page(bm->dummy_read_page);
1845 mutex_unlock(&dev->struct_mutex);
1848 EXPORT_SYMBOL(drm_bo_driver_finish);
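/*
 * Unload sketch (hook name assumed): drm_bo_driver_finish() itself takes
 * down any remaining driver-initialized memory types, so a minimal unload
 * path is simply:
 *
 *	static void foo_driver_unload(struct drm_device *dev)
 *	{
 *		drm_bo_driver_finish(dev);
 *	}
 */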
1851 * This function is intended to be called on drm driver load.
1852 * If you decide to call it from firstopen, you must protect the call
1853 * from a potentially racing drm_bo_driver_finish in lastclose.
1854 * (This may happen on X server restart).
1857 int drm_bo_driver_init(struct drm_device *dev)
1859 struct drm_bo_driver *driver = dev->driver->bo_driver;
1860 struct drm_buffer_manager *bm = &dev->bm;
1863 bm->dummy_read_page = NULL;
1864 mutex_lock(&dev->struct_mutex);
1868 bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1869 if (!bm->dummy_read_page) {
1874 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1875 SetPageReserved(bm->dummy_read_page);
1879 * Initialize the system memory buffer type.
1880 * Other types need to be driver / IOCTL initialized.
1882 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
1884 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1885 ClearPageReserved(bm->dummy_read_page);
1887 __free_page(bm->dummy_read_page);
1888 bm->dummy_read_page = NULL;
1892 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1893 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
1895 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
1897 bm->initialized = 1;
1899 atomic_set(&bm->count, 0);
1901 INIT_LIST_HEAD(&bm->unfenced);
1902 INIT_LIST_HEAD(&bm->ddestroy);
1904 mutex_unlock(&dev->struct_mutex);
1907 EXPORT_SYMBOL(drm_bo_driver_init);
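/*
 * Load-time sketch (the "foo_" names and sizes are assumptions): bring up
 * the buffer manager, then register driver-specific memory types.
 * DRM_BO_MEM_LOCAL is set up by drm_bo_driver_init() itself, which calls
 * drm_bo_init_mm() with dev->struct_mutex held, so the sketch does the
 * same; sizes are in pages.
 *
 *	static int foo_driver_load(struct drm_device *dev)
 *	{
 *		int ret;
 *
 *		ret = drm_bo_driver_init(dev);
 *		if (ret)
 *			return ret;
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, foo_tt_pages, 1);
 *		if (!ret)
 *			ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, foo_vram_pages, 1);
 *		mutex_unlock(&dev->struct_mutex);
 *		return ret;
 *	}
 */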
1910 * buffer object vm functions.
1913 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
1915 struct drm_buffer_manager *bm = &dev->bm;
1916 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
1918 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
1919 if (mem->mem_type == DRM_BO_MEM_LOCAL)
1922 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
1925 if (mem->flags & DRM_BO_FLAG_CACHED)
1930 EXPORT_SYMBOL(drm_mem_reg_is_pci);
1933 * Get the PCI offset for the buffer object memory.
1935 * \param bo The buffer object.
1936 * \param bus_base On return the base of the PCI region
1937 * \param bus_offset On return the byte offset into the PCI region
1938 * \param bus_size On return the byte size of the buffer object or zero if
1939 * the buffer object memory is not accessible through a PCI region.
1940 * \return Failure indication.
1942 * Returns -EINVAL if the buffer object is currently not mappable.
1943 * Otherwise returns zero.
1946 int drm_bo_pci_offset(struct drm_device *dev,
1947 struct drm_bo_mem_reg *mem,
1948 unsigned long *bus_base,
1949 unsigned long *bus_offset, unsigned long *bus_size)
1951 struct drm_buffer_manager *bm = &dev->bm;
1952 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
1955 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
1958 if (drm_mem_reg_is_pci(dev, mem)) {
1959 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1960 *bus_size = mem->num_pages << PAGE_SHIFT;
1961 *bus_base = man->io_offset;
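/*
 * Usage sketch: map a buffer's backing store for CPU access when it lives
 * in a PCI region (the ioremap flavour is the caller's choice; error
 * handling abbreviated). A zero bus_size means the memory is not
 * accessible through a PCI region and has to be reached via its pages.
 *
 *	unsigned long bus_base, bus_offset, bus_size;
 *	void *virtual;
 *
 *	ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset, &bus_size);
 *	if (!ret && bus_size)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);
 */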
1968 * Kill all user-space virtual mappings of this buffer object.
1970 * \param bo The buffer object.
1972 * Call bo->mutex locked.
1975 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
1977 struct drm_device *dev = bo->dev;
1978 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
1979 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1981 if (!dev->dev_mapping)
1984 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
1988 * drm_bo_takedown_vm_locked:
1990 * @bo: the buffer object to remove any drm device mapping
1992 * Remove any associated vm mapping on the drm device node that
1993 * would have been created for a drm_bo_type_device buffer
1995 void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
1997 struct drm_map_list *list;
1998 drm_local_map_t *map;
1999 struct drm_device *dev = bo->dev;
2001 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2002 if (bo->type != drm_bo_type_device)
2005 list = &bo->map_list;
2006 if (list->user_token) {
2007 drm_ht_remove_item(&dev->map_hash, &list->hash);
2008 list->user_token = 0;
2010 if (list->file_offset_node) {
2011 drm_mm_put_block(list->file_offset_node);
2012 list->file_offset_node = NULL;
2019 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2021 list->user_token = 0ULL;
2022 drm_bo_usage_deref_locked(&bo);
2024 EXPORT_SYMBOL(drm_bo_takedown_vm_locked);
2027 * drm_bo_setup_vm_locked:
2029 * @bo: the buffer to allocate address space for
2031 * Allocate address space in the drm device so that applications
2032 * can mmap the buffer and access the contents. This only
2033 * applies to drm_bo_type_device objects as others are not
2034 * placed in the drm device address space.
2036 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2038 struct drm_map_list *list = &bo->map_list;
2039 drm_local_map_t *map;
2040 struct drm_device *dev = bo->dev;
2042 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2043 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2049 map->type = _DRM_TTM;
2050 map->flags = _DRM_REMOVABLE;
2051 map->size = bo->mem.num_pages * PAGE_SIZE;
2052 atomic_inc(&bo->usage);
2053 map->handle = (void *)bo;
2055 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2056 bo->mem.num_pages, 0, 0);
2058 if (unlikely(!list->file_offset_node)) {
2059 drm_bo_takedown_vm_locked(bo);
2063 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2064 bo->mem.num_pages, 0);
2066 if (unlikely(!list->file_offset_node)) {
2067 drm_bo_takedown_vm_locked(bo);
2071 list->hash.key = list->file_offset_node->start;
2072 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2073 drm_bo_takedown_vm_locked(bo);
2077 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
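/*
 * The user_token computed above is what user space passes as the mmap
 * offset on the drm device node. A user-space sketch, assuming the token
 * and size have been handed back to the application through the
 * buffer-object ioctls:
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, user_token);
 */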