/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads, as well as
 * hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself, excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those,
 * we need both bo->mutex and dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated. When dev->struct_mutex is released to grab
 * bo->mutex, the list traversal will, in general, need to be restarted.
 */
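/*
 * Illustrative sketch (not part of the original file): the traversal
 * pattern the comment above implies.  An entry is pinned with its usage
 * counter so it survives the unlock, bo->mutex is taken before
 * dev->struct_mutex is re-acquired, and the walk restarts from the head
 * because the list may have changed meanwhile.  The processing step is a
 * placeholder.
 */
#if 0
static void drm_bo_walk_lru_sketch(struct drm_device *dev,
                                   struct list_head *head)
{
        struct drm_buffer_object *entry;

restart:
        mutex_lock(&dev->struct_mutex);
        if (!list_empty(head)) {
                entry = list_entry(head->next, struct drm_buffer_object, lru);
                atomic_inc(&entry->usage);      /* keep entry alive */
                mutex_unlock(&dev->struct_mutex);

                mutex_lock(&entry->mutex);      /* bo->mutex comes first */
                /* ... operate on entry ... */
                mutex_unlock(&entry->mutex);

                drm_bo_usage_deref_unlocked(&entry);
                goto restart;                   /* list may have mutated */
        }
        mutex_unlock(&dev->struct_mutex);
}
#endif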
static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);

static inline uint32_t drm_bo_type_flags(unsigned type)
{
        return (1 << (24 + type));
}
/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
{
        struct drm_mem_type_manager *man;

        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
        DRM_ASSERT_LOCKED(&bo->mutex);

        man = &bo->dev->bm.man[bo->pinned_mem_type];
        list_add_tail(&bo->pinned_lru, &man->pinned);
}
void drm_bo_add_to_lru(struct drm_buffer_object * bo)
{
        struct drm_mem_type_manager *man;

        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

        if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
            || bo->mem.mem_type != bo->pinned_mem_type) {
                man = &bo->dev->bm.man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
        } else {
                INIT_LIST_HEAD(&bo->lru);
        }
}
static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
        int ret;

        if (!bo->map_list.map)
                return 0;

        ret = drm_bo_lock_kmm(bo);
        if (ret)
                return ret;
        drm_bo_unmap_virtual(bo);
        if (old_is_pci)
                drm_bo_finish_unmap(bo);
#else
        if (!bo->map_list.map)
                return 0;

        drm_bo_unmap_virtual(bo);
#endif
        return 0;
}
static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
{
#ifdef DRM_ODD_MM_COMPAT
        int ret;

        if (!bo->map_list.map)
                return;

        ret = drm_bo_remap_bound(bo);
        if (ret) {
                DRM_ERROR("Failed to remap a bound buffer object.\n"
                          "\tThis might cause a sigbus later.\n");
        }
        drm_bo_unlock_kmm(bo);
#endif
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(struct drm_buffer_object * bo)
{
        struct drm_device *dev = bo->dev;
        int ret = 0;

        DRM_ASSERT_LOCKED(&bo->mutex);

        switch (bo->type) {
        case drm_bo_type_dc:
        case drm_bo_type_kernel:
                bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
                if (!bo->ttm)
                        ret = -ENOMEM;
                break;
        case drm_bo_type_user:
                break;
        default:
                DRM_ERROR("Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
                                  struct drm_bo_mem_reg * mem,
                                  int evict, int no_wait)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
        int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
        int new_is_pci = drm_mem_reg_is_pci(dev, mem);
        struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
        struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
                ret = drm_bo_vm_pre_move(bo, old_is_pci);
        if (ret)
                return ret;

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
                ret = drm_bo_add_ttm(bo);
                if (ret)
                        goto out_err;

                if (mem->mem_type != DRM_BO_MEM_LOCAL) {
                        ret = drm_bind_ttm(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }
        }

        if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {

                struct drm_bo_mem_reg *old_mem = &bo->mem;
                uint64_t save_flags = old_mem->flags;
                uint64_t save_mask = old_mem->mask;

                *old_mem = *mem;
                mem->mm_node = NULL;
                old_mem->mask = save_mask;
                DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);

        } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
                   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {

                ret = drm_bo_move_ttm(bo, evict, no_wait, mem);

        } else if (dev->driver->bo_driver->move) {
                ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);

        } else {

                ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

        }

        if (ret)
                goto out_err;

        if (old_is_pci || new_is_pci)
                drm_bo_vm_post_move(bo);

        if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
                ret =
                    dev->driver->bo_driver->invalidate_caches(dev,
                                                              bo->mem.flags);
                if (ret)
                        DRM_ERROR("Cannot flush read caches\n");
        }

        DRM_FLAG_MASKED(bo->priv_flags,
                        (evict) ? _DRM_BO_FLAG_EVICTED : 0,
                        _DRM_BO_FLAG_EVICTED);

        if (bo->mem.mm_node)
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                        bm->man[bo->mem.mem_type].gpu_offset;

        return 0;

out_err:
        if (old_is_pci || new_is_pci)
                drm_bo_vm_post_move(bo);

        new_man = &bm->man[bo->mem.mem_type];
        if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
                drm_ttm_unbind(bo->ttm);
                drm_destroy_ttm(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
                int no_wait)
{
        int ret;

        DRM_ASSERT_LOCKED(&bo->mutex);

        if (bo->fence) {
                if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                if (no_wait)
                        return -EBUSY;

                ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
                                            bo->fence_type);
                if (ret)
                        return ret;

                drm_fence_usage_deref_unlocked(&bo->fence);
        }
        return 0;
}
EXPORT_SYMBOL(drm_bo_wait);
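/*
 * Example (sketch, not from the original file): a typical caller idles a
 * buffer before touching its contents.  drm_bo_wait() requires bo->mutex;
 * a nonzero no_wait argument turns the call into a non-blocking poll.
 */
#if 0
        mutex_lock(&bo->mutex);
        ret = drm_bo_wait(bo, 0, 0, 0); /* not lazy, honor signals, block */
        mutex_unlock(&bo->mutex);
#endif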
static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;

        if (bo->fence) {
                if (bm->nice_mode) {
                        unsigned long _end = jiffies + 3 * DRM_HZ;
                        int ret;
                        do {
                                ret = drm_bo_wait(bo, 0, 1, 0);
                                if (ret && allow_errors)
                                        return ret;

                        } while (ret && !time_after_eq(jiffies, _end));

                        if (bo->fence) {
                                bm->nice_mode = 0;
                                DRM_ERROR("Detected GPU lockup or "
                                          "fence driver was taken down. "
                                          "Evicting buffer.\n");
                        }
                }
                if (bo->fence)
                        drm_fence_usage_deref_unlocked(&bo->fence);
        }
        return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;

        DRM_ASSERT_LOCKED(&dev->struct_mutex);

        atomic_inc(&bo->usage);
        mutex_unlock(&dev->struct_mutex);
        mutex_lock(&bo->mutex);

        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

        if (bo->fence && drm_fence_object_signaled(bo->fence,
                                                   bo->fence_type, 0))
                drm_fence_usage_deref_unlocked(&bo->fence);

        if (bo->fence && remove_all)
                (void)drm_bo_expire_fence(bo, 0);

        mutex_lock(&dev->struct_mutex);

        if (!atomic_dec_and_test(&bo->usage)) {
                goto out;
        }

        if (!bo->fence) {
                list_del_init(&bo->lru);
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        if (bo->pinned_node == bo->mem.mm_node)
                                bo->pinned_node = NULL;
                        bo->mem.mm_node = NULL;
                }
                list_del_init(&bo->pinned_lru);
                if (bo->pinned_node) {
                        drm_mm_put_block(bo->pinned_node);
                        bo->pinned_node = NULL;
                }
                list_del_init(&bo->ddestroy);
                mutex_unlock(&bo->mutex);
                drm_bo_destroy_locked(bo);
                return;
        }

        if (list_empty(&bo->ddestroy)) {
                drm_fence_object_flush(bo->fence, bo->fence_type);
                list_add_tail(&bo->ddestroy, &bm->ddestroy);
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }

out:
        mutex_unlock(&bo->mutex);
}
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;

        DRM_ASSERT_LOCKED(&dev->struct_mutex);

        if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
            list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
            list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
                if (bo->fence != NULL) {
                        DRM_ERROR("Fence was non-NULL.\n");
                        drm_bo_cleanup_refs(bo, 0);
                        return;
                }

#ifdef DRM_ODD_MM_COMPAT
                BUG_ON(!list_empty(&bo->vma_list));
                BUG_ON(!list_empty(&bo->p_mm_list));
#endif

                if (bo->ttm) {
                        drm_ttm_unbind(bo->ttm);
                        drm_destroy_ttm(bo->ttm);
                }

                atomic_dec(&bm->count);

                drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

                return;
        }

        /*
         * Some stuff is still trying to reference the buffer object.
         * Get rid of those references.
         */

        drm_bo_cleanup_refs(bo, 0);
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
{
        struct drm_buffer_manager *bm = &dev->bm;

        struct drm_buffer_object *entry, *nentry;
        struct list_head *list, *next;

        list_for_each_safe(list, next, &bm->ddestroy) {
                entry = list_entry(list, struct drm_buffer_object, ddestroy);

                nentry = NULL;
                if (next != &bm->ddestroy) {
                        nentry = list_entry(next, struct drm_buffer_object,
                                            ddestroy);
                        atomic_inc(&nentry->usage);
                }

                drm_bo_cleanup_refs(entry, remove_all);

                if (nentry) {
                        atomic_dec(&nentry->usage);
                }
        }
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        struct drm_device *dev = (struct drm_device *) data;
        struct drm_buffer_manager *bm = &dev->bm;
#else
        struct drm_buffer_manager *bm =
            container_of(work, struct drm_buffer_manager, wq.work);
        struct drm_device *dev = container_of(bm, struct drm_device, bm);
#endif

        DRM_DEBUG("Delayed delete Worker\n");

        mutex_lock(&dev->struct_mutex);
        if (!bm->initialized) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        drm_bo_delayed_delete(dev, 0);
        if (bm->initialized && !list_empty(&bm->ddestroy)) {
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }
        mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
{
        struct drm_buffer_object *tmp_bo = *bo;

        *bo = NULL;

        DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);

        if (atomic_dec_and_test(&tmp_bo->usage)) {
                drm_bo_destroy_locked(tmp_bo);
        }
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);
static void drm_bo_base_deref_locked(struct drm_file * file_priv,
                                     struct drm_user_object * uo)
{
        struct drm_buffer_object *bo =
            drm_user_object_entry(uo, struct drm_buffer_object, base);

        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

        drm_bo_takedown_vm_locked(bo);
        drm_bo_usage_deref_locked(&bo);
}
void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
{
        struct drm_buffer_object *tmp_bo = *bo;
        struct drm_device *dev = tmp_bo->dev;

        *bo = NULL;

        if (atomic_dec_and_test(&tmp_bo->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&tmp_bo->usage) == 0)
                        drm_bo_destroy_locked(tmp_bo);
                mutex_unlock(&dev->struct_mutex);
        }
}
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
void drm_putback_buffer_objects(struct drm_device *dev)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct list_head *list = &bm->unfenced;
        struct drm_buffer_object *entry, *next;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(entry, next, list, lru) {
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);

                mutex_lock(&entry->mutex);
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
                mutex_lock(&dev->struct_mutex);

                list_del_init(&entry->lru);
                DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                DRM_WAKEUP(&entry->event_queue);

                /*
                 * FIXME: Might want to put back on head of list
                 * instead of tail here.
                 */

                drm_bo_add_to_lru(entry);
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(&entry);
        }
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_putback_buffer_objects);
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(struct drm_device *dev,
                             struct list_head *list,
                             uint32_t fence_flags,
                             struct drm_fence_object * fence,
                             struct drm_fence_object ** used_fence)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *entry;
        uint32_t fence_type = 0;
        uint32_t fence_class = ~0;
        int count = 0;
        int ret = 0;
        struct list_head *l;

        mutex_lock(&dev->struct_mutex);

        if (!list)
                list = &bm->unfenced;

        if (fence)
                fence_class = fence->fence_class;

        list_for_each_entry(entry, list, lru) {
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
                fence_type |= entry->new_fence_type;
                if (fence_class == ~0)
                        fence_class = entry->new_fence_class;
                else if (entry->new_fence_class != fence_class) {
                        DRM_ERROR("Mismatched fence classes on unfenced list: "
                                  "%d and %d.\n",
                                  fence_class,
                                  entry->new_fence_class);
                        ret = -EINVAL;
                        goto out;
                }
                count++;
        }

        if (!count) {
                ret = -EINVAL;
                goto out;
        }

        if (fence) {
                if ((fence_type & fence->type) != fence_type ||
                    (fence->fence_class != fence_class)) {
                        DRM_ERROR("Given fence doesn't match buffers "
                                  "on unfenced list.\n");
                        ret = -EINVAL;
                        goto out;
                }
        } else {
                mutex_unlock(&dev->struct_mutex);
                ret = drm_fence_object_create(dev, fence_class, fence_type,
                                              fence_flags | DRM_FENCE_FLAG_EMIT,
                                              &fence);
                mutex_lock(&dev->struct_mutex);
                if (ret)
                        goto out;
        }

        count = 0;
        l = list->next;
        while (l != list) {
                entry = list_entry(l, struct drm_buffer_object, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);
                list_del_init(l);
                if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
                                drm_fence_usage_deref_locked(&entry->fence);
                        entry->fence = drm_fence_reference_locked(fence);
                        entry->fence_class = entry->new_fence_class;
                        entry->fence_type = entry->new_fence_type;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                        DRM_WAKEUP(&entry->event_queue);
                        drm_bo_add_to_lru(entry);
                }
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(&entry);
                l = list->next;
        }
        DRM_DEBUG("Fenced %d buffers\n", count);
out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
        return ret;
}
EXPORT_SYMBOL(drm_fence_buffer_objects);
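/*
 * Usage sketch (hypothetical driver code, not from this file): after
 * submitting a command batch, fence everything that was validated onto the
 * unfenced list.  A NULL list selects bm->unfenced, and a NULL fence asks
 * the function to create and emit one; the caller then drops the reference
 * handed back through used_fence, per the note above.
 */
#if 0
        struct drm_fence_object *fence = NULL;
        int ret;

        ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
        if (!ret && fence)
                drm_fence_usage_deref_unlocked(&fence);
#endif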
static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
                        int no_wait)
{
        int ret = 0;
        struct drm_device *dev = bo->dev;
        struct drm_bo_mem_reg evict_mem;

        /*
         * Someone might have modified the buffer before we took the
         * buffer mutex.
         */

        if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
                goto out;
        if (bo->mem.mem_type != mem_type)
                goto out;

        ret = drm_bo_wait(bo, 0, 0, no_wait);

        if (ret && ret != -EAGAIN) {
                DRM_ERROR("Failed to expire fence before "
                          "buffer eviction.\n");
                goto out;
        }

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
        ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

        if (ret) {
                if (ret != -EAGAIN)
                        DRM_ERROR("Failed to find memory space for "
                                  "buffer 0x%p eviction.\n", bo);
                goto out;
        }

        ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

        if (ret) {
                if (ret != -EAGAIN)
                        DRM_ERROR("Buffer eviction failed\n");
                goto out;
        }

        mutex_lock(&dev->struct_mutex);
        if (evict_mem.mm_node) {
                if (evict_mem.mm_node != bo->pinned_node)
                        drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }

        drm_bo_add_to_lru(bo);
        mutex_unlock(&dev->struct_mutex);

        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
                        _DRM_BO_FLAG_EVICTED);

out:
        return ret;
}
/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */

static int drm_bo_mem_force_space(struct drm_device * dev,
                                  struct drm_bo_mem_reg * mem,
                                  uint32_t mem_type, int no_wait)
{
        struct drm_mm_node *node;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *entry;
        struct drm_mem_type_manager *man = &bm->man[mem_type];
        struct list_head *lru;
        unsigned long num_pages = mem->num_pages;
        int ret;

        mutex_lock(&dev->struct_mutex);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;

                lru = &man->lru;
                if (lru->next == lru)
                        break;

                entry = list_entry(lru->next, struct drm_buffer_object, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

                ret = drm_bo_evict(entry, mem_type, no_wait);
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_unlocked(&entry);
                if (ret)
                        return ret;
                mutex_lock(&dev->struct_mutex);
        } while (1);

        if (!node) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
        }

        node = drm_mm_get_block(node, num_pages, mem->page_alignment);
        mutex_unlock(&dev->struct_mutex);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}
static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
                                uint32_t mem_type,
                                uint32_t mask, uint32_t * res_mask)
{
        uint32_t cur_flags = drm_bo_type_flags(mem_type);
        uint32_t flag_diff;

        if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
                cur_flags |= DRM_BO_FLAG_CACHED;
        if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
                cur_flags |= DRM_BO_FLAG_MAPPABLE;
        if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
                DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

        if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
                return 0;

        if (mem_type == DRM_BO_MEM_LOCAL) {
                *res_mask = cur_flags;
                return 1;
        }

        flag_diff = (mask ^ cur_flags);
        if ((flag_diff & DRM_BO_FLAG_CACHED) &&
            (!(mask & DRM_BO_FLAG_CACHED) ||
             (mask & DRM_BO_FLAG_FORCE_CACHING)))
                return 0;

        if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
            ((mask & DRM_BO_FLAG_MAPPABLE) ||
             (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
                return 0;

        *res_mask = cur_flags;
        return 1;
}
/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * drm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */

int drm_bo_mem_space(struct drm_buffer_object * bo,
                     struct drm_bo_mem_reg * mem, int no_wait)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man;

        uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
        const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
        uint32_t i;
        uint32_t mem_type = DRM_BO_MEM_LOCAL;
        uint32_t cur_flags;
        int type_found = 0;
        int type_ok = 0;
        int has_eagain = 0;
        struct drm_mm_node *node = NULL;
        int ret;

        mem->mm_node = NULL;
        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bm->man[mem_type];

                type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
                                               &cur_flags);
                if (!type_ok)
                        continue;

                if (mem_type == DRM_BO_MEM_LOCAL)
                        break;

                if ((mem_type == bo->pinned_mem_type) &&
                    (bo->pinned_node != NULL)) {
                        node = bo->pinned_node;
                        break;
                }

                mutex_lock(&dev->struct_mutex);
                if (man->has_type && man->use_type) {
                        type_found = 1;
                        node = drm_mm_search_free(&man->manager, mem->num_pages,
                                                  mem->page_alignment, 1);
                        if (node)
                                node = drm_mm_get_block(node, mem->num_pages,
                                                        mem->page_alignment);
                }
                mutex_unlock(&dev->struct_mutex);
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->flags = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        num_prios = dev->driver->bo_driver->num_mem_busy_prio;
        prios = dev->driver->bo_driver->mem_busy_prio;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bm->man[mem_type];

                if (!man->has_type)
                        continue;

                if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
                        continue;

                ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

                if (ret == 0) {
                        mem->flags = cur_flags;
                        return 0;
                }

                if (ret == -EAGAIN)
                        has_eagain = 1;
        }

        ret = (has_eagain) ? -EAGAIN : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(drm_bo_mem_space);
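/*
 * Sketch (hypothetical driver setup, not from this file): drm_bo_mem_space()
 * consults two driver-supplied priority arrays.  A driver might prefer VRAM,
 * then TT, then local pages, and use a different order once eviction is
 * needed.  The array names and the helper are illustrative only.
 */
#if 0
static const uint32_t drv_mem_prios[] = { DRM_BO_MEM_VRAM, DRM_BO_MEM_TT,
                                          DRM_BO_MEM_LOCAL };
static const uint32_t drv_busy_prios[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };

static void drv_setup_prios_sketch(struct drm_bo_driver *driver)
{
        driver->mem_type_prio = drv_mem_prios;
        driver->num_mem_type_prio = ARRAY_SIZE(drv_mem_prios);
        driver->mem_busy_prio = drv_busy_prios;
        driver->num_mem_busy_prio = ARRAY_SIZE(drv_busy_prios);
}
#endif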
static int drm_bo_new_mask(struct drm_buffer_object * bo,
                           uint64_t new_mask, uint32_t hint)
{
        uint32_t new_props;

        if (bo->type == drm_bo_type_user) {
                DRM_ERROR("User buffers are not supported yet\n");
                return -EINVAL;
        }

        if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
                DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to "
                          "privileged processes.\n");
                return -EPERM;
        }

        if (new_mask & DRM_BO_FLAG_NO_MOVE) {
                DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
                return -EPERM;
        }

        new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
                                DRM_BO_FLAG_READ);

        if (!new_props) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }

        bo->mem.mask = new_mask;
        return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
                                                   uint32_t handle, int check_owner)
{
        struct drm_user_object *uo;
        struct drm_buffer_object *bo;

        uo = drm_lookup_user_object(file_priv, handle);

        if (!uo || (uo->type != drm_buffer_type)) {
                DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
                return NULL;
        }

        if (check_owner && file_priv != uo->owner) {
                if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
                        return NULL;
        }

        bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
        atomic_inc(&bo->usage);
        return bo;
}
EXPORT_SYMBOL(drm_lookup_buffer_object);
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
 * Unlike drm_bo_busy(), this function doesn't do any fence flushing.
 */

static int drm_bo_quick_busy(struct drm_buffer_object * bo)
{
        struct drm_fence_object *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

        if (fence) {
                if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                return 1;
        }
        return 0;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
 */

static int drm_bo_busy(struct drm_buffer_object * bo)
{
        struct drm_fence_object *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

        if (fence) {
                if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
                if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
                return 1;
        }
        return 0;
}
static int drm_bo_read_cached(struct drm_buffer_object * bo)
{
        int ret = 0;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (bo->mem.mm_node)
                ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
        return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->mapped) >= 0) && no_wait)
                return -EBUSY;

        DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                    atomic_read(&bo->mapped) == -1);

        if (ret == -EINTR)
                ret = -EAGAIN;

        return ret;
}
static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
{
        int ret;

        mutex_lock(&bo->mutex);
        ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        mutex_unlock(&bo->mutex);
        return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be one atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */

static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
                                int eagain_if_wait)
{
        int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

        if (ret && no_wait)
                return -EBUSY;
        else if (!ret)
                return 0;

        ret = 0;
        mutex_unlock(&bo->mutex);
        DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                    !drm_bo_check_unfenced(bo));
        mutex_lock(&bo->mutex);
        if (ret == -EINTR)
                return -EAGAIN;
        ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (ret) {
                DRM_ERROR("Timeout waiting for buffer to become fenced\n");
                return -EBUSY;
        }
        if (eagain_if_wait)
                return -EAGAIN;

        return 0;
}
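/*
 * Sketch of the sequence the comment above refers to (hypothetical driver
 * code, not from this file): between validation and fencing the buffer sits
 * on the unfenced list, and concurrent users wait that window out via
 * drm_bo_wait_unfenced().  The no_wait argument to drm_bo_do_validate() is
 * shown as 0 (block); flags, mask, hint and fence_class are placeholders.
 */
#if 0
        ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, 0, NULL);
        /* ... submit commands that reference bo ... */
        ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
#endif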
/*
 * Fill in the ioctl reply argument with buffer info.
 */

static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
                                struct drm_bo_info_rep *rep)
{
        rep->handle = bo->base.hash.key;
        rep->flags = bo->mem.flags;
        rep->size = bo->num_pages * PAGE_SIZE;
        rep->offset = bo->offset;
        rep->arg_handle = bo->map_list.user_token;
        rep->mask = bo->mem.mask;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
        rep->rep_flags = 0;
        rep->page_alignment = bo->mem.page_alignment;

        if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
                DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
                                DRM_BO_REP_BUSY);
        }
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically removed.
 */

static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
                                 uint32_t map_flags, unsigned hint,
                                 struct drm_bo_info_rep *rep)
{
        struct drm_buffer_object *bo;
        struct drm_device *dev = file_priv->head->dev;
        int ret = 0;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(file_priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;

        /*
         * If this returns true, we are currently unmapped.
         * We need to do this test, because unmapping can
         * be done without the bo->mutex held.
         */

        while (1) {
                if (atomic_inc_and_test(&bo->mapped)) {
                        if (no_wait && drm_bo_busy(bo)) {
                                atomic_dec(&bo->mapped);
                                ret = -EBUSY;
                                goto out;
                        }
                        ret = drm_bo_wait(bo, 0, 0, no_wait);
                        if (ret) {
                                atomic_dec(&bo->mapped);
                                goto out;
                        }

                        if ((map_flags & DRM_BO_FLAG_READ) &&
                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
                                drm_bo_read_cached(bo);
                        }
                        break;
                } else if ((map_flags & DRM_BO_FLAG_READ) &&
                           (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
                           (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

                        /*
                         * We are already mapped with different flags;
                         * we need to wait for unmap.
                         */

                        ret = drm_bo_wait_unmapped(bo, no_wait);
                        if (ret)
                                goto out;
                } else
                        break;
        }

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
                        DRM_WAKEUP(&bo->event_queue);
        } else
                drm_bo_fill_rep_arg(bo, rep);
out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(&bo);
        return ret;
}
static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
{
        struct drm_device *dev = file_priv->head->dev;
        struct drm_buffer_object *bo;
        struct drm_ref_object *ro;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        bo = drm_lookup_buffer_object(file_priv, handle, 1);
        if (!bo) {
                ret = -EINVAL;
                goto out;
        }

        ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
        if (!ro) {
                ret = -EINVAL;
                goto out;
        }

        drm_remove_ref_object(file_priv, ro);
        drm_bo_usage_deref_locked(&bo);
out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
                                         struct drm_user_object * uo,
                                         enum drm_ref_type action)
{
        struct drm_buffer_object *bo =
            drm_user_object_entry(uo, struct drm_buffer_object, base);

        /*
         * We DON'T want to take the bo->lock here, because we want to
         * hold it when we wait for the buffer to become unmapped.
         */

        BUG_ON(action != _DRM_REF_TYPE1);

        if (atomic_add_negative(-1, &bo->mapped))
                DRM_WAKEUP(&bo->event_queue);
}
/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
                       int no_wait, int move_unfenced)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = 0;
        struct drm_bo_mem_reg mem;
        /*
         * Flush outstanding fences.
         */

        drm_bo_busy(bo);

        /*
         * Wait for outstanding fences.
         */

        ret = drm_bo_wait(bo, 0, 0, no_wait);
        if (ret)
                return ret;

        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.mask = new_mem_flags;
        mem.page_alignment = bo->mem.page_alignment;

        mutex_lock(&bm->evict_mutex);
        mutex_lock(&dev->struct_mutex);
        list_del(&bo->lru);
        list_add_tail(&bo->lru, &bm->unfenced);
        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                        _DRM_BO_FLAG_UNFENCED);
        mutex_unlock(&dev->struct_mutex);

        /*
         * Determine where to move the buffer.
         */
        ret = drm_bo_mem_space(bo, &mem, no_wait);

        if (ret)
                goto out_unlock;

        ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

out_unlock:
        if (ret || !move_unfenced) {
                mutex_lock(&dev->struct_mutex);
                if (mem.mm_node) {
                        if (mem.mm_node != bo->pinned_node)
                                drm_mm_put_block(mem.mm_node);
                        mem.mm_node = NULL;
                }
                DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                DRM_WAKEUP(&bo->event_queue);
                list_del(&bo->lru);
                drm_bo_add_to_lru(bo);
                mutex_unlock(&dev->struct_mutex);
        }

        mutex_unlock(&bm->evict_mutex);
        return ret;
}
static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
{
        uint32_t flag_diff = (mem->mask ^ mem->flags);

        if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
                return 0;
        if ((flag_diff & DRM_BO_FLAG_CACHED) &&
            (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
             (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
                return 0;
        }
        if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
            ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
             (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
                return 0;
        return 1;
}
/*
 * bo locked.
 */

static int drm_buffer_object_validate(struct drm_buffer_object * bo,
                                      uint32_t fence_class,
                                      int move_unfenced, int no_wait)
{
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        uint32_t ftype;
        int ret;

        DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
                  (unsigned long long) bo->mem.mask,
                  (unsigned long long) bo->mem.flags);

        ret = driver->fence_type(bo, &fence_class, &ftype);

        if (ret) {
                DRM_ERROR("Driver did not support given buffer permissions\n");
                return ret;
        }

        /*
         * We're switching command submission mechanism,
         * or cannot simply rely on the hardware serializing for us.
         *
         * Wait for buffer idle.
         */

        if ((fence_class != bo->fence_class) ||
            ((ftype ^ bo->fence_type) & bo->fence_type)) {

                ret = drm_bo_wait(bo, 0, 0, no_wait);

                if (ret)
                        return ret;

        }

        bo->new_fence_class = fence_class;
        bo->new_fence_type = ftype;

        ret = drm_bo_wait_unmapped(bo, no_wait);
        if (ret) {
                DRM_ERROR("Timed out waiting for buffer unmap.\n");
                return ret;
        }

        /*
         * Check whether we need to move buffer.
         */

        if (!drm_bo_mem_compat(&bo->mem)) {
                ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
                                         move_unfenced);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }

        /*
         * Pinned buffers are put on a special pinned list.
         */

        if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                bo->pinned_mem_type = bo->mem.mem_type;
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->pinned_lru);
                drm_bo_add_to_pinned_lru(bo);

                if (bo->pinned_node != bo->mem.mm_node) {
                        if (bo->pinned_node != NULL)
                                drm_mm_put_block(bo->pinned_node);
                        bo->pinned_node = bo->mem.mm_node;
                }

                mutex_unlock(&dev->struct_mutex);

        } else if (bo->pinned_node != NULL) {

                mutex_lock(&dev->struct_mutex);

                if (bo->pinned_node != bo->mem.mm_node)
                        drm_mm_put_block(bo->pinned_node);

                list_del_init(&bo->pinned_lru);
                bo->pinned_node = NULL;
                mutex_unlock(&dev->struct_mutex);

        }

        /*
         * We might need to add a TTM.
         */

        if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
                ret = drm_bo_add_ttm(bo);
                if (ret)
                        return ret;
        }
        DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);

        /*
         * Finally, adjust lru to be sure.
         */

        mutex_lock(&dev->struct_mutex);
        list_del(&bo->lru);
        if (move_unfenced) {
                list_add_tail(&bo->lru, &bm->unfenced);
                DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                                _DRM_BO_FLAG_UNFENCED);
        } else {
                drm_bo_add_to_lru(bo);
                if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        DRM_WAKEUP(&bo->event_queue);
                        DRM_FLAG_MASKED(bo->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                }
        }
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
int drm_bo_do_validate(struct drm_buffer_object *bo,
                       uint64_t flags, uint64_t mask, uint32_t hint,
                       uint32_t fence_class,
                       int no_wait,
                       struct drm_bo_info_rep *rep)
{
        int ret;

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);

        if (ret)
                goto out;

        DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
        ret = drm_bo_new_mask(bo, flags, hint);
        if (ret)
                goto out;

        ret = drm_buffer_object_validate(bo,
                                         fence_class,
                                         !(hint & DRM_BO_HINT_DONT_FENCE),
                                         no_wait);
out:
        if (!ret)
                drm_bo_fill_rep_arg(bo, rep);

        mutex_unlock(&bo->mutex);
        return ret;
}
EXPORT_SYMBOL(drm_bo_do_validate);
int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
                           uint32_t fence_class,
                           uint64_t flags, uint64_t mask, uint32_t hint,
                           struct drm_bo_info_rep * rep,
                           struct drm_buffer_object **bo_rep)
{
        struct drm_device *dev = file_priv->head->dev;
        struct drm_buffer_object *bo;
        int ret;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(file_priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        /*
         * Only allow creator to change shared buffer mask.
         */

        if (bo->base.owner != file_priv)
                mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);

        ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
                                 no_wait, rep);

        if (!ret && bo_rep)
                *bo_rep = bo;
        else
                drm_bo_usage_deref_unlocked(&bo);

        return ret;
}
EXPORT_SYMBOL(drm_bo_handle_validate);
static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
                              struct drm_bo_info_rep *rep)
{
        struct drm_device *dev = file_priv->head->dev;
        struct drm_buffer_object *bo;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(file_priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
                (void)drm_bo_busy(bo);
        drm_bo_fill_rep_arg(bo, rep);
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(&bo);
        return 0;
}
static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
                              uint32_t hint,
                              struct drm_bo_info_rep *rep)
{
        struct drm_device *dev = file_priv->head->dev;
        struct drm_buffer_object *bo;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        int ret;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(file_priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;
        ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
        if (ret)
                goto out;

        drm_bo_fill_rep_arg(bo, rep);

out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(&bo);
        return ret;
}
int drm_buffer_object_create(struct drm_device *dev,
                             unsigned long size,
                             enum drm_bo_type type,
                             uint64_t mask,
                             uint32_t hint,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             struct drm_buffer_object ** buf_obj)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *bo;
        int ret = 0;
        unsigned long num_pages;

        if (buffer_start & ~PAGE_MASK) {
                DRM_ERROR("Invalid buffer object start.\n");
                return -EINVAL;
        }
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                DRM_ERROR("Illegal buffer object size.\n");
                return -EINVAL;
        }

        bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
        if (!bo)
                return -ENOMEM;

        mutex_init(&bo->mutex);
        mutex_lock(&bo->mutex);

        atomic_set(&bo->usage, 1);
        atomic_set(&bo->mapped, -1);
        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->pinned_lru);
        INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&bo->p_mm_list);
        INIT_LIST_HEAD(&bo->vma_list);
#endif
        bo->dev = dev;
        if (buffer_start != 0)
                bo->type = drm_bo_type_user;
        else
                bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = DRM_BO_MEM_LOCAL;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start;
        bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                DRM_BO_FLAG_MAPPABLE;
        bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
                DRM_BO_FLAG_MAPPABLE;
        atomic_inc(&bm->count);
        ret = drm_bo_new_mask(bo, mask, hint);
        if (ret)
                goto out_err;

        if (bo->type == drm_bo_type_dc) {
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_setup_vm_locked(bo);
                mutex_unlock(&dev->struct_mutex);
                if (ret)
                        goto out_err;
        }

        ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
        if (ret)
                goto out_err;

        mutex_unlock(&bo->mutex);
        *buf_obj = bo;
        return 0;

out_err:
        mutex_unlock(&bo->mutex);

        drm_bo_usage_deref_unlocked(&bo);
        return ret;
}
EXPORT_SYMBOL(drm_buffer_object_create);
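/*
 * Usage sketch (hypothetical in-kernel caller, not from this file): create
 * a 64 KiB, CPU-mappable buffer backed by local memory.  A zero
 * buffer_start selects a kernel-allocated backing rather than
 * drm_bo_type_user; hint and page_alignment are left at 0.
 */
#if 0
        struct drm_buffer_object *bo;
        int ret;

        ret = drm_buffer_object_create(dev, 64 * 1024, drm_bo_type_kernel,
                                       DRM_BO_FLAG_MEM_LOCAL |
                                       DRM_BO_FLAG_CACHED |
                                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                                       0, 0, 0, &bo);
        if (!ret)
                drm_bo_usage_deref_unlocked(&bo);    /* drop when done */
#endif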
static int drm_bo_add_user_object(struct drm_file *file_priv,
                                  struct drm_buffer_object *bo, int shareable)
{
        struct drm_device *dev = file_priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(file_priv, &bo->base, shareable);
        if (ret)
                goto out;

        bo->base.remove = drm_bo_base_deref_locked;
        bo->base.type = drm_buffer_type;
        bo->base.ref_struct_locked = NULL;
        bo->base.unref = drm_buffer_user_object_unmap;

out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_create_arg *arg = data;
        struct drm_bo_create_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        struct drm_buffer_object *entry;
        int ret = 0;

        DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
                  (int)(req->size / 1024), req->page_alignment * 4);

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_buffer_object_create(file_priv->head->dev,
                                       req->size, drm_bo_type_dc, req->mask,
                                       req->hint, req->page_alignment,
                                       req->buffer_start, &entry);
        if (ret)
                goto out;

        ret = drm_bo_add_user_object(file_priv, entry,
                                     req->mask & DRM_BO_FLAG_SHAREABLE);
        if (ret) {
                drm_bo_usage_deref_unlocked(&entry);
                goto out;
        }

        mutex_lock(&entry->mutex);
        drm_bo_fill_rep_arg(entry, rep);
        mutex_unlock(&entry->mutex);

out:
        return ret;
}
int drm_bo_setstatus_ioctl(struct drm_device *dev,
                           void *data, struct drm_file *file_priv)
{
        struct drm_bo_map_wait_idle_arg *arg = data;
        struct drm_bo_info_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
                                     req->flags,
                                     req->mask,
                                     req->hint | DRM_BO_HINT_DONT_FENCE,
                                     rep, NULL);
        if (ret)
                return ret;

        return 0;
}
int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_map_wait_idle_arg *arg = data;
        struct drm_bo_info_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
                                    req->hint, rep);
        return ret;
}
int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_handle_arg *arg = data;
        int ret;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_buffer_object_unmap(file_priv, arg->handle);
        return ret;
}
int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_reference_info_arg *arg = data;
        struct drm_bo_handle_arg *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        struct drm_user_object *uo;
        int ret;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_user_object_ref(file_priv, req->handle,
                                  drm_buffer_type, &uo);
        if (ret)
                return ret;

        ret = drm_bo_handle_info(file_priv, req->handle, rep);
        if (ret)
                return ret;

        return 0;
}
int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_handle_arg *arg = data;
        int ret = 0;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
        return ret;
}
int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_reference_info_arg *arg = data;
        struct drm_bo_handle_arg *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_bo_handle_info(file_priv, req->handle, rep);
        if (ret)
                return ret;

        return 0;
}
int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_map_wait_idle_arg *arg = data;
        struct drm_bo_info_req *req = &arg->d.req;
        struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        ret = drm_bo_handle_wait(file_priv, req->handle,
                                 req->hint, rep);
        if (ret)
                return ret;

        return 0;
}
static int drm_bo_leave_list(struct drm_buffer_object * bo,
                             uint32_t mem_type,
                             int free_pinned, int allow_errors)
{
        struct drm_device *dev = bo->dev;
        int ret = 0;

        mutex_lock(&bo->mutex);

        ret = drm_bo_expire_fence(bo, allow_errors);
        if (ret)
                goto out;

        if (free_pinned) {
                DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->pinned_lru);
                if (bo->pinned_node == bo->mem.mm_node)
                        bo->pinned_node = NULL;
                if (bo->pinned_node != NULL) {
                        drm_mm_put_block(bo->pinned_node);
                        bo->pinned_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
                DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
                          "cleanup. Removing flag and evicting.\n");
                bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
                bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
        }

        if (bo->mem.mem_type == mem_type)
                ret = drm_bo_evict(bo, mem_type, 0);

        if (ret) {
                if (allow_errors) {
                        goto out;
                } else {
                        ret = 0;
                        DRM_ERROR("Cleanup eviction failed\n");
                }
        }

out:
        mutex_unlock(&bo->mutex);
        return ret;
}
static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
                                              int pinned_list)
{
        if (pinned_list)
                return list_entry(list, struct drm_buffer_object, pinned_lru);
        else
                return list_entry(list, struct drm_buffer_object, lru);
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(struct drm_device * dev,
                                   struct list_head *head,
                                   unsigned mem_type,
                                   int free_pinned,
                                   int allow_errors,
                                   int pinned_list)
{
        struct list_head *list, *next, *prev;
        struct drm_buffer_object *entry, *nentry;
        int ret;
        int do_restart;

        /*
         * The list traversal is a bit odd here, because an item may
         * disappear from the list when we release the struct_mutex or
         * when we decrease the usage count. Also we're not guaranteed
         * to drain pinned lists, so we can't always restart.
         */

restart:
        nentry = NULL;
        list_for_each_safe(list, next, head) {
                prev = list->prev;
                entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
                atomic_inc(&entry->usage);
                if (nentry) {
                        atomic_dec(&nentry->usage);
                        nentry = NULL;
                }

                /*
                 * Protect the next item from destruction, so we can check
                 * its list pointers later on.
                 */

                if (next != head) {
                        nentry = drm_bo_entry(next, pinned_list);
                        atomic_inc(&nentry->usage);
                }
                mutex_unlock(&dev->struct_mutex);

                ret = drm_bo_leave_list(entry, mem_type, free_pinned,
                                        allow_errors);
                mutex_lock(&dev->struct_mutex);

                drm_bo_usage_deref_locked(&entry);
                if (ret)
                        return ret;

                /*
                 * Has the next item disappeared from the list?
                 */

                do_restart = ((next->prev != list) && (next->prev != prev));

                if (nentry != NULL && do_restart)
                        drm_bo_usage_deref_locked(&nentry);

                if (do_restart)
                        goto restart;
        }
        return 0;
}
int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem_type];
        int ret = -EINVAL;

        if (mem_type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %d\n", mem_type);
                return ret;
        }

        if (!man->has_type) {
                DRM_ERROR("Trying to take down uninitialized "
                          "memory manager type %u\n", mem_type);
                return ret;
        }
        man->use_type = 0;
        man->has_type = 0;

        ret = 0;
        if (mem_type > 0) {
                BUG_ON(!list_empty(&bm->unfenced));
                drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
                drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);

                if (drm_mm_clean(&man->manager)) {
                        drm_mm_takedown(&man->manager);
                } else {
                        ret = -EBUSY;
                }
        }

        return ret;
}
EXPORT_SYMBOL(drm_bo_clean_mm);
/*
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
 */

static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
{
        int ret;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem_type];

        if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                DRM_ERROR("Memory type %u has not been initialized.\n",
                          mem_type);
                return 0;
        }

        ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
        if (ret)
                return ret;
        ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);

        return ret;
}
int drm_bo_init_mm(struct drm_device * dev,
                   unsigned type,
                   unsigned long p_offset, unsigned long p_size)
{
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
        struct drm_mem_type_manager *man;

        if (type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %d\n", type);
                return ret;
        }

        man = &bm->man[type];
        if (man->has_type) {
                DRM_ERROR("Memory manager already initialized for type %d\n",
                          type);
                return ret;
        }

        ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != DRM_BO_MEM_LOCAL) {
                if (!p_size) {
                        DRM_ERROR("Zero size memory manager type %d\n", type);
                        return ret;
                }
                ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = 1;
        man->use_type = 1;

        INIT_LIST_HEAD(&man->lru);
        INIT_LIST_HEAD(&man->pinned);

        return 0;
}
EXPORT_SYMBOL(drm_bo_init_mm);
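/*
 * Usage sketch (hypothetical driver load path, not from this file): besides
 * DRM_BO_MEM_LOCAL, which drm_bo_driver_init() below sets up itself, memory
 * types are initialized by the driver or via the init IOCTL.  Offsets and
 * sizes are in pages; tt_pages is an assumed driver-computed value.
 */
#if 0
        mutex_lock(&dev->struct_mutex);
        ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
        mutex_unlock(&dev->struct_mutex);
#endif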
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(struct drm_device * dev)
{
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = 0;
        unsigned i = DRM_BO_MEM_TYPES;
        struct drm_mem_type_manager *man;

        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);

        if (!bm->initialized)
                goto out;
        bm->initialized = 0;

        while (i--) {
                man = &bm->man[i];
                if (man->has_type) {
                        man->use_type = 0;
                        if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
                                ret = -EBUSY;
                                DRM_ERROR("DRM memory manager type %d "
                                          "is not clean.\n", i);
                        }
                        man->has_type = 0;
                }
        }
        mutex_unlock(&dev->struct_mutex);

        if (!cancel_delayed_work(&bm->wq)) {
                flush_scheduled_work();
        }
        mutex_lock(&dev->struct_mutex);
        drm_bo_delayed_delete(dev, 1);
        if (list_empty(&bm->ddestroy)) {
                DRM_DEBUG("Delayed destroy list was clean\n");
        }
        if (list_empty(&bm->man[0].lru)) {
                DRM_DEBUG("Swap list was clean\n");
        }
        if (list_empty(&bm->man[0].pinned)) {
                DRM_DEBUG("NO_MOVE list was clean\n");
        }
        if (list_empty(&bm->unfenced)) {
                DRM_DEBUG("Unfenced list was clean\n");
        }
out:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        return ret;
}
int drm_bo_driver_init(struct drm_device * dev)
{
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;

        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
        if (ret)
                goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
        INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
        bm->initialized = 1;
        bm->nice_mode = 1;
        atomic_set(&bm->count, 0);

        INIT_LIST_HEAD(&bm->unfenced);
        INIT_LIST_HEAD(&bm->ddestroy);

out_unlock:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_mm_init_arg *arg = data;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;

        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }

        ret = -EINVAL;
        if (arg->magic != DRM_BO_INIT_MAGIC) {
                DRM_ERROR("You are using an old libdrm that is not compatible with\n"
                          "\tthe kernel DRM module. Please upgrade your libdrm.\n");
                return -EINVAL;
        }
        if (arg->major != DRM_BO_INIT_MAJOR) {
                DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
                          "\tversions don't match. Got %d, expected %d.\n",
                          arg->major, DRM_BO_INIT_MAJOR);
                return -EINVAL;
        }
        if (arg->minor > DRM_BO_INIT_MINOR) {
                DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
                          "\tlibdrm buffer object interface version is %d.%d.\n"
                          "\tkernel DRM buffer object interface version is %d.%d\n",
                          arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
                return -EINVAL;
        }

        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        if (!bm->initialized) {
                DRM_ERROR("DRM memory manager was not initialized.\n");
                goto out;
        }
        if (arg->mem_type == 0) {
                DRM_ERROR("System memory buffers already initialized.\n");
                goto out;
        }
        ret = drm_bo_init_mm(dev, arg->mem_type,
                             arg->p_offset, arg->p_size);

out:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        if (ret)
                return ret;

        return 0;
}
int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_mm_type_arg *arg = data;
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;

        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }

        LOCK_TEST_WITH_RETURN(dev, file_priv);
        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        ret = -EINVAL;
        if (!bm->initialized) {
                DRM_ERROR("DRM memory manager was not initialized\n");
                goto out;
        }
        if (arg->mem_type == 0) {
                DRM_ERROR("No takedown for system memory buffers.\n");
                goto out;
        }
        ret = 0;
        if (drm_bo_clean_mm(dev, arg->mem_type)) {
                DRM_ERROR("Memory manager type %d not clean. "
                          "Delaying takedown\n", arg->mem_type);
        }
out:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        if (ret)
                return ret;

        return 0;
}
int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_mm_type_arg *arg = data;
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;

        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }

        LOCK_TEST_WITH_RETURN(dev, file_priv);
        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        ret = drm_bo_lock_mm(dev, arg->mem_type);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        if (ret)
                return ret;

        return 0;
}
int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        int ret;

        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }

        LOCK_TEST_WITH_RETURN(dev, file_priv);
        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        ret = 0;
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        if (ret)
                return ret;

        return 0;
}
/*
 * buffer object vm functions.
 */

int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem->mem_type];

        if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
                if (mem->mem_type == DRM_BO_MEM_LOCAL)
                        return 0;

                if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
                        return 0;

                if (mem->flags & DRM_BO_FLAG_CACHED)
                        return 0;
        }
        return 1;
}
EXPORT_SYMBOL(drm_mem_reg_is_pci);
/**
 * \c Get the PCI offset for the buffer object memory.
 *
 * \param dev The DRM device.
 * \param mem The buffer object memory region.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
 */

int drm_bo_pci_offset(struct drm_device *dev,
                      struct drm_bo_mem_reg *mem,
                      unsigned long *bus_base,
                      unsigned long *bus_offset, unsigned long *bus_size)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem->mem_type];

        *bus_size = 0;
        if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
                return -EINVAL;

        if (drm_mem_reg_is_pci(dev, mem)) {
                *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                *bus_size = mem->num_pages << PAGE_SHIFT;
                *bus_base = man->io_offset;
        }

        return 0;
}
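/*
 * Example (sketch, not from this file): mapping the returned PCI region
 * into kernel space.  ioremap() of bus_base + bus_offset is one plausible
 * use; error handling and caching attributes are omitted.
 */
#if 0
        unsigned long bus_base, bus_offset, bus_size;
        void __iomem *virt = NULL;

        if (!drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                               &bus_size) && bus_size != 0)
                virt = ioremap(bus_base + bus_offset, bus_size);
#endif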
/**
 * \c Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

static void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
{
        struct drm_device *dev = bo->dev;
        loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!dev->dev_mapping)
                return;

        unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
{
        struct drm_map_list *list = &bo->map_list;
        drm_local_map_t *map;
        struct drm_device *dev = bo->dev;

        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        if (list->user_token) {
                drm_ht_remove_item(&dev->map_hash, &list->hash);
                list->user_token = 0;
        }
        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        map = list->map;
        if (!map)
                return;

        drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
        list->map = NULL;
        list->user_token = 0ULL;
        drm_bo_usage_deref_locked(&bo);
}
static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
{
        struct drm_map_list *list = &bo->map_list;
        drm_local_map_t *map;
        struct drm_device *dev = bo->dev;

        DRM_ASSERT_LOCKED(&dev->struct_mutex);
        list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->offset = 0;
        map->type = _DRM_TTM;
        map->flags = _DRM_REMOVABLE;
        map->size = bo->mem.num_pages * PAGE_SIZE;
        atomic_inc(&bo->usage);
        map->handle = (void *)bo;

        list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
                                                    bo->mem.num_pages, 0, 0);

        if (!list->file_offset_node) {
                drm_bo_takedown_vm_locked(bo);
                return -ENOMEM;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  bo->mem.num_pages, 0);

        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
                drm_bo_takedown_vm_locked(bo);
                return -ENOMEM;
        }

        list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;

        return 0;
}