1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads, as well as hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44 * both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48 * traversal will, in general, need to be restarted.
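/*
 * Illustrative sketch (editorial, not part of the driver) of the pattern
 * the rules above impose on a list walker that starts out holding only
 * dev->struct_mutex:
 *
 *	list_for_each_entry(bo, &man->lru, lru) {
 *		atomic_inc(&bo->usage);            keep bo alive
 *		mutex_unlock(&dev->struct_mutex);  respect the lock order
 *		mutex_lock(&bo->mutex);
 *		... operate on bo ...
 *		mutex_unlock(&bo->mutex);
 *		drm_bo_usage_deref_unlocked(&bo);
 *		mutex_lock(&dev->struct_mutex);
 *		break;                             list may have changed; restart
 *	}
 *
 * See drm_bo_force_list_clean() further down for how the real code avoids
 * restarting from scratch when draining pinned lists.
 */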
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
57 static inline uint32_t drm_bo_type_flags(unsigned type)
59 return (1 << (24 + type));
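/*
 * Editorial note: memory type N maps to placement-flag bit (24 + N), so
 * DRM_BO_MEM_LOCAL (0) corresponds to DRM_BO_FLAG_MEM_LOCAL (1 << 24)
 * and DRM_BO_MEM_TT (1) to DRM_BO_FLAG_MEM_TT (1 << 25).
 */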
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
83 if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84 || bo->mem.mem_type != bo->pinned_mem_type) {
85 man = &bo->dev->bm.man[bo->mem.mem_type];
86 list_add_tail(&bo->lru, &man->lru);
88 INIT_LIST_HEAD(&bo->lru);
92 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
94 #ifdef DRM_ODD_MM_COMPAT
97 if (!bo->map_list.map)
100 ret = drm_bo_lock_kmm(bo);
103 drm_bo_unmap_virtual(bo);
105 drm_bo_finish_unmap(bo);
107 if (!bo->map_list.map)
110 drm_bo_unmap_virtual(bo);
115 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
117 #ifdef DRM_ODD_MM_COMPAT
120 if (!bo->map_list.map)
123 ret = drm_bo_remap_bound(bo);
125 DRM_ERROR("Failed to remap a bound buffer object.\n"
126 "\tThis might cause a sigbus later.\n");
128 drm_bo_unlock_kmm(bo);
133 * Call bo->mutex locked.
136 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
138 struct drm_device *dev = bo->dev;
142 DRM_ASSERT_LOCKED(&bo->mutex);
146 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
150 case drm_bo_type_kernel:
151 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
155 case drm_bo_type_user:
156 case drm_bo_type_fake:
159 DRM_ERROR("Illegal buffer object type\n");
167 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
168 struct drm_bo_mem_reg * mem,
169 int evict, int no_wait)
171 struct drm_device *dev = bo->dev;
172 struct drm_buffer_manager *bm = &dev->bm;
173 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
174 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
175 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
176 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
179 if (old_is_pci || new_is_pci)
180 ret = drm_bo_vm_pre_move(bo, old_is_pci);
185 * Create and bind a ttm if required.
188 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
189 ret = drm_bo_add_ttm(bo);
193 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
194 ret = drm_bind_ttm(bo->ttm, new_man->flags &
196 mem->mm_node->start);
202 if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
204 struct drm_bo_mem_reg *old_mem = &bo->mem;
205 uint64_t save_flags = old_mem->flags;
206 uint64_t save_mask = old_mem->mask;
210 old_mem->mask = save_mask;
211 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
213 } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
214 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
216 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
218 } else if (dev->driver->bo_driver->move) {
219 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
223 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
230 if (old_is_pci || new_is_pci)
231 drm_bo_vm_post_move(bo);
233 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
235 dev->driver->bo_driver->invalidate_caches(dev,
238 DRM_ERROR("Can not flush read caches\n");
241 DRM_FLAG_MASKED(bo->priv_flags,
242 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
243 _DRM_BO_FLAG_EVICTED);
246 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
251 if (old_is_pci || new_is_pci)
252 drm_bo_vm_post_move(bo);
254 new_man = &bm->man[bo->mem.mem_type];
255 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
256 drm_ttm_unbind(bo->ttm);
257 drm_destroy_ttm(bo->ttm);
265 * Call bo->mutex locked.
266 * Wait until the buffer is idle.
269 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
274 DRM_ASSERT_LOCKED(&bo->mutex);
277 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
278 drm_fence_usage_deref_unlocked(&bo->fence);
285 drm_fence_object_wait(bo->fence, lazy, ignore_signals,
290 drm_fence_usage_deref_unlocked(&bo->fence);
295 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
297 struct drm_device *dev = bo->dev;
298 struct drm_buffer_manager *bm = &dev->bm;
302 unsigned long _end = jiffies + 3 * DRM_HZ;
305 ret = drm_bo_wait(bo, 0, 1, 0);
306 if (ret && allow_errors)
309 } while (ret && !time_after_eq(jiffies, _end));
313 DRM_ERROR("Detected GPU lockup or "
314 "fence driver was taken down. "
315 "Evicting buffer.\n");
319 drm_fence_usage_deref_unlocked(&bo->fence);
325 * Call dev->struct_mutex locked.
326 * Attempts to remove all private references to a buffer by expiring its
327 * fence object and removing from lru lists and memory managers.
330 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
332 struct drm_device *dev = bo->dev;
333 struct drm_buffer_manager *bm = &dev->bm;
335 DRM_ASSERT_LOCKED(&dev->struct_mutex);
337 atomic_inc(&bo->usage);
338 mutex_unlock(&dev->struct_mutex);
339 mutex_lock(&bo->mutex);
341 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
343 if (bo->fence && drm_fence_object_signaled(bo->fence,
345 drm_fence_usage_deref_unlocked(&bo->fence);
347 if (bo->fence && remove_all)
348 (void)drm_bo_expire_fence(bo, 0);
350 mutex_lock(&dev->struct_mutex);
352 if (!atomic_dec_and_test(&bo->usage)) {
357 list_del_init(&bo->lru);
358 if (bo->mem.mm_node) {
359 drm_mm_put_block(bo->mem.mm_node);
360 if (bo->pinned_node == bo->mem.mm_node)
361 bo->pinned_node = NULL;
362 bo->mem.mm_node = NULL;
364 list_del_init(&bo->pinned_lru);
365 if (bo->pinned_node) {
366 drm_mm_put_block(bo->pinned_node);
367 bo->pinned_node = NULL;
369 list_del_init(&bo->ddestroy);
370 mutex_unlock(&bo->mutex);
371 drm_bo_destroy_locked(bo);
375 if (list_empty(&bo->ddestroy)) {
376 drm_fence_object_flush(bo->fence, bo->fence_type);
377 list_add_tail(&bo->ddestroy, &bm->ddestroy);
378 schedule_delayed_work(&bm->wq,
379 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
383 mutex_unlock(&bo->mutex);
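/*
 * Editorial summary: when the buffer is still fenced and remove_all is not
 * set, drm_bo_cleanup_refs() parks it on bm->ddestroy and schedules bm->wq;
 * drm_bo_delayed_workqueue() then retries the cleanup roughly every
 * DRM_HZ/100 ticks until the fence has expired and drm_bo_destroy_locked()
 * can finally free the object.
 */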
388 * Verify that refcount is 0 and that there are no internal references
389 * to the buffer object. Then destroy it.
392 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
394 struct drm_device *dev = bo->dev;
395 struct drm_buffer_manager *bm = &dev->bm;
397 DRM_ASSERT_LOCKED(&dev->struct_mutex);
399 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
400 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
401 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
402 if (bo->fence != NULL) {
403 DRM_ERROR("Fence was non-zero.\n");
404 drm_bo_cleanup_refs(bo, 0);
408 #ifdef DRM_ODD_MM_COMPAT
409 BUG_ON(!list_empty(&bo->vma_list));
410 BUG_ON(!list_empty(&bo->p_mm_list));
414 drm_ttm_unbind(bo->ttm);
415 drm_destroy_ttm(bo->ttm);
419 atomic_dec(&bm->count);
421 BUG_ON(!list_empty(&bo->base.list));
422 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
428 * Some stuff is still trying to reference the buffer object.
429 * Get rid of those references.
432 drm_bo_cleanup_refs(bo, 0);
438 * Call dev->struct_mutex locked.
441 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
443 struct drm_buffer_manager *bm = &dev->bm;
445 struct drm_buffer_object *entry, *nentry;
446 struct list_head *list, *next;
448 list_for_each_safe(list, next, &bm->ddestroy) {
449 entry = list_entry(list, struct drm_buffer_object, ddestroy);
452 if (next != &bm->ddestroy) {
453 nentry = list_entry(next, struct drm_buffer_object,
455 atomic_inc(&nentry->usage);
458 drm_bo_cleanup_refs(entry, remove_all);
461 atomic_dec(&nentry->usage);
466 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
467 static void drm_bo_delayed_workqueue(void *data)
469 static void drm_bo_delayed_workqueue(struct work_struct *work)
472 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
473 struct drm_device *dev = (struct drm_device *) data;
474 struct drm_buffer_manager *bm = &dev->bm;
476 struct drm_buffer_manager *bm =
477 container_of(work, struct drm_buffer_manager, wq.work);
478 struct drm_device *dev = container_of(bm, struct drm_device, bm);
481 DRM_DEBUG("Delayed delete Worker\n");
483 mutex_lock(&dev->struct_mutex);
484 if (!bm->initialized) {
485 mutex_unlock(&dev->struct_mutex);
488 drm_bo_delayed_delete(dev, 0);
489 if (bm->initialized && !list_empty(&bm->ddestroy)) {
490 schedule_delayed_work(&bm->wq,
491 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
493 mutex_unlock(&dev->struct_mutex);
496 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
498 struct drm_buffer_object *tmp_bo = *bo;
501 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
503 if (atomic_dec_and_test(&tmp_bo->usage)) {
504 drm_bo_destroy_locked(tmp_bo);
508 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
509 struct drm_user_object * uo)
511 struct drm_buffer_object *bo =
512 drm_user_object_entry(uo, struct drm_buffer_object, base);
514 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
516 drm_bo_takedown_vm_locked(bo);
517 drm_bo_usage_deref_locked(&bo);
520 static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
522 struct drm_buffer_object *tmp_bo = *bo;
523 struct drm_device *dev = tmp_bo->dev;
526 if (atomic_dec_and_test(&tmp_bo->usage)) {
527 mutex_lock(&dev->struct_mutex);
528 if (atomic_read(&tmp_bo->usage) == 0)
529 drm_bo_destroy_locked(tmp_bo);
530 mutex_unlock(&dev->struct_mutex);
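/*
 * Example (illustrative sketch): the two deref helpers encode the usual
 * refcount pattern.  With dev->struct_mutex already held, use
 *
 *	drm_bo_usage_deref_locked(&bo);
 *
 * Without it, callers do
 *
 *	atomic_inc(&bo->usage);
 *	... use bo with no locks held ...
 *	drm_bo_usage_deref_unlocked(&bo);
 *
 * where the unlocked variant drops the count first and only takes
 * dev->struct_mutex when the count may have reached zero, re-checking
 * under the lock before destroying.
 */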
535 * Note: the caller has to register (if applicable)
536 * and deregister fence object usage.
539 int drm_fence_buffer_objects(struct drm_file * file_priv,
540 struct list_head *list,
541 uint32_t fence_flags,
542 struct drm_fence_object * fence,
543 struct drm_fence_object ** used_fence)
545 struct drm_device *dev = file_priv->head->dev;
546 struct drm_buffer_manager *bm = &dev->bm;
548 struct drm_buffer_object *entry;
549 uint32_t fence_type = 0;
555 mutex_lock(&dev->struct_mutex);
558 list = &bm->unfenced;
560 list_for_each_entry(entry, list, lru) {
561 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
562 fence_type |= entry->fence_type;
563 if (entry->fence_class != 0) {
564 DRM_ERROR("Fence class %d is not implemented yet.\n",
578 * Transfer to a local list before we release the dev->struct_mutex,
579 * so that we don't get any new unfenced objects while fencing
580 * the ones we already have.
583 list_splice_init(list, &f_list);
586 if ((fence_type & fence->type) != fence_type) {
587 DRM_ERROR("Given fence doesn't match buffers "
588 "on unfenced list.\n");
593 mutex_unlock(&dev->struct_mutex);
594 ret = drm_fence_object_create(dev, 0, fence_type,
595 fence_flags | DRM_FENCE_FLAG_EMIT,
597 mutex_lock(&dev->struct_mutex);
604 while (l != &f_list) {
606 entry = list_entry(l, struct drm_buffer_object, lru);
607 atomic_inc(&entry->usage);
608 mutex_unlock(&dev->struct_mutex);
609 mutex_lock(&entry->mutex);
610 mutex_lock(&dev->struct_mutex);
612 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
615 drm_fence_usage_deref_locked(&entry->fence);
616 entry->fence = drm_fence_reference_locked(fence);
617 DRM_FLAG_MASKED(entry->priv_flags, 0,
618 _DRM_BO_FLAG_UNFENCED);
619 DRM_WAKEUP(&entry->event_queue);
620 drm_bo_add_to_lru(entry);
622 mutex_unlock(&entry->mutex);
623 drm_bo_usage_deref_locked(&entry);
626 DRM_DEBUG("Fenced %d buffers\n", count);
628 mutex_unlock(&dev->struct_mutex);
633 EXPORT_SYMBOL(drm_fence_buffer_objects);
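/*
 * Example (illustrative sketch of how a driver might use this after
 * submitting a batch):
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	... validate buffers, emit commands ...
 *	ret = drm_fence_buffer_objects(file_priv, NULL, 0, NULL, &fence);
 *
 * Passing a NULL list fences everything on bm->unfenced, and passing a
 * NULL fence asks the function to create and emit one itself; the fence
 * actually used is returned in *used_fence, and per the note above the
 * caller is responsible for dereferencing it again.
 */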
639 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
643 struct drm_device *dev = bo->dev;
644 struct drm_bo_mem_reg evict_mem;
647 * Someone might have modified the buffer before we took the buffer mutex.
650 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
652 if (bo->mem.mem_type != mem_type)
655 ret = drm_bo_wait(bo, 0, 0, no_wait);
657 if (ret && ret != -EAGAIN) {
658 DRM_ERROR("Failed to expire fence before "
659 "buffer eviction.\n");
664 evict_mem.mm_node = NULL;
666 if (bo->type == drm_bo_type_fake) {
667 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
668 bo->mem.mm_node = NULL;
673 evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
674 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
678 DRM_ERROR("Failed to find memory space for "
679 "buffer 0x%p eviction.\n", bo);
683 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
687 DRM_ERROR("Buffer eviction failed\n");
692 mutex_lock(&dev->struct_mutex);
693 if (evict_mem.mm_node) {
694 if (evict_mem.mm_node != bo->pinned_node)
695 drm_mm_put_block(evict_mem.mm_node);
696 evict_mem.mm_node = NULL;
699 drm_bo_add_to_lru(bo);
700 mutex_unlock(&dev->struct_mutex);
702 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
703 _DRM_BO_FLAG_EVICTED);
709 static int drm_bo_mem_force_space(struct drm_device * dev,
710 struct drm_bo_mem_reg * mem,
711 uint32_t mem_type, int no_wait)
713 struct drm_mm_node *node;
714 struct drm_buffer_manager *bm = &dev->bm;
715 struct drm_buffer_object *entry;
716 struct drm_mem_type_manager *man = &bm->man[mem_type];
717 struct list_head *lru;
718 unsigned long num_pages = mem->num_pages;
721 mutex_lock(&dev->struct_mutex);
723 node = drm_mm_search_free(&man->manager, num_pages,
724 mem->page_alignment, 1);
729 if (lru->next == lru)
732 entry = list_entry(lru->next, struct drm_buffer_object, lru);
733 atomic_inc(&entry->usage);
734 mutex_unlock(&dev->struct_mutex);
735 mutex_lock(&entry->mutex);
736 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
738 ret = drm_bo_evict(entry, mem_type, no_wait);
739 mutex_unlock(&entry->mutex);
740 drm_bo_usage_deref_unlocked(&entry);
743 mutex_lock(&dev->struct_mutex);
747 mutex_unlock(&dev->struct_mutex);
751 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
752 mutex_unlock(&dev->struct_mutex);
754 mem->mem_type = mem_type;
758 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
760 uint32_t mask, uint32_t * res_mask)
762 uint32_t cur_flags = drm_bo_type_flags(mem_type);
765 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
766 cur_flags |= DRM_BO_FLAG_CACHED;
767 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
768 cur_flags |= DRM_BO_FLAG_MAPPABLE;
769 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
770 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
772 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
775 if (mem_type == DRM_BO_MEM_LOCAL) {
776 *res_mask = cur_flags;
780 flag_diff = (mask ^ cur_flags);
781 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
782 (!(mask & DRM_BO_FLAG_CACHED) ||
783 (mask & DRM_BO_FLAG_FORCE_CACHING)))
786 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
787 ((mask & DRM_BO_FLAG_MAPPABLE) ||
788 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
791 *res_mask = cur_flags;
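/*
 * Example (editorial): a request whose mask is
 * DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MAPPABLE
 * is compatible with an uncached, mappable TT manager, and *res_mask then
 * comes back as DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE.  A manager
 * whose caching or mappability differs from what the mask demands
 * (including via DRM_BO_FLAG_FORCE_CACHING / DRM_BO_FLAG_FORCE_MAPPABLE)
 * is rejected.
 */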
795 int drm_bo_mem_space(struct drm_buffer_object * bo,
796 struct drm_bo_mem_reg * mem, int no_wait)
798 struct drm_device *dev = bo->dev;
799 struct drm_buffer_manager *bm = &dev->bm;
800 struct drm_mem_type_manager *man;
802 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
803 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
805 uint32_t mem_type = DRM_BO_MEM_LOCAL;
810 struct drm_mm_node *node = NULL;
814 for (i = 0; i < num_prios; ++i) {
816 man = &bm->man[mem_type];
818 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
824 if (mem_type == DRM_BO_MEM_LOCAL)
827 if ((mem_type == bo->pinned_mem_type) &&
828 (bo->pinned_node != NULL)) {
829 node = bo->pinned_node;
833 mutex_lock(&dev->struct_mutex);
834 if (man->has_type && man->use_type) {
836 node = drm_mm_search_free(&man->manager, mem->num_pages,
837 mem->page_alignment, 1);
839 node = drm_mm_get_block(node, mem->num_pages,
840 mem->page_alignment);
842 mutex_unlock(&dev->struct_mutex);
847 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
849 mem->mem_type = mem_type;
850 mem->flags = cur_flags;
857 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
858 prios = dev->driver->bo_driver->mem_busy_prio;
860 for (i = 0; i < num_prios; ++i) {
862 man = &bm->man[mem_type];
867 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
870 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
873 mem->flags = cur_flags;
881 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
885 EXPORT_SYMBOL(drm_bo_mem_space);
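/*
 * Usage sketch (editorial): drm_bo_mem_space() first walks the driver's
 * preferred priority list looking for free space, and only then walks
 * mem_busy_prio, evicting LRU buffers via drm_bo_mem_force_space() until
 * the request fits or every type is exhausted:
 *
 *	struct drm_bo_mem_reg mem;
 *
 *	mem.num_pages = bo->mem.num_pages;
 *	mem.mask = DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_MEM_TT;
 *	mem.page_alignment = 0;
 *	ret = drm_bo_mem_space(bo, &mem, no_wait);
 *
 * On success mem.mem_type, mem.flags and (for non-local types)
 * mem.mm_node describe the chosen placement; -EAGAIN typically means
 * eviction was blocked by no_wait, and -ENOMEM means nothing fit.
 */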
887 static int drm_bo_new_mask(struct drm_buffer_object * bo,
888 uint64_t new_mask, uint32_t hint)
892 if (bo->type == drm_bo_type_user) {
893 DRM_ERROR("User buffers are not supported yet\n");
896 if (bo->type == drm_bo_type_fake &&
897 !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
898 DRM_ERROR("Fake buffers must be pinned.\n");
902 if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
904 ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
909 new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
913 DRM_ERROR("Invalid buffer object rwx properties\n");
917 bo->mem.mask = new_mask;
922 * Call dev->struct_mutex locked.
925 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
926 uint32_t handle, int check_owner)
928 struct drm_user_object *uo;
929 struct drm_buffer_object *bo;
931 uo = drm_lookup_user_object(file_priv, handle);
933 if (!uo || (uo->type != drm_buffer_type)) {
934 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
938 if (check_owner && file_priv != uo->owner) {
939 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
943 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
944 atomic_inc(&bo->usage);
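/*
 * Example (illustrative): drm_lookup_buffer_object() must be called with
 * dev->struct_mutex held and returns the object with its usage count
 * bumped, so callers follow the pattern
 *
 *	mutex_lock(&dev->struct_mutex);
 *	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!bo)
 *		return -EINVAL;
 *	... use bo ...
 *	drm_bo_usage_deref_unlocked(&bo);
 */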
949 * Call bo->mutex locked.
950 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
951 * Unlike drm_bo_busy(), it doesn't do any fence flushing.
954 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
956 struct drm_fence_object *fence = bo->fence;
958 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
960 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
961 drm_fence_usage_deref_unlocked(&bo->fence);
970 * Call bo->mutex locked.
971 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
974 static int drm_bo_busy(struct drm_buffer_object * bo)
976 struct drm_fence_object *fence = bo->fence;
978 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
980 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
981 drm_fence_usage_deref_unlocked(&bo->fence);
984 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
985 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
986 drm_fence_usage_deref_unlocked(&bo->fence);
994 static int drm_bo_read_cached(struct drm_buffer_object * bo)
998 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1000 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1005 * Wait until a buffer is unmapped.
1008 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1012 if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1015 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1016 atomic_read(&bo->mapped) == -1);
1024 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1028 mutex_lock(&bo->mutex);
1029 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1030 mutex_unlock(&bo->mutex);
1035 * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
1036 * Until then, we cannot really do anything with it except delete it.
1037 * The unfenced list is a PITA, and the operations
1038 * 1) validating
1039 * 2) submitting commands
1040 * 3) fencing
1041 * should really be one atomic operation.
1042 * We now "solve" this problem by keeping
1043 * the buffer "unfenced" after validating, but before fencing.
1046 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1049 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1057 mutex_unlock(&bo->mutex);
1058 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1059 !drm_bo_check_unfenced(bo));
1060 mutex_lock(&bo->mutex);
1063 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1065 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1075 * Fill in the ioctl reply argument with buffer info.
1079 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1080 struct drm_bo_info_rep *rep)
1082 rep->handle = bo->base.hash.key;
1083 rep->flags = bo->mem.flags;
1084 rep->size = bo->mem.num_pages * PAGE_SIZE;
1085 rep->offset = bo->offset;
1086 rep->arg_handle = bo->map_list.user_token;
1087 rep->mask = bo->mem.mask;
1088 rep->buffer_start = bo->buffer_start;
1089 rep->fence_flags = bo->fence_type;
1091 rep->page_alignment = bo->mem.page_alignment;
1093 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1094 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1100 * Wait for buffer idle and register that we've mapped the buffer.
1101 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1102 * so that if the client dies, the mapping is automatically dropped.
1106 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1107 uint32_t map_flags, unsigned hint,
1108 struct drm_bo_info_rep *rep)
1110 struct drm_buffer_object *bo;
1111 struct drm_device *dev = file_priv->head->dev;
1113 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1115 mutex_lock(&dev->struct_mutex);
1116 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1117 mutex_unlock(&dev->struct_mutex);
1122 mutex_lock(&bo->mutex);
1123 if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1124 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1130 * If this returns true, we are currently unmapped.
1131 * We need to do this test, because unmapping can
1132 * be done without the bo->mutex held.
1136 if (atomic_inc_and_test(&bo->mapped)) {
1137 if (no_wait && drm_bo_busy(bo)) {
1138 atomic_dec(&bo->mapped);
1142 ret = drm_bo_wait(bo, 0, 0, no_wait);
1144 atomic_dec(&bo->mapped);
1148 if ((map_flags & DRM_BO_FLAG_READ) &&
1149 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1150 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1151 drm_bo_read_cached(bo);
1154 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1155 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1156 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1159 * We are already mapped with different flags.
1160 * We need to wait for unmap.
1163 ret = drm_bo_wait_unmapped(bo, no_wait);
1172 mutex_lock(&dev->struct_mutex);
1173 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1174 mutex_unlock(&dev->struct_mutex);
1176 if (atomic_add_negative(-1, &bo->mapped))
1177 DRM_WAKEUP(&bo->event_queue);
1180 drm_bo_fill_rep_arg(bo, rep);
1182 mutex_unlock(&bo->mutex);
1183 drm_bo_usage_deref_unlocked(&bo);
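/*
 * Editorial note on the bo->mapped convention used above: the counter
 * starts at -1 for an unmapped buffer, so
 *
 *	atomic_inc_and_test(&bo->mapped)      true for the first mapper (-1 -> 0)
 *	atomic_add_negative(-1, &bo->mapped)  true for the last unmapper (0 -> -1)
 *
 * which is why drm_bo_wait_unmapped() waits for the value to return to -1,
 * and why the error paths above decrement and wake bo->event_queue.
 */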
1187 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1189 struct drm_device *dev = file_priv->head->dev;
1190 struct drm_buffer_object *bo;
1191 struct drm_ref_object *ro;
1194 mutex_lock(&dev->struct_mutex);
1196 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1202 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1208 drm_remove_ref_object(file_priv, ro);
1209 drm_bo_usage_deref_locked(&bo);
1211 mutex_unlock(&dev->struct_mutex);
1216 * Call with dev->struct_mutex locked.
1219 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1220 struct drm_user_object * uo,
1221 enum drm_ref_type action)
1223 struct drm_buffer_object *bo =
1224 drm_user_object_entry(uo, struct drm_buffer_object, base);
1227 * We DON'T want to take the bo->lock here, because we want to
1228 * hold it when we wait for unmapped buffer.
1231 BUG_ON(action != _DRM_REF_TYPE1);
1233 if (atomic_add_negative(-1, &bo->mapped))
1234 DRM_WAKEUP(&bo->event_queue);
1239 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1242 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1243 int no_wait, int move_unfenced)
1245 struct drm_device *dev = bo->dev;
1246 struct drm_buffer_manager *bm = &dev->bm;
1248 struct drm_bo_mem_reg mem;
1250 * Flush outstanding fences.
1256 * Wait for outstanding fences.
1259 ret = drm_bo_wait(bo, 0, 0, no_wait);
1263 mem.num_pages = bo->mem.num_pages;
1264 mem.size = mem.num_pages << PAGE_SHIFT;
1265 mem.mask = new_mem_flags;
1266 mem.page_alignment = bo->mem.page_alignment;
1268 mutex_lock(&bm->evict_mutex);
1269 mutex_lock(&dev->struct_mutex);
1271 list_add_tail(&bo->lru, &bm->unfenced);
1272 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1273 _DRM_BO_FLAG_UNFENCED);
1274 mutex_unlock(&dev->struct_mutex);
1277 * Determine where to move the buffer.
1279 ret = drm_bo_mem_space(bo, &mem, no_wait);
1283 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1286 if (ret || !move_unfenced) {
1287 mutex_lock(&dev->struct_mutex);
1289 if (mem.mm_node != bo->pinned_node)
1290 drm_mm_put_block(mem.mm_node);
1293 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1294 DRM_WAKEUP(&bo->event_queue);
1296 drm_bo_add_to_lru(bo);
1297 mutex_unlock(&dev->struct_mutex);
1300 mutex_unlock(&bm->evict_mutex);
1304 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1306 uint32_t flag_diff = (mem->mask ^ mem->flags);
1308 if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1310 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1311 (!(mem->mask & DRM_BO_FLAG_CACHED) ||
1312 (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1315 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1316 ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1317 (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1322 static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
1324 struct drm_buffer_manager *bm = &dev->bm;
1325 struct drm_mem_type_manager *man;
1326 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1327 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1330 uint32_t mem_type = 0;
1333 if (drm_bo_mem_compat(mem))
1336 BUG_ON(mem->mm_node);
1338 for (i = 0; i < num_prios; ++i) {
1339 mem_type = prios[i];
1340 man = &bm->man[mem_type];
1341 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1348 mem->mm_node = NULL;
1349 mem->mem_type = mem_type;
1350 mem->flags = cur_flags;
1351 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1355 DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1356 (unsigned long long) mem->mask);
1364 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1365 uint32_t fence_class,
1366 int move_unfenced, int no_wait)
1368 struct drm_device *dev = bo->dev;
1369 struct drm_buffer_manager *bm = &dev->bm;
1370 struct drm_bo_driver *driver = dev->driver->bo_driver;
1374 DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1375 (unsigned long long) bo->mem.mask,
1376 (unsigned long long) bo->mem.flags);
1378 ret = driver->fence_type(bo, &ftype);
1381 DRM_ERROR("Driver did not support given buffer permissions\n");
1386 * We're switching command submission mechanism,
1387 * or cannot simply rely on the hardware serializing for us.
1389 * Wait for buffer idle.
1392 if ((fence_class != bo->fence_class) ||
1393 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1395 ret = drm_bo_wait(bo, 0, 0, no_wait);
1402 bo->fence_class = fence_class;
1403 bo->fence_type = ftype;
1404 ret = drm_bo_wait_unmapped(bo, no_wait);
1408 if (bo->type == drm_bo_type_fake) {
1409 ret = drm_bo_check_fake(dev, &bo->mem);
1415 * Check whether we need to move the buffer.
1418 if (!drm_bo_mem_compat(&bo->mem)) {
1419 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1423 DRM_ERROR("Failed moving buffer.\n");
1432 if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1433 bo->pinned_mem_type = bo->mem.mem_type;
1434 mutex_lock(&dev->struct_mutex);
1435 list_del_init(&bo->pinned_lru);
1436 drm_bo_add_to_pinned_lru(bo);
1438 if (bo->pinned_node != bo->mem.mm_node) {
1439 if (bo->pinned_node != NULL)
1440 drm_mm_put_block(bo->pinned_node);
1441 bo->pinned_node = bo->mem.mm_node;
1444 mutex_unlock(&dev->struct_mutex);
1446 } else if (bo->pinned_node != NULL) {
1448 mutex_lock(&dev->struct_mutex);
1450 if (bo->pinned_node != bo->mem.mm_node)
1451 drm_mm_put_block(bo->pinned_node);
1453 list_del_init(&bo->pinned_lru);
1454 bo->pinned_node = NULL;
1455 mutex_unlock(&dev->struct_mutex);
1460 * We might need to add a TTM.
1463 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1464 ret = drm_bo_add_ttm(bo);
1468 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1471 * Finally, adjust lru to be sure.
1474 mutex_lock(&dev->struct_mutex);
1476 if (move_unfenced) {
1477 list_add_tail(&bo->lru, &bm->unfenced);
1478 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1479 _DRM_BO_FLAG_UNFENCED);
1481 drm_bo_add_to_lru(bo);
1482 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1483 DRM_WAKEUP(&bo->event_queue);
1484 DRM_FLAG_MASKED(bo->priv_flags, 0,
1485 _DRM_BO_FLAG_UNFENCED);
1488 mutex_unlock(&dev->struct_mutex);
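/*
 * Editorial summary of the validate path above: check the fence
 * class/type and wait for idle if they change, wait for unmap, resolve
 * flags for fake buffers, move the buffer if its current placement is
 * incompatible with the mask, update the pinned node for
 * NO_MOVE/NO_EVICT buffers, make sure a TTM exists for local memory,
 * and finally put the buffer on the unfenced list or the regular LRU.
 */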
1493 static int drm_bo_handle_validate(struct drm_file *file_priv,
1495 uint32_t fence_class,
1496 uint64_t flags, uint64_t mask, uint32_t hint,
1497 struct drm_bo_info_rep *rep)
1499 struct drm_device *dev = file_priv->head->dev;
1500 struct drm_buffer_object *bo;
1502 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1504 mutex_lock(&dev->struct_mutex);
1505 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1506 mutex_unlock(&dev->struct_mutex);
1511 mutex_lock(&bo->mutex);
1512 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1517 DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1518 ret = drm_bo_new_mask(bo, flags, hint);
1523 drm_buffer_object_validate(bo, fence_class,
1524 !(hint & DRM_BO_HINT_DONT_FENCE),
1526 drm_bo_fill_rep_arg(bo, rep);
1530 mutex_unlock(&bo->mutex);
1532 drm_bo_usage_deref_unlocked(&bo);
1536 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1537 struct drm_bo_info_rep *rep)
1539 struct drm_device *dev = file_priv->head->dev;
1540 struct drm_buffer_object *bo;
1542 mutex_lock(&dev->struct_mutex);
1543 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1544 mutex_unlock(&dev->struct_mutex);
1549 mutex_lock(&bo->mutex);
1550 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1551 (void)drm_bo_busy(bo);
1552 drm_bo_fill_rep_arg(bo, rep);
1553 mutex_unlock(&bo->mutex);
1554 drm_bo_usage_deref_unlocked(&bo);
1558 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1560 struct drm_bo_info_rep *rep)
1562 struct drm_device *dev = file_priv->head->dev;
1563 struct drm_buffer_object *bo;
1564 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1567 mutex_lock(&dev->struct_mutex);
1568 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1569 mutex_unlock(&dev->struct_mutex);
1575 mutex_lock(&bo->mutex);
1576 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1579 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1583 drm_bo_fill_rep_arg(bo, rep);
1586 mutex_unlock(&bo->mutex);
1587 drm_bo_usage_deref_unlocked(&bo);
1591 int drm_buffer_object_create(struct drm_device *dev,
1593 enum drm_bo_type type,
1596 uint32_t page_alignment,
1597 unsigned long buffer_start,
1598 struct drm_buffer_object ** buf_obj)
1600 struct drm_buffer_manager *bm = &dev->bm;
1601 struct drm_buffer_object *bo;
1603 unsigned long num_pages;
1605 if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1606 DRM_ERROR("Invalid buffer object start.\n");
1609 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1610 if (num_pages == 0) {
1611 DRM_ERROR("Illegal buffer object size.\n");
1615 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1620 mutex_init(&bo->mutex);
1621 mutex_lock(&bo->mutex);
1623 atomic_set(&bo->usage, 1);
1624 atomic_set(&bo->mapped, -1);
1625 DRM_INIT_WAITQUEUE(&bo->event_queue);
1626 INIT_LIST_HEAD(&bo->lru);
1627 INIT_LIST_HEAD(&bo->pinned_lru);
1628 INIT_LIST_HEAD(&bo->ddestroy);
1629 #ifdef DRM_ODD_MM_COMPAT
1630 INIT_LIST_HEAD(&bo->p_mm_list);
1631 INIT_LIST_HEAD(&bo->vma_list);
1635 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1636 bo->mem.num_pages = num_pages;
1637 bo->mem.mm_node = NULL;
1638 bo->mem.page_alignment = page_alignment;
1639 if (bo->type == drm_bo_type_fake) {
1640 bo->offset = buffer_start;
1641 bo->buffer_start = 0;
1643 bo->buffer_start = buffer_start;
1646 bo->mem.flags = 0ULL;
1647 bo->mem.mask = 0ULL;
1648 atomic_inc(&bm->count);
1649 ret = drm_bo_new_mask(bo, mask, hint);
1654 if (bo->type == drm_bo_type_dc) {
1655 mutex_lock(&dev->struct_mutex);
1656 ret = drm_bo_setup_vm_locked(bo);
1657 mutex_unlock(&dev->struct_mutex);
1661 ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1665 mutex_unlock(&bo->mutex);
1670 mutex_unlock(&bo->mutex);
1672 drm_bo_usage_deref_unlocked(&bo);
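/*
 * Example (illustrative; the flag choice is hypothetical): a driver
 * creating a kernel-owned scratch buffer in VRAM might do
 *
 *	struct drm_buffer_object *bo;
 *
 *	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_MEM_VRAM |
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
 *				       DRM_BO_HINT_DONT_BLOCK, 0, 0, &bo);
 *
 * and must eventually drop the reference returned in *buf_obj again to
 * destroy the object.
 */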
1676 static int drm_bo_add_user_object(struct drm_file *file_priv,
1677 struct drm_buffer_object *bo,
1680 struct drm_device *dev = file_priv->head->dev;
1683 mutex_lock(&dev->struct_mutex);
1684 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1688 bo->base.remove = drm_bo_base_deref_locked;
1689 bo->base.type = drm_buffer_type;
1690 bo->base.ref_struct_locked = NULL;
1691 bo->base.unref = drm_buffer_user_object_unmap;
1694 mutex_unlock(&dev->struct_mutex);
1698 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
1700 LOCK_TEST_WITH_RETURN(dev, file_priv);
1704 int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1706 struct drm_bo_op_arg curarg;
1707 struct drm_bo_op_arg *arg = data;
1708 struct drm_bo_op_req *req = &arg->d.req;
1709 struct drm_bo_info_rep rep;
1710 unsigned long next = 0;
1711 void __user *curuserarg = NULL;
1714 if (!dev->bm.initialized) {
1715 DRM_ERROR("Buffer object manager is not initialized.\n");
1721 curuserarg = (void __user *)next;
1722 if (copy_from_user(&curarg, curuserarg,
1723 sizeof(curarg)) != 0)
1735 case drm_bo_validate:
1736 ret = drm_bo_lock_test(dev, file_priv);
1739 ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
1740 req->bo_req.fence_class,
1748 DRM_ERROR("Function is not implemented yet.\n");
1750 case drm_bo_ref_fence:
1752 DRM_ERROR("Function is not implemented yet.\n");
1760 * A signal interrupted us. Make sure the ioctl is restartable.
1767 arg->d.rep.ret = ret;
1768 arg->d.rep.bo_info = rep;
1770 if (copy_to_user(curuserarg, &curarg,
1771 sizeof(curarg)) != 0)
1774 } while (next != 0);
1778 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1780 struct drm_bo_create_arg *arg = data;
1781 struct drm_bo_create_req *req = &arg->d.req;
1782 struct drm_bo_info_rep *rep = &arg->d.rep;
1783 struct drm_buffer_object *entry;
1786 if (!dev->bm.initialized) {
1787 DRM_ERROR("Buffer object manager is not initialized.\n");
1791 ret = drm_bo_lock_test(dev, file_priv);
1795 ret = drm_buffer_object_create(file_priv->head->dev,
1796 req->size, req->type, req->mask,
1797 req->hint, req->page_alignment,
1798 req->buffer_start, &entry);
1802 ret = drm_bo_add_user_object(file_priv, entry,
1803 req->mask & DRM_BO_FLAG_SHAREABLE);
1805 drm_bo_usage_deref_unlocked(&entry);
1809 mutex_lock(&entry->mutex);
1810 drm_bo_fill_rep_arg(entry, rep);
1811 mutex_unlock(&entry->mutex);
1818 int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1820 struct drm_bo_handle_arg *arg = data;
1821 struct drm_user_object *uo;
1824 if (!dev->bm.initialized) {
1825 DRM_ERROR("Buffer object manager is not initialized.\n");
1829 mutex_lock(&dev->struct_mutex);
1830 uo = drm_lookup_user_object(file_priv, arg->handle);
1831 if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
1832 mutex_unlock(&dev->struct_mutex);
1835 ret = drm_remove_user_object(file_priv, uo);
1836 mutex_unlock(&dev->struct_mutex);
1841 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1843 struct drm_bo_map_wait_idle_arg *arg = data;
1844 struct drm_bo_info_req *req = &arg->d.req;
1845 struct drm_bo_info_rep *rep = &arg->d.rep;
1847 if (!dev->bm.initialized) {
1848 DRM_ERROR("Buffer object manager is not initialized.\n");
1852 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1860 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1862 struct drm_bo_handle_arg *arg = data;
1864 if (!dev->bm.initialized) {
1865 DRM_ERROR("Buffer object manager is not initialized.\n");
1869 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1874 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1876 struct drm_bo_reference_info_arg *arg = data;
1877 struct drm_bo_handle_arg *req = &arg->d.req;
1878 struct drm_bo_info_rep *rep = &arg->d.rep;
1879 struct drm_user_object *uo;
1882 if (!dev->bm.initialized) {
1883 DRM_ERROR("Buffer object manager is not initialized.\n");
1887 ret = drm_user_object_ref(file_priv, req->handle,
1888 drm_buffer_type, &uo);
1892 ret = drm_bo_handle_info(file_priv, req->handle, rep);
1899 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1901 struct drm_bo_handle_arg *arg = data;
1904 if (!dev->bm.initialized) {
1905 DRM_ERROR("Buffer object manager is not initialized.\n");
1909 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1913 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1915 struct drm_bo_reference_info_arg *arg = data;
1916 struct drm_bo_handle_arg *req = &arg->d.req;
1917 struct drm_bo_info_rep *rep = &arg->d.rep;
1920 if (!dev->bm.initialized) {
1921 DRM_ERROR("Buffer object manager is not initialized.\n");
1925 ret = drm_bo_handle_info(file_priv, req->handle, rep);
1932 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1934 struct drm_bo_map_wait_idle_arg *arg = data;
1935 struct drm_bo_info_req *req = &arg->d.req;
1936 struct drm_bo_info_rep *rep = &arg->d.rep;
1938 if (!dev->bm.initialized) {
1939 DRM_ERROR("Buffer object manager is not initialized.\n");
1943 ret = drm_bo_handle_wait(file_priv, req->handle,
1954 * Clean the unfenced list and put its buffers on the regular LRU.
1955 * This is part of the memory manager cleanup and should only be
1956 * called with the DRI lock held.
1957 * Call with dev->struct_mutex locked.
1960 static void drm_bo_clean_unfenced(struct drm_device *dev)
1962 struct drm_buffer_manager *bm = &dev->bm;
1963 struct list_head *head, *list;
1964 struct drm_buffer_object *entry;
1966 head = &bm->unfenced;
1969 while(list != head) {
1970 prefetch(list->next);
1971 entry = list_entry(list, struct drm_buffer_object, lru);
1973 atomic_inc(&entry->usage);
1974 mutex_unlock(&dev->struct_mutex);
1975 mutex_lock(&entry->mutex);
1976 mutex_lock(&dev->struct_mutex);
1978 list_del(&entry->lru);
1979 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1980 drm_bo_add_to_lru(entry);
1981 mutex_unlock(&entry->mutex);
1986 static int drm_bo_leave_list(struct drm_buffer_object * bo,
1988 int free_pinned, int allow_errors)
1990 struct drm_device *dev = bo->dev;
1993 mutex_lock(&bo->mutex);
1995 ret = drm_bo_expire_fence(bo, allow_errors);
2000 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2001 mutex_lock(&dev->struct_mutex);
2002 list_del_init(&bo->pinned_lru);
2003 if (bo->pinned_node == bo->mem.mm_node)
2004 bo->pinned_node = NULL;
2005 if (bo->pinned_node != NULL) {
2006 drm_mm_put_block(bo->pinned_node);
2007 bo->pinned_node = NULL;
2009 mutex_unlock(&dev->struct_mutex);
2012 if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2013 DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
2014 "cleanup. Removing flag and evicting.\n");
2015 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2016 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
2019 if (bo->mem.mem_type == mem_type)
2020 ret = drm_bo_evict(bo, mem_type, 0);
2027 DRM_ERROR("Cleanup eviction failed\n");
2032 mutex_unlock(&bo->mutex);
2037 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2041 return list_entry(list, struct drm_buffer_object, pinned_lru);
2043 return list_entry(list, struct drm_buffer_object, lru);
2047 * dev->struct_mutex locked.
2050 static int drm_bo_force_list_clean(struct drm_device * dev,
2051 struct list_head *head,
2057 struct list_head *list, *next, *prev;
2058 struct drm_buffer_object *entry, *nentry;
2063 * The list traversal is a bit odd here, because an item may
2064 * disappear from the list when we release the struct_mutex or
2065 * when we decrease the usage count. Also we're not guaranteed
2066 * to drain pinned lists, so we can't always restart.
2071 list_for_each_safe(list, next, head) {
2074 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2075 atomic_inc(&entry->usage);
2077 atomic_dec(&nentry->usage);
2082 * Protect the next item from destruction, so we can check
2083 * its list pointers later on.
2087 nentry = drm_bo_entry(next, pinned_list);
2088 atomic_inc(&nentry->usage);
2090 mutex_unlock(&dev->struct_mutex);
2092 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2094 mutex_lock(&dev->struct_mutex);
2096 drm_bo_usage_deref_locked(&entry);
2101 * Has the next item disappeared from the list?
2104 do_restart = ((next->prev != list) && (next->prev != prev));
2106 if (nentry != NULL && do_restart)
2107 drm_bo_usage_deref_locked(&nentry);
2115 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2117 struct drm_buffer_manager *bm = &dev->bm;
2118 struct drm_mem_type_manager *man = &bm->man[mem_type];
2121 if (mem_type >= DRM_BO_MEM_TYPES) {
2122 DRM_ERROR("Illegal memory type %d\n", mem_type);
2126 if (!man->has_type) {
2127 DRM_ERROR("Trying to take down uninitialized "
2128 "memory manager type\n");
2137 drm_bo_clean_unfenced(dev);
2138 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2139 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2141 if (drm_mm_clean(&man->manager)) {
2142 drm_mm_takedown(&man->manager);
2152 * Evict all buffers of a particular mem_type, but leave memory manager
2153 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2154 * point since we have the hardware lock.
2157 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2160 struct drm_buffer_manager *bm = &dev->bm;
2161 struct drm_mem_type_manager *man = &bm->man[mem_type];
2163 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2164 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2168 if (!man->has_type) {
2169 DRM_ERROR("Memory type %u has not been initialized.\n",
2174 drm_bo_clean_unfenced(dev);
2175 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2178 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2183 int drm_bo_init_mm(struct drm_device * dev,
2185 unsigned long p_offset, unsigned long p_size)
2187 struct drm_buffer_manager *bm = &dev->bm;
2189 struct drm_mem_type_manager *man;
2191 if (type >= DRM_BO_MEM_TYPES) {
2192 DRM_ERROR("Illegal memory type %d\n", type);
2196 man = &bm->man[type];
2197 if (man->has_type) {
2198 DRM_ERROR("Memory manager already initialized for type %d\n",
2203 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2208 if (type != DRM_BO_MEM_LOCAL) {
2210 DRM_ERROR("Zero size memory manager type %d\n", type);
2213 ret = drm_mm_init(&man->manager, p_offset, p_size);
2220 INIT_LIST_HEAD(&man->lru);
2221 INIT_LIST_HEAD(&man->pinned);
2225 EXPORT_SYMBOL(drm_bo_init_mm);
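/*
 * Example (illustrative; "tt_pages" and the 256 MB VRAM size are
 * hypothetical): a driver typically initializes its fixed memory types
 * from its own init ioctl, after drm_bo_driver_init() has set up
 * DRM_BO_MEM_LOCAL:
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
 *			     (256 << 20) >> PAGE_SHIFT);
 *
 * p_offset and p_size are in pages; the driver's init_mem_type() hook
 * fills in the drm_mem_type_manager flags and io_offset for each type.
 */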
2228 * This is called from lastclose, so we don't need to bother about
2229 * any clients still running when we set the initialized flag to zero.
2232 int drm_bo_driver_finish(struct drm_device * dev)
2234 struct drm_buffer_manager *bm = &dev->bm;
2236 unsigned i = DRM_BO_MEM_TYPES;
2237 struct drm_mem_type_manager *man;
2239 mutex_lock(&dev->bm.init_mutex);
2240 mutex_lock(&dev->struct_mutex);
2242 if (!bm->initialized)
2244 bm->initialized = 0;
2248 if (man->has_type) {
2250 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2252 DRM_ERROR("DRM memory manager type %d "
2253 "is not clean.\n", i);
2258 mutex_unlock(&dev->struct_mutex);
2260 if (!cancel_delayed_work(&bm->wq)) {
2261 flush_scheduled_work();
2263 mutex_lock(&dev->struct_mutex);
2264 drm_bo_delayed_delete(dev, 1);
2265 if (list_empty(&bm->ddestroy)) {
2266 DRM_DEBUG("Delayed destroy list was clean\n");
2268 if (list_empty(&bm->man[0].lru)) {
2269 DRM_DEBUG("Swap list was clean\n");
2271 if (list_empty(&bm->man[0].pinned)) {
2272 DRM_DEBUG("NO_MOVE list was clean\n");
2274 if (list_empty(&bm->unfenced)) {
2275 DRM_DEBUG("Unfenced list was clean\n");
2278 mutex_unlock(&dev->struct_mutex);
2279 mutex_unlock(&dev->bm.init_mutex);
2283 int drm_bo_driver_init(struct drm_device * dev)
2285 struct drm_bo_driver *driver = dev->driver->bo_driver;
2286 struct drm_buffer_manager *bm = &dev->bm;
2289 mutex_lock(&dev->bm.init_mutex);
2290 mutex_lock(&dev->struct_mutex);
2295 * Initialize the system memory buffer type.
2296 * Other types need to be driver / IOCTL initialized.
2299 ret = drm_bo_init_mm(dev, 0, 0, 0);
2303 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2304 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2306 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2308 bm->initialized = 1;
2310 atomic_set(&bm->count, 0);
2312 INIT_LIST_HEAD(&bm->unfenced);
2313 INIT_LIST_HEAD(&bm->ddestroy);
2315 mutex_unlock(&dev->struct_mutex);
2316 mutex_unlock(&dev->bm.init_mutex);
2320 EXPORT_SYMBOL(drm_bo_driver_init);
2322 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2324 struct drm_mm_init_arg *arg = data;
2325 struct drm_buffer_manager *bm = &dev->bm;
2326 struct drm_bo_driver *driver = dev->driver->bo_driver;
2330 DRM_ERROR("Buffer objects are not supported by this driver\n");
2335 if (arg->magic != DRM_BO_INIT_MAGIC) {
2336 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2337 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2340 if (arg->major != DRM_BO_INIT_MAJOR) {
2341 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2342 "\tversion don't match. Got %d, expected %d,\n",
2343 arg->major, DRM_BO_INIT_MAJOR);
2346 if (arg->minor > DRM_BO_INIT_MINOR) {
2347 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2348 "\tlibdrm buffer object interface version is %d.%d.\n"
2349 "\tkernel DRM buffer object interface version is %d.%d\n",
2350 arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2354 mutex_lock(&dev->bm.init_mutex);
2355 mutex_lock(&dev->struct_mutex);
2356 if (!bm->initialized) {
2357 DRM_ERROR("DRM memory manager was not initialized.\n");
2360 if (arg->mem_type == 0) {
2361 DRM_ERROR("System memory buffers already initialized.\n");
2364 ret = drm_bo_init_mm(dev, arg->mem_type,
2365 arg->p_offset, arg->p_size);
2368 mutex_unlock(&dev->struct_mutex);
2369 mutex_unlock(&dev->bm.init_mutex);
2376 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2378 struct drm_mm_type_arg *arg = data;
2379 struct drm_buffer_manager *bm = &dev->bm;
2380 struct drm_bo_driver *driver = dev->driver->bo_driver;
2384 DRM_ERROR("Buffer objects are not supported by this driver\n");
2388 LOCK_TEST_WITH_RETURN(dev, file_priv);
2389 mutex_lock(&dev->bm.init_mutex);
2390 mutex_lock(&dev->struct_mutex);
2392 if (!bm->initialized) {
2393 DRM_ERROR("DRM memory manager was not initialized\n");
2396 if (arg->mem_type == 0) {
2397 DRM_ERROR("No takedown for System memory buffers.\n");
2401 if (drm_bo_clean_mm(dev, arg->mem_type)) {
2402 DRM_ERROR("Memory manager type %d not clean. "
2403 "Delaying takedown\n", arg->mem_type);
2406 mutex_unlock(&dev->struct_mutex);
2407 mutex_unlock(&dev->bm.init_mutex);
2414 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2416 struct drm_mm_type_arg *arg = data;
2417 struct drm_bo_driver *driver = dev->driver->bo_driver;
2421 DRM_ERROR("Buffer objects are not supported by this driver\n");
2425 LOCK_TEST_WITH_RETURN(dev, file_priv);
2426 mutex_lock(&dev->bm.init_mutex);
2427 mutex_lock(&dev->struct_mutex);
2428 ret = drm_bo_lock_mm(dev, arg->mem_type);
2429 mutex_unlock(&dev->struct_mutex);
2430 mutex_unlock(&dev->bm.init_mutex);
2437 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2439 struct drm_bo_driver *driver = dev->driver->bo_driver;
2443 DRM_ERROR("Buffer objects are not supported by this driver\n");
2447 LOCK_TEST_WITH_RETURN(dev, file_priv);
2448 mutex_lock(&dev->bm.init_mutex);
2449 mutex_lock(&dev->struct_mutex);
2452 mutex_unlock(&dev->struct_mutex);
2453 mutex_unlock(&dev->bm.init_mutex);
2461 * buffer object vm functions.
2464 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2466 struct drm_buffer_manager *bm = &dev->bm;
2467 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2469 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2470 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2473 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2476 if (mem->flags & DRM_BO_FLAG_CACHED)
2482 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2485 * Get the PCI offset for the buffer object memory.
2487 * \param bo The buffer object.
2488 * \param bus_base On return the base of the PCI region
2489 * \param bus_offset On return the byte offset into the PCI region
2490 * \param bus_size On return the byte size of the buffer object or zero if
2491 * the buffer object memory is not accessible through a PCI region.
2492 * \return Failure indication.
2494 * Returns -EINVAL if the buffer object is currently not mappable.
2495 * Otherwise returns zero.
2498 int drm_bo_pci_offset(struct drm_device *dev,
2499 struct drm_bo_mem_reg *mem,
2500 unsigned long *bus_base,
2501 unsigned long *bus_offset, unsigned long *bus_size)
2503 struct drm_buffer_manager *bm = &dev->bm;
2504 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2507 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2510 if (drm_mem_reg_is_pci(dev, mem)) {
2511 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2512 *bus_size = mem->num_pages << PAGE_SHIFT;
2513 *bus_base = man->io_offset;
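/*
 * Example (illustrative sketch): a caller that wants to ioremap the
 * buffer would combine the outputs as
 *
 *	unsigned long bus_base, bus_offset, bus_size;
 *	void __iomem *virt;
 *
 *	ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base,
 *				&bus_offset, &bus_size);
 *	if (!ret && bus_size != 0)
 *		virt = ioremap(bus_base + bus_offset, bus_size);
 *
 * bus_size == 0 signals that the memory is not accessible through a PCI
 * region (e.g. cached system pages) and must be reached through the TTM
 * page list instead.
 */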
2520 * Kill all user-space virtual mappings of this buffer object.
2522 * \param bo The buffer object.
2524 * Call bo->mutex locked.
2527 void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2529 struct drm_device *dev = bo->dev;
2530 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2531 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2533 if (!dev->dev_mapping)
2536 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2539 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2541 struct drm_map_list *list = &bo->map_list;
2542 drm_local_map_t *map;
2543 struct drm_device *dev = bo->dev;
2545 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2546 if (list->user_token) {
2547 drm_ht_remove_item(&dev->map_hash, &list->hash);
2548 list->user_token = 0;
2550 if (list->file_offset_node) {
2551 drm_mm_put_block(list->file_offset_node);
2552 list->file_offset_node = NULL;
2559 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2561 list->user_token = 0ULL;
2562 drm_bo_usage_deref_locked(&bo);
2565 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2567 struct drm_map_list *list = &bo->map_list;
2568 drm_local_map_t *map;
2569 struct drm_device *dev = bo->dev;
2571 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2572 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2578 map->type = _DRM_TTM;
2579 map->flags = _DRM_REMOVABLE;
2580 map->size = bo->mem.num_pages * PAGE_SIZE;
2581 atomic_inc(&bo->usage);
2582 map->handle = (void *)bo;
2584 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2585 bo->mem.num_pages, 0, 0);
2587 if (!list->file_offset_node) {
2588 drm_bo_takedown_vm_locked(bo);
2592 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2593 bo->mem.num_pages, 0);
2595 list->hash.key = list->file_offset_node->start;
2596 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2597 drm_bo_takedown_vm_locked(bo);
2601 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
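/*
 * Editorial note: user_token is the fake file offset user space passes to
 * mmap() in order to map this buffer; drm_bo_fill_rep_arg() reports it in
 * rep->arg_handle, and drm_bo_takedown_vm_locked() removes the hash entry
 * and the offset-manager block again when the object goes away.
 */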