/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads, as well as
 * hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex does also protect the buffer list heads, so to manipulate those,
 * we need both the bo->mutex and the dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
 * the list traversal will, in general, need to be restarted.
 */
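/*
 * For illustration only (not part of the driver): the lock order above
 * leads to traversal code of roughly this shape, as seen in e.g.
 * drm_bo_cleanup_refs() and drm_bo_clean_unfenced() below:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&entry->usage);	 pin the entry so it can't go away
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&entry->mutex);	 honor bo->mutex before struct_mutex
 *	mutex_lock(&dev->struct_mutex);
 *	...				 list may have changed; restart if so
 */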
static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
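/*
 * Map a memory type index to its corresponding memory-type flag bit.
 * The memory-type flags occupy the high bits of the mask, starting at
 * bit 24, so type n maps to (1 << (24 + n)).
 */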
static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}
/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
	DRM_ASSERT_LOCKED(&bo->mutex);

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}
void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}
static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return 0;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	if (!bo->map_list.map)
		return 0;

	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}
static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	bo->ttm = NULL;

	DRM_ASSERT_LOCKED(&bo->mutex);

	switch (bo->type) {
	case drm_bo_type_dc:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_kernel:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
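/*
 * Move the buffer into the memory region described by mem: create and bind
 * a TTM when the target type is not fixed, then hand the transfer to the
 * driver's move hook, a TTM move, or the memcpy fallback.
 * Call bo->mutex locked.
 */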
static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
				  drm_bo_mem_reg_t * mem,
				  int evict, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
	drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci)
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, new_man->flags &
					   DRM_BO_FLAG_CACHED,
					   mem->mm_node->start);
			if (ret)
				goto out_err;
		}
	}

	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
		drm_bo_mem_reg_t *old_mem = &bo->mem;
		uint32_t save_flags = old_mem->flags;
		uint32_t save_mask = old_mem->mask;

		*old_mem = *mem;
		mem->mm_node = NULL;
		old_mem->mask = save_mask;
		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
	} else {
		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
	}

	if (ret)
		goto out_err;

	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Cannot flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;

	return 0;

      out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		int no_wait)
{
	int ret;
	drm_fence_object_t *fence = bo->fence;

	DRM_ASSERT_LOCKED(&bo->mutex);

	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret = drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					    bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(dev, fence);
		bo->fence = NULL;
	}
	return 0;
}
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
				if (ret && allow_errors)
					return ret;
			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence) {
			drm_fence_usage_deref_unlocked(dev, bo->fence);
			bo->fence = NULL;
		}
	}
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(dev, bo->fence,
						   bo->fence_type, 0)) {
		drm_fence_usage_deref_unlocked(dev, bo->fence);
		bo->fence = NULL;
	}

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage)) {
		goto out;
	}

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

      out:
	mutex_unlock(&bo->mutex);
}
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		BUG_ON(!list_empty(&bo->base.list));
		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry) {
			atomic_dec(&nentry->usage);
		}
	}
}
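/*
 * Work item that drains the delayed-destroy list. It reschedules itself
 * while entries remain, so buffers whose fences have not yet signaled are
 * retried rather than dropped.
 */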
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
#else
	drm_buffer_manager_t *bm =
	    container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	if (atomic_dec_and_test(&bo->usage)) {
		drm_bo_destroy_locked(bo);
	}
}
static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(bo);
}
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;

	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head *l;
	LIST_HEAD(f_list);

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_splice_init(list, &f_list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, 0, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		prefetch(l->next);
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			atomic_inc(&fence->usage);
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		l = f_list.next;
	}
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
/*
 * bo->mutex locked.
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	drm_device_t *dev = bo->dev;
	drm_bo_mem_reg_t evict_mem;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (bo->mem.mem_type != mem_type)
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
		goto out;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	if (bo->type == drm_bo_type_fake) {
		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
		bo->mem.mm_node = NULL;
		goto out1;
	}

	evict_mem = bo->mem;
	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

      out1:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	list_del(&bo->lru);
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

      out:
	return ret;
}
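/*
 * Repeatedly evict the head of the LRU list for mem_type until
 * drm_mm_search_free() finds a large enough hole, then claim it.
 */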
static int drm_bo_mem_force_space(drm_device_t * dev,
				  drm_bo_mem_reg_t * mem,
				  uint32_t mem_type, int no_wait)
{
	drm_mm_node_t *node;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
				uint32_t mem_type,
				uint32_t mask, uint32_t * res_mask)
{
	uint32_t cur_flags = drm_bo_type_flags(mem_type);
	uint32_t flag_diff;

	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = (mask ^ cur_flags);
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;

	*res_mask = cur_flags;
	return 1;
}
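/*
 * Two-pass placement: first look for free space in the driver's preferred
 * memory types; failing that, walk the busy-priority list and force space
 * by evicting other buffers.
 */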
int drm_bo_mem_space(drm_buffer_object_t * bo,
		     drm_bo_mem_reg_t * mem, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	drm_mm_node_t *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!man->has_type)
			continue;

		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}

EXPORT_SYMBOL(drm_bo_mem_space);
static int drm_bo_new_mask(drm_buffer_object_t * bo,
			   uint32_t new_mask, uint32_t hint)
{
	uint32_t new_props;

	if (bo->type == drm_bo_type_user) {
		DRM_ERROR("User buffers are not supported yet\n");
		return -EINVAL;
	}
	if (bo->type == drm_bo_type_fake &&
	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		DRM_ERROR("Fake buffers must be pinned.\n");
		return -EINVAL;
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
		     "processes\n");
		return -EPERM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.mask = new_mask;
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
{
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;

	uo = drm_lookup_user_object(priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
	return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(dev, fence, bo->fence_type, 0)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
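/*
 * Used before a cached read mapping: if the buffer currently has a memory
 * node, evict it out of its (typically uncached) memory type so subsequent
 * CPU reads go through cached pages.
 */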
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be an atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	ret = 0;
	mutex_unlock(&bo->mutex);
	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    !drm_bo_check_unfenced(bo));
	mutex_lock(&bo->mutex);
	if (ret == -EINTR)
		return -EAGAIN;
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return -EBUSY;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				drm_bo_arg_reply_t * rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->mem.num_pages * PAGE_SIZE;
	rep->offset = bo->offset;
	rep->arg_handle = bo->map_list.user_token;
	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags.
			 * need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);
	} else
		drm_bo_fill_rep_arg(bo, rep);
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
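/*
 * Tear down a mapping: drop the _DRM_REF_TYPE1 ref object registered at map
 * time; its removal handler (drm_buffer_user_object_unmap() below) then
 * decrements the map count and wakes up waiters.
 */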
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Call struct-sem locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
					 drm_ref_t action)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	/*
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}
/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	drm_bo_mem_reg_t mem;

	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	list_add_tail(&bo->lru, &bm->unfenced);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
			_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);

	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

      out_unlock:
	if (ret || !move_unfenced) {
		mutex_lock(&dev->struct_mutex);
		if (mem.mm_node) {
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		DRM_WAKEUP(&bo->event_queue);
		list_del(&bo->lru);
		drm_bo_add_to_lru(bo);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_unlock(&bm->evict_mutex);
	return ret;
}
static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
{
	uint32_t flag_diff = (mem->mask ^ mem->flags);

	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
		return 0;
	}
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;
	return 1;
}
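/*
 * Check whether a fake buffer's flags can be satisfied by some memory type,
 * and patch mem->flags up accordingly. Fake buffers never get an mm_node.
 */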
static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	int type_ok = 0;
	uint32_t mem_type = 0;
	uint32_t cur_flags;

	if (drm_bo_mem_compat(mem))
		return 0;

	BUG_ON(mem->mm_node);

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (type_ok)
			break;
	}

	if (type_ok) {
		mem->mm_node = NULL;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
		return 0;
	}

	DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
	return -EINVAL;
}
/*
 * bo locked.
 */

static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      int move_unfenced, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
		  bo->mem.flags);
	ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	if (bo->type == drm_bo_type_fake) {
		ret = drm_bo_check_fake(dev, &bo->mem);
		if (ret)
			return ret;
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
					 move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */

	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);

		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node != NULL)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		}

		mutex_unlock(&dev->struct_mutex);
	} else if (bo->pinned_node != NULL) {
		mutex_lock(&dev->struct_mutex);

		if (bo->pinned_node != bo->mem.mm_node)
			drm_mm_put_block(bo->pinned_node);

		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			return ret;
	}
	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);

	/*
	 * Finally, adjust lru to be sure.
	 */

	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	} else {
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			DRM_WAKEUP(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
				  uint32_t flags, uint32_t mask, uint32_t hint,
				  drm_bo_arg_reply_t * rep)
{
	struct drm_device *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	int ret;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;

	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
	ret = drm_bo_new_mask(bo, flags, hint);
	if (ret)
		goto out;

	ret =
	    drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
				       no_wait);
	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
			      drm_bo_arg_reply_t * rep)
{
	struct drm_device *dev = priv->head->dev;
	drm_buffer_object_t *bo;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return 0;
}
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
			      uint32_t hint, drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;
	struct drm_device *dev = priv->head->dev;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
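/*
 * Create a buffer object and validate its initial placement. The returned
 * object carries one usage reference that the caller owns.
 */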
int drm_buffer_object_create(drm_device_t *dev,
			     unsigned long size,
			     drm_bo_type_t type,
			     uint32_t mask,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	int ret = 0;
	unsigned long num_pages;

	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
		return -EINVAL;
	}
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->pinned_lru);
	INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->p_mm_list);
	INIT_LIST_HEAD(&bo->vma_list);
#endif
	bo->dev = dev;
	bo->type = type;
	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
	bo->mem.num_pages = num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
	} else {
		bo->buffer_start = buffer_start;
	}
	atomic_inc(&bm->count);
	ret = drm_bo_new_mask(bo, mask, hint);
	if (ret)
		goto out_err;

	if (bo->type == drm_bo_type_dc) {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_setup_vm_locked(bo);
		mutex_unlock(&dev->struct_mutex);
		if (ret)
			goto out_err;
	}
	ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
	if (ret)
		goto out_err;

	mutex_unlock(&bo->mutex);
	*buf_obj = bo;
	return 0;

      out_err:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
				  int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
	LOCK_TEST_WITH_RETURN(dev, filp);
	return 0;
}
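/*
 * The buffer-object ioctl takes a user-space linked list of requests:
 * each drm_bo_arg_t carries one request plus a "next" pointer, and the
 * reply is copied back into the same node before the chain is followed.
 */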
int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_bo_arg_t arg;
	drm_bo_arg_request_t *req = &arg.d.req;
	drm_bo_arg_reply_t rep;
	unsigned long next;
	drm_user_object_t *uo;
	drm_buffer_object_t *entry;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	do {
		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

		if (arg.handled) {
			data = arg.next;
			continue;
		}

		rep.ret = 0;
		switch (req->op) {
		case drm_bo_create:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			rep.ret =
			    drm_buffer_object_create(priv->head->dev,
						     req->size,
						     req->type,
						     req->mask,
						     req->hint,
						     req->page_alignment,
						     req->buffer_start, &entry);
			if (rep.ret)
				break;

			rep.ret =
			    drm_bo_add_user_object(priv, entry,
						   req->mask &
						   DRM_BO_FLAG_SHAREABLE);
			if (rep.ret)
				drm_bo_usage_deref_unlocked(entry);

			if (rep.ret)
				break;

			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unmap:
			rep.ret = drm_buffer_object_unmap(priv, req->handle);
			break;
		case drm_bo_map:
			rep.ret = drm_buffer_object_map(priv, req->handle,
							req->mask,
							req->hint, &rep);
			break;
		case drm_bo_destroy:
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			if (!uo || (uo->type != drm_buffer_type)
			    || uo->owner != priv) {
				mutex_unlock(&dev->struct_mutex);
				rep.ret = -EINVAL;
				break;
			}
			rep.ret = drm_remove_user_object(priv, uo);
			mutex_unlock(&dev->struct_mutex);
			break;
		case drm_bo_reference:
			rep.ret = drm_user_object_ref(priv, req->handle,
						      drm_buffer_type, &uo);
			if (rep.ret)
				break;
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
			break;
		case drm_bo_unreference:
			rep.ret = drm_user_object_unref(priv, req->handle,
							drm_buffer_type);
			break;
		case drm_bo_validate:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			rep.ret =
			    drm_bo_handle_validate(priv, req->handle, req->mask,
						   req->arg_handle, req->hint,
						   &rep);
			break;
		case drm_bo_fence:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			/* Not implemented yet. */
			break;
		case drm_bo_info:
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
			break;
		case drm_bo_wait_idle:
			rep.ret = drm_bo_handle_wait(priv, req->handle,
						     req->hint, &rep);
			break;
		case drm_bo_ref_fence:
			rep.ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			rep.ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */

		if (rep.ret == -EAGAIN)
			return -EAGAIN;

		arg.handled = 1;
		arg.d.rep = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}
/*
 * Clean the unfenced list and put buffers on the regular LRU.
 * This is part of the memory manager cleanup and should only be
 * called with the DRI lock held.
 * Call dev->struct_sem locked.
 */

static void drm_bo_clean_unfenced(drm_device_t *dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *head, *list;
	drm_buffer_object_t *entry;

	head = &bm->unfenced;

	list = head->next;
	while (list != head) {
		prefetch(list->next);
		entry = list_entry(list, drm_buffer_object_t, lru);

		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		list_del(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		list = head->next;
	}
}
static int drm_bo_leave_list(drm_buffer_object_t * bo,
			     uint32_t mem_type,
			     int free_pinned, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	mutex_lock(&bo->mutex);

	ret = drm_bo_expire_fence(bo, allow_errors);
	if (ret)
		goto out;

	if (free_pinned) {
		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node == bo->mem.mm_node)
			bo->pinned_node = NULL;
		if (bo->pinned_node != NULL) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
	}

	if (bo->mem.mem_type == mem_type)
		ret = drm_bo_evict(bo, mem_type, 0);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			DRM_ERROR("Cleanup eviction failed\n");
		}
	}

      out:
	mutex_unlock(&bo->mutex);
	return ret;
}
static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
					 int pinned_list)
{
	if (pinned_list)
		return list_entry(list, drm_buffer_object_t, pinned_lru);
	else
		return list_entry(list, drm_buffer_object_t, lru);
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int free_pinned,
				   int allow_errors,
				   int pinned_list)
{
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry, *nentry;
	int ret;
	int do_restart;

	/*
	 * The list traversal is a bit odd here, because an item may
	 * disappear from the list when we release the struct_mutex or
	 * when we decrease the usage count. Also we're not guaranteed
	 * to drain pinned lists, so we can't always restart.
	 */

      restart:
	nentry = NULL;
	list_for_each_safe(list, next, head) {
		prev = list->prev;

		entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
		atomic_inc(&entry->usage);
		if (nentry) {
			atomic_dec(&nentry->usage);
			nentry = NULL;
		}

		/*
		 * Protect the next item from destruction, so we can check
		 * its list pointers later on.
		 */

		if (next != head) {
			nentry = drm_bo_entry(next, pinned_list);
			atomic_inc(&nentry->usage);
		}
		mutex_unlock(&dev->struct_mutex);

		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
					allow_errors);
		mutex_lock(&dev->struct_mutex);

		drm_bo_usage_deref_locked(entry);
		if (ret)
			return ret;

		/*
		 * Has the next item disappeared from the list?
		 */

		do_restart = ((next->prev != list) && (next->prev != prev));

		if (nentry != NULL && do_restart) {
			drm_bo_usage_deref_locked(nentry);
			nentry = NULL;
		}

		if (do_restart)
			goto restart;
	}
	return 0;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	man->use_type = 0;
	man->has_type = 0;

	ret = 0;
	if (mem_type > 0) {
		drm_bo_clean_unfenced(dev);
		drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
		drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);

		if (drm_mm_clean(&man->manager)) {
			drm_mm_takedown(&man->manager);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
/*
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
 */

static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		DRM_ERROR("Memory type %u has not been initialized.\n",
			  mem_type);
		return 0;
	}

	drm_bo_clean_unfenced(dev);
	ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);

	return ret;
}
int drm_bo_init_mm(drm_device_t * dev,
		   unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;
	drm_mem_type_manager_t *man;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}

	man = &bm->man[type];
	if (man->has_type) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = 1;
	man->use_type = 1;

	INIT_LIST_HEAD(&man->lru);
	INIT_LIST_HEAD(&man->pinned);

	return 0;
}
EXPORT_SYMBOL(drm_bo_init_mm);
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;
	drm_mem_type_manager_t *man;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

	while (i--) {
		man = &bm->man[i];
		if (man->has_type) {
			man->use_type = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			man->has_type = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->man[0].lru)) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->man[0].pinned)) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, 0, 0, 0);
	if (ret)
		goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
      out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	int ret = 0;
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	switch (arg.req.op) {
	case mm_init:
		ret = -EINVAL;
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR
			    ("System memory buffers already initialized.\n");
			break;
		}
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		break;
	case mm_takedown:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = -EINVAL;
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for System memory buffers.\n");
			break;
		}
		ret = 0;
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		}
		break;
	case mm_lock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		break;
	case mm_unlock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = 0;
		break;
	default:
		DRM_ERROR("Function not implemented yet\n");
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
/*
 * buffer object vm functions.
 */

int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		if (mem->mem_type == DRM_BO_MEM_LOCAL)
			return 0;

		if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
			return 0;

		if (mem->flags & DRM_BO_FLAG_CACHED)
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(drm_mem_reg_is_pci);
/**
 * \c Get the PCI offset for the buffer object memory.
 *
 * \param bo The buffer object.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
 */
int drm_bo_pci_offset(drm_device_t * dev,
		      drm_bo_mem_reg_t * mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
		return -EINVAL;

	if (drm_mem_reg_is_pci(dev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}
/**
 * \c Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!dev->dev_mapping)
		return;

	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	if (list->user_token) {
		drm_ht_remove_item(&dev->map_hash, &list->hash);
		list->user_token = 0;
	}
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (!map)
		return;

	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
	list->map = NULL;
	list->user_token = 0ULL;
	drm_bo_usage_deref_locked(bo);
}
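/*
 * Set up the pseudo-mmap offset for this buffer: allocate a hole in the
 * device's file-offset space, hash it so mmap() can find the object, and
 * take a usage reference that drm_bo_takedown_vm_locked() releases.
 */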
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->offset = 0;
	map->type = _DRM_TTM;
	map->flags = _DRM_REMOVABLE;
	map->size = bo->mem.num_pages * PAGE_SIZE;
	atomic_inc(&bo->usage);
	map->handle = (void *)bo;

	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
						    bo->mem.num_pages, 0, 0);

	if (!list->file_offset_node) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  bo->mem.num_pages, 0);

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}