/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads, as well as the
 * hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those,
 * we need both bo->mutex and dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated. When dev->struct_mutex is released to grab
 * bo->mutex, the list traversal will, in general, need to be restarted.
 */
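
/*
 * A minimal sketch of the resulting pattern (illustrative only, not part
 * of the original file; compare drm_bo_cleanup_refs() below): take a usage
 * reference before dropping dev->struct_mutex so the object cannot vanish
 * during the lock reversal, then reacquire the locks in the documented
 * order.
 */
#if 0
static void example_lock_both(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;

	/* Caller holds dev->struct_mutex; pin the object first. */
	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);

	/* Locking order: bo->mutex, then dev->struct_mutex. */
	mutex_lock(&bo->mutex);
	mutex_lock(&dev->struct_mutex);

	/* ... manipulate bo and its list heads here ... */

	drm_bo_usage_deref_locked(bo);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&bo->mutex);
}
#endif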
static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);

static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}
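
/*
 * Worked example (illustrative): the memory-type flags occupy the high
 * bits, so type 0 (DRM_BO_MEM_LOCAL) maps to 1 << 24 = 0x01000000 and
 * type 1 (DRM_BO_MEM_TT) to 1 << 25 = 0x02000000; these are the bits
 * that DRM_BO_MASK_MEM tests against below.
 */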
/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}

void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}
static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}

static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	bo->ttm = NULL;

	switch (bo->type) {
	case drm_bo_type_dc:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
				  drm_bo_mem_reg_t * mem,
				  int evict, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
	drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci)
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, new_man->flags &
					   DRM_BO_FLAG_CACHED,
					   mem->mm_node->start);
			if (ret)
				goto out_err;
		}
	}

	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {

		drm_bo_mem_reg_t *old_mem = &bo->mem;
		uint32_t save_flags = old_mem->flags;
		uint32_t save_mask = old_mem->mask;

		*old_mem = *mem;
		mem->mm_node = NULL;
		old_mem->mask = save_mask;
		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
		old_mem->flags = save_flags;

	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {

		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);

	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);

	} else {

		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

	}

	if (ret)
		goto out_err;

	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Cannot flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;

	return 0;

      out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		int no_wait)
{
	drm_fence_object_t *fence = bo->fence;
	int ret;

	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret =
		    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					  bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(dev, fence);
		bo->fence = NULL;
	}
	return 0;
}
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
				if (ret && allow_errors)
					return ret;

			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence) {
			drm_fence_usage_deref_unlocked(dev, bo->fence);
			bo->fence = NULL;
		}
	}
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
		drm_fence_usage_deref_locked(dev, bo->fence);
		bo->fence = NULL;
	}

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage)) {
		goto out;
	}

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

      out:
	mutex_unlock(&bo->mutex);
}
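
/*
 * Cadence note (illustrative arithmetic): the delayed-destroy work above
 * is scheduled every max(1, DRM_HZ / 100) jiffies; with DRM_HZ = 1000
 * that is 10 jiffies, i.e. roughly 10 ms between passes over
 * bm->ddestroy. Clamping to 1 keeps the delay non-zero when DRM_HZ < 100.
 */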
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
		}

		atomic_dec(&bm->count);

		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry)
			atomic_dec(&nentry->usage);
	}
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
#else
	drm_buffer_manager_t *bm =
	    container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage)) {
		drm_bo_destroy_locked(bo);
	}
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(bo);
}

static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;

	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head *l;
	LIST_HEAD(f_list);

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_splice_init(list, &f_list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, 0, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		l = f_list.next;
	}
	atomic_add(count, &fence->usage);
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
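
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * after validating buffers and emitting commands, a driver would fence
 * everything on the unfenced list in one go. Passing list = NULL selects
 * bm->unfenced, and fence = NULL asks drm_fence_buffer_objects() to
 * create and emit a new fence covering the combined fence_type of all
 * listed buffers.
 */
#if 0
static int example_fence_after_submit(drm_file_t * priv)
{
	drm_fence_object_t *fence = NULL;
	int ret;

	/* ... validate buffers and emit the command stream here ... */

	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
	if (ret)
		return ret;

	/* The caller owns a usage reference on the returned fence. */
	drm_fence_usage_deref_unlocked(priv->head->dev, fence);
	return 0;
}
#endif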
/*
 * bo->mutex locked.
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	drm_device_t *dev = bo->dev;
	drm_bo_mem_reg_t evict_mem;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (bo->mem.mem_type != mem_type)
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
		goto out;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	if (bo->type == drm_bo_type_fake) {
		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
		bo->mem.mm_node = NULL;
		goto out1;
	}

	evict_mem = bo->mem;
	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

      out1:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	list_del(&bo->lru);
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

      out:
	return ret;
}
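
/*
 * Sketch of a driver evict_mask() hook as consumed above (illustrative;
 * the policy shown is an assumption, not taken from any real driver):
 * it names where an evicted buffer of a given memory type should land,
 * typically cached system memory.
 */
#if 0
static uint32_t example_evict_mask(drm_buffer_object_t * bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
	default:
		return DRM_BO_FLAG_MEM_LOCAL;
	}
}
#endif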
static int drm_bo_mem_force_space(drm_device_t * dev,
				  drm_bo_mem_reg_t * mem,
				  uint32_t mem_type, int no_wait)
{
	drm_mm_node_t *node;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
				uint32_t mem_type,
				uint32_t mask, uint32_t * res_mask)
{
	uint32_t cur_flags = drm_bo_type_flags(mem_type);
	uint32_t flag_diff;

	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = (mask ^ cur_flags);
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;

	*res_mask = cur_flags;
	return 1;
}
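
/*
 * Worked example (illustrative): for a TT manager with
 * _DRM_FLAG_MEMTYPE_MAPPABLE set, cur_flags = DRM_BO_FLAG_MEM_TT |
 * DRM_BO_FLAG_MAPPABLE. A mask of DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED |
 * DRM_BO_FLAG_FORCE_CACHING is rejected: flag_diff has DRM_BO_FLAG_CACHED
 * set and FORCE_CACHING makes the cache-mode mismatch fatal. Without
 * FORCE_CACHING the same mask is accepted, since a CACHED wish alone is
 * not binding.
 */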
int drm_bo_mem_space(drm_buffer_object_t * bo,
		     drm_bo_mem_reg_t * mem, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	drm_mm_node_t *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!man->has_type)
			continue;

		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}

EXPORT_SYMBOL(drm_bo_mem_space);
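
/*
 * A hypothetical driver-side configuration sketch (illustrative; the
 * array contents are an assumption, not taken from any real driver).
 * The two priority arrays drive the placement logic above: mem_type_prio
 * is tried first for uncontended space, and mem_busy_prio is the
 * eviction fallback.
 */
#if 0
static uint32_t example_mem_prios[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };
static uint32_t example_busy_prios[] = { DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL };

static drm_bo_driver_t example_bo_driver = {
	.mem_type_prio = example_mem_prios,
	.mem_busy_prio = example_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(example_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(example_busy_prios),
	/* .init_mem_type, .evict_mask, .fence_type, ... as required */
};
#endif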
static int drm_bo_new_mask(drm_buffer_object_t * bo,
			   uint32_t new_mask, uint32_t hint)
{
	uint32_t new_props;

	if (bo->type == drm_bo_type_user) {
		DRM_ERROR("User buffers are not supported yet\n");
		return -EINVAL;
	}
	if (bo->type == drm_bo_type_fake &&
	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		DRM_ERROR("Fake buffers must be pinned.\n");
		return -EINVAL;
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
		     "processes\n");
		return -EPERM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.mask = new_mask;
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
{
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;

	uo = drm_lookup_user_object(priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
	return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}

/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
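
/*
 * Note on the convention used above (see drm_buffer_object_create()):
 * bo->mapped starts at -1, so -1 means "unmapped" and each mapper adds
 * one. That is why drm_buffer_object_map() tests
 * atomic_inc_and_test(&bo->mapped) (the first mapper takes the counter
 * from -1 to 0) and the unmap paths use
 * atomic_add_negative(-1, &bo->mapped) to detect the last unmap.
 */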
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}

/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 *
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 *
 * should really be an atomic operation.
 * We now "solve" this problem by keeping the buffer "unfenced" after
 * validating, but before fencing.
 */

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	unsigned long _end = jiffies + 3 * DRM_HZ;

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	do {
		mutex_unlock(&bo->mutex);
		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
			    !drm_bo_check_unfenced(bo));
		mutex_lock(&bo->mutex);
		if (ret == -EINTR)
			return -EAGAIN;
		if (ret) {
			DRM_ERROR
			    ("Error waiting for buffer to become fenced\n");
			return ret;
		}
		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	} while (ret && !time_after_eq(jiffies, _end));
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return ret;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				drm_bo_arg_reply_t * rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->mem.num_pages * PAGE_SIZE;
	rep->offset = bo->offset;
	rep->arg_handle = bo->map_list.user_token;
	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags.
			 * We need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);
	} else {
		drm_bo_fill_rep_arg(bo, rep);
	}
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Call struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
					 drm_ref_t action)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	/*
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}
/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	drm_bo_mem_reg_t mem;

	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	list_add_tail(&bo->lru, &bm->unfenced);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
			_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);

	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

      out_unlock:
	if (ret || !move_unfenced) {
		mutex_lock(&dev->struct_mutex);
		if (mem.mm_node) {
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		DRM_WAKEUP(&bo->event_queue);
		list_del(&bo->lru);
		drm_bo_add_to_lru(bo);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_unlock(&bm->evict_mutex);
	return ret;
}
static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
{
	uint32_t flag_diff = (mem->mask ^ mem->flags);

	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
		return 0;
	}
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;
	return 1;
}
static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	int type_ok = 0;
	uint32_t mem_type = 0;
	uint32_t cur_flags;

	if (drm_bo_mem_compat(mem))
		return 0;

	BUG_ON(mem->mm_node);

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (type_ok)
			break;
	}

	if (type_ok) {
		mem->mm_node = NULL;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
		return 0;
	}

	DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
	return -EINVAL;
}
/*
 * bo locked.
 */

static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      int move_unfenced, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
		  bo->mem.flags);
	ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	if (bo->type == drm_bo_type_fake) {
		ret = drm_bo_check_fake(dev, &bo->mem);
		if (ret)
			return ret;
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
					 move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */

	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);

		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node != NULL)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		}

		mutex_unlock(&dev->struct_mutex);

	} else if (bo->pinned_node != NULL) {

		mutex_lock(&dev->struct_mutex);
		drm_mm_put_block(bo->pinned_node);
		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);

	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			return ret;
	}
	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);

	/*
	 * Finally, adjust lru to be sure.
	 */

	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	} else {
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			DRM_WAKEUP(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
				  uint32_t flags, uint32_t mask, uint32_t hint,
				  drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int ret;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);

	if (ret)
		goto out;

	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
	ret = drm_bo_new_mask(bo, flags, hint);
	if (ret)
		goto out;

	ret =
	    drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
				       no_wait);
	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);

	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
			      drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return 0;
}
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
			      uint32_t hint, drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
int drm_buffer_object_create(drm_file_t * priv,
			     unsigned long size,
			     drm_bo_type_t type,
			     uint32_t mask,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	int ret = 0;
	unsigned long num_pages;

	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
		return -EINVAL;
	}
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->pinned_lru);
	INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->p_mm_list);
	INIT_LIST_HEAD(&bo->vma_list);
#endif
	bo->dev = dev;
	bo->type = type;
	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
	bo->mem.num_pages = num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
	} else {
		bo->buffer_start = buffer_start;
	}
	bo->priv_flags = 0;
	bo->mem.flags = 0;
	bo->mem.mask = 0;
	atomic_inc(&bm->count);
	ret = drm_bo_new_mask(bo, mask, hint);
	if (ret)
		goto out_err;

	if (bo->type == drm_bo_type_dc) {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_setup_vm_locked(bo);
		mutex_unlock(&dev->struct_mutex);
		if (ret)
			goto out_err;
	}
	ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
	if (ret)
		goto out_err;

	mutex_unlock(&bo->mutex);
	*buf_obj = bo;
	return 0;

      out_err:
	mutex_unlock(&bo->mutex);

	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
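
/*
 * Minimal in-kernel usage sketch (illustrative; the flag choice is an
 * assumption): create a readable/writable buffer in cached system
 * memory. On success the caller owns the creation reference and must
 * eventually drop it with drm_bo_usage_deref_unlocked().
 */
#if 0
static int example_create_bo(drm_file_t * priv, unsigned long size,
			     drm_buffer_object_t ** bo)
{
	return drm_buffer_object_create(priv, size, drm_bo_type_dc,
					DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
					DRM_BO_FLAG_MEM_LOCAL |
					DRM_BO_FLAG_CACHED,
					0, 0, 0, bo);
}
#endif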
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
				  int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
	LOCK_TEST_WITH_RETURN(dev, filp);
	return 0;
}
int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_bo_arg_t arg;
	drm_bo_arg_request_t *req = &arg.d.req;
	drm_bo_arg_reply_t rep;
	unsigned long next;
	drm_user_object_t *uo;
	drm_buffer_object_t *entry;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	do {
		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

		if (arg.handled) {
			data = arg.next;
			continue;
		}

		rep.ret = 0;
		switch (req->op) {
		case drm_bo_create:
			rep.ret =
			    drm_buffer_object_create(priv, req->size,
						     req->type,
						     req->mask,
						     req->hint,
						     req->page_alignment,
						     req->buffer_start, &entry);
			if (rep.ret)
				break;

			rep.ret =
			    drm_bo_add_user_object(priv, entry,
						   req->mask &
						   DRM_BO_FLAG_SHAREABLE);
			if (rep.ret) {
				drm_bo_usage_deref_unlocked(entry);
				break;
			}

			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unmap:
			rep.ret = drm_buffer_object_unmap(priv, req->handle);
			break;
		case drm_bo_map:
			rep.ret = drm_buffer_object_map(priv, req->handle,
							req->mask,
							req->hint, &rep);
			break;
		case drm_bo_destroy:
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			if (!uo || (uo->type != drm_buffer_type)
			    || uo->owner != priv) {
				mutex_unlock(&dev->struct_mutex);
				rep.ret = -EINVAL;
				break;
			}
			rep.ret = drm_remove_user_object(priv, uo);
			mutex_unlock(&dev->struct_mutex);
			break;
		case drm_bo_reference:
			rep.ret = drm_user_object_ref(priv, req->handle,
						      drm_buffer_type, &uo);
			if (rep.ret)
				break;
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			entry =
			    drm_user_object_entry(uo, drm_buffer_object_t,
						  base);
			atomic_dec(&entry->usage);
			mutex_unlock(&dev->struct_mutex);
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unreference:
			rep.ret = drm_user_object_unref(priv, req->handle,
							drm_buffer_type);
			break;
		case drm_bo_validate:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			rep.ret =
			    drm_bo_handle_validate(priv, req->handle, req->mask,
						   req->arg_handle, req->hint,
						   &rep);
			break;
		case drm_bo_info:
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
			break;
		case drm_bo_wait_idle:
			rep.ret = drm_bo_handle_wait(priv, req->handle,
						     req->hint, &rep);
			break;
		case drm_bo_ref_fence:
			rep.ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			rep.ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */

		if (rep.ret == -EAGAIN)
			return -EAGAIN;

		arg.handled = 1;
		arg.d.rep = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}
/*
 * Clean the unfenced list and put everything on the regular LRU.
 * This is part of the memory manager cleanup and should only be
 * called with the DRI lock held.
 * Call dev->struct_mutex locked.
 */

static void drm_bo_clean_unfenced(drm_device_t *dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *head, *list;
	drm_buffer_object_t *entry;

	head = &bm->unfenced;

	list = head->next;
	while (list != head) {
		prefetch(list->next);
		entry = list_entry(list, drm_buffer_object_t, lru);

		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		list_del(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		list = head->next;
	}
}
static int drm_bo_leave_list(drm_buffer_object_t * bo,
			     uint32_t mem_type,
			     int free_pinned, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	mutex_lock(&bo->mutex);

	ret = drm_bo_expire_fence(bo, allow_errors);
	if (ret)
		goto out;

	if (free_pinned) {
		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node == bo->mem.mm_node)
			bo->pinned_node = NULL;
		if (bo->pinned_node != NULL) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
	}

	if (bo->mem.mem_type == mem_type)
		ret = drm_bo_evict(bo, mem_type, 0);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			DRM_ERROR("Cleanup eviction failed\n");
		}
	}

      out:
	mutex_unlock(&bo->mutex);
	return ret;
}

static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
					 int pinned_list)
{
	if (pinned_list)
		return list_entry(list, drm_buffer_object_t, pinned_lru);
	else
		return list_entry(list, drm_buffer_object_t, lru);
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int free_pinned,
				   int allow_errors,
				   int pinned_list)
{
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry, *nentry;
	int ret;
	int do_restart;

	/*
	 * The list traversal is a bit odd here, because an item may
	 * disappear from the list when we release the struct_mutex or
	 * when we decrease the usage count. Also we're not guaranteed
	 * to drain pinned lists, so we can't always restart.
	 */

      restart:
	nentry = NULL;
	list_for_each_safe(list, next, head) {
		prev = list->prev;

		entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
		atomic_inc(&entry->usage);
		if (nentry) {
			atomic_dec(&nentry->usage);
			nentry = NULL;
		}

		/*
		 * Protect the next item from destruction, so we can check
		 * its list pointers later on.
		 */

		if (next != head) {
			nentry = drm_bo_entry(next, pinned_list);
			atomic_inc(&nentry->usage);
		}
		mutex_unlock(&dev->struct_mutex);

		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
					allow_errors);
		mutex_lock(&dev->struct_mutex);

		drm_bo_usage_deref_locked(entry);
		if (ret)
			return ret;

		/*
		 * Has the next item disappeared from the list?
		 */

		do_restart = ((next->prev != list) && (next->prev != prev));

		if (nentry != NULL && do_restart) {
			drm_bo_usage_deref_locked(nentry);
			nentry = NULL;
		}

		if (do_restart)
			goto restart;
	}
	return 0;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	man->use_type = 0;
	man->has_type = 0;

	ret = 0;
	if (mem_type > 0) {
		drm_bo_clean_unfenced(dev);
		drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
		drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);

		if (drm_mm_clean(&man->manager)) {
			drm_mm_takedown(&man->manager);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
/*
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
 */

static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
		return -EINVAL;
	}

	drm_bo_clean_unfenced(dev);
	ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);

	return ret;
}
int drm_bo_init_mm(drm_device_t * dev,
		   unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;
	drm_mem_type_manager_t *man;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}

	man = &bm->man[type];
	if (man->has_type) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = 1;
	man->use_type = 1;

	INIT_LIST_HEAD(&man->lru);
	INIT_LIST_HEAD(&man->pinned);

	return 0;
}

EXPORT_SYMBOL(drm_bo_init_mm);
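
/*
 * Driver-side initialization sketch (illustrative; the 128 MB aperture
 * size is an assumption): after drm_bo_driver_init() has set up the
 * DRM_BO_MEM_LOCAL type, additional types are registered like this,
 * with p_offset and p_size given in pages.
 */
#if 0
static int example_init_tt(drm_device_t * dev)
{
	return drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
			      (128 * 1024 * 1024) >> PAGE_SHIFT);
}
#endif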
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;
	drm_mem_type_manager_t *man;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

	while (i--) {
		man = &bm->man[i];
		if (man->has_type) {
			man->use_type = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			man->has_type = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->man[0].lru)) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->man[0].pinned)) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, 0, 0, 0);
	if (ret)
		goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
      out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}

EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	int ret = 0;
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	switch (arg.req.op) {
	case mm_init:
		ret = -EINVAL;
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR
			    ("System memory buffers already initialized.\n");
			break;
		}
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		break;
	case mm_takedown:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = -EINVAL;
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for System memory buffers.\n");
			break;
		}
		ret = 0;
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		}
		break;
	case mm_lock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		break;
	case mm_unlock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = 0;
		break;
	default:
		DRM_ERROR("Function not implemented yet\n");
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
/*
 * buffer object vm functions.
 */

int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		if (mem->mem_type == DRM_BO_MEM_LOCAL)
			return 0;

		if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
			return 0;

		if (mem->flags & DRM_BO_FLAG_CACHED)
			return 0;
	}
	return 1;
}

EXPORT_SYMBOL(drm_mem_reg_is_pci);
/**
 * \c Get the PCI offset for the buffer object memory.
 *
 * \param mem The buffer object memory region.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
 */

int drm_bo_pci_offset(drm_device_t * dev,
		      drm_bo_mem_reg_t * mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
		return -EINVAL;

	if (drm_mem_reg_is_pci(dev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}
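
/*
 * Worked example (illustrative): for a buffer at mm_node->start = 16
 * with man->io_offset = 0xd0000000 and PAGE_SHIFT = 12, the CPU-visible
 * physical address is *bus_base + *bus_offset = 0xd0000000 + (16 << 12)
 * = 0xd0010000, and *bus_size covers num_pages << 12 bytes.
 */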
/**
 * \c Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

static void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	if (list->user_token) {
		drm_ht_remove_item(&dev->map_hash, &list->hash);
		list->user_token = 0;
	}
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (!map)
		return;

	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
	list->map = NULL;
	list->user_token = 0ULL;
	drm_bo_usage_deref_locked(bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->offset = 0;
	map->type = _DRM_TTM;
	map->flags = _DRM_REMOVABLE;
	map->size = bo->mem.num_pages * PAGE_SIZE;
	atomic_inc(&bo->usage);
	map->handle = (void *)bo;

	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
						    bo->mem.num_pages, 0, 0);

	if (!list->file_offset_node) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  bo->mem.num_pages, 0);

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}