/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
 * heads.
 *
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex does also protect the buffer list heads, so to manipulate those, we need
 * both the bo->mutex and the dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
 * traversal will, in general, need to be restarted.
 */
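/*
 * A minimal sketch of the resulting traversal pattern (illustrative only;
 * "bo" and "man" stand for any buffer object and memory type manager):
 *
 *	mutex_lock(&dev->struct_mutex);
 * restart:
 *	list_for_each_entry(bo, &man->lru, lru) {
 *		atomic_inc(&bo->usage);
 *		mutex_unlock(&dev->struct_mutex);
 *		mutex_lock(&bo->mutex);		(locking order: bo->mutex first)
 *		mutex_lock(&dev->struct_mutex);
 *		... the list may have changed while struct_mutex was
 *		dropped, so the traversal restarts ...
 *		goto restart;
 *	}
 */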
static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}
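/*
 * Example (assuming DRM_BO_MEM_LOCAL == 0 and DRM_BO_MEM_TT == 1):
 * drm_bo_type_flags(DRM_BO_MEM_LOCAL) yields (1 << 24) and
 * drm_bo_type_flags(DRM_BO_MEM_TT) yields (1 << 25), i.e. the
 * per-memory-type flag bits occupy the buffer flags word from bit 24 up.
 */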
/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}
void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	if (bo->mem.mm_node != bo->pinned_node) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}
static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}
static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	switch (bo->type) {
	case drm_bo_type_dc:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
				  drm_bo_mem_reg_t * mem,
				  int evict, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
	drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci)
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, new_man->flags &
					   DRM_BO_FLAG_CACHED,
					   mem->mm_node->start);
			if (ret)
				goto out_err;
		}
	}

	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
		drm_bo_mem_reg_t *old_mem = &bo->mem;
		uint32_t save_flags = old_mem->flags;
		uint32_t save_mask = old_mem->mask;

		*old_mem = *mem;
		mem->mm_node = NULL;
		old_mem->mask = save_mask;
		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
	} else {
		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
	}

	if (ret)
		goto out_err;

	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Can not flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;

	return 0;

      out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
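/*
 * The move path above is tried in order: a flag-only update for local
 * memory, a TTM-to-TTM move, the driver's own move hook, and finally the
 * memcpy fallback. A hypothetical driver hook with the bo_driver->move
 * signature used above could look like this (sketch only; example_blit_move
 * is an assumed driver-internal helper, not a DRM function):
 */
#if 0
static int example_bo_move(drm_buffer_object_t * bo,
			   int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
{
	/* Accelerated copy for moves touching VRAM; otherwise fall back
	 * to the generic memcpy move used above. */
	if (bo->mem.mem_type == DRM_BO_MEM_VRAM ||
	    new_mem->mem_type == DRM_BO_MEM_VRAM)
		return example_blit_move(bo, evict, no_wait, new_mem);
	return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
#endif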
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		int no_wait)
{
	drm_fence_object_t *fence = bo->fence;
	int ret;

	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret =
		    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					  bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(dev, fence);
		bo->fence = NULL;
	}
	return 0;
}
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
				if (ret && allow_errors)
					return ret;
			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence) {
			drm_fence_usage_deref_unlocked(dev, bo->fence);
			bo->fence = NULL;
		}
	}
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
		drm_fence_usage_deref_locked(dev, bo->fence);
		bo->fence = NULL;
	}

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage))
		goto out;

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

      out:
	mutex_unlock(&bo->mutex);
}
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		BUG_ON(bo->fence != NULL);

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry)
			atomic_dec(&nentry->usage);
	}
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
#else
	drm_buffer_manager_t *bm =
	    container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage))
		drm_bo_destroy_locked(bo);
}
static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(bo);
}
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;

	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head *l;
	LIST_HEAD(f_list);

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_splice_init(list, &f_list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		l = f_list.next;
	}
	atomic_add(count, &fence->usage);
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
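/*
 * Typical use (a sketch, not part of this file): after emitting commands
 * that reference validated buffers, a driver fences everything still on
 * the unfenced list. Passing a NULL list selects bm->unfenced, and a NULL
 * fence asks the function to create and emit one.
 */
#if 0
static int example_fence_unfenced(drm_file_t * priv)
{
	drm_fence_object_t *fence = NULL;
	int ret;

	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
	if (ret)
		return ret;
	/* ... the caller deregisters fence usage when done ... */
	return 0;
}
#endif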
/*
 * bo->mutex locked.
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	drm_device_t *dev = bo->dev;
	drm_bo_mem_reg_t evict_mem;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (bo->mem.mem_type != mem_type)
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
		goto out;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	if (bo->type == drm_bo_type_fake) {
		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
		bo->mem.mm_node = NULL;
		bo->pinned_mem_type = DRM_BO_MEM_LOCAL;
		bo->pinned_node = NULL;
		goto out1;
	}

	evict_mem = bo->mem;
	evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer eviction.\n");
		goto out;
	}

	if (bo->pinned_node)
		DRM_ERROR("Evicting pinned buffer\n");

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

      out1:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	list_del(&bo->lru);
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

      out:
	return ret;
}
static int drm_bo_mem_force_space(drm_device_t * dev,
				  drm_bo_mem_reg_t * mem,
				  uint32_t mem_type, int no_wait)
{
	drm_mm_node_t *node;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags &
		       (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
				uint32_t mem_type,
				uint32_t mask, uint32_t * res_mask)
{
	uint32_t cur_flags = drm_bo_type_flags(mem_type);
	uint32_t flag_diff;

	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	flag_diff = (mask ^ cur_flags);
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (mask & DRM_BO_FLAG_FORCE_CACHING))
		return 0;
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    (mask & DRM_BO_FLAG_FORCE_MAPPABLE))
		return 0;

	*res_mask = cur_flags;
	return 1;
}
int drm_bo_mem_space(drm_buffer_object_t * bo,
		     drm_bo_mem_reg_t * mem, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	drm_mm_node_t *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			DRM_ERROR("Choosing pinned region\n");
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}

EXPORT_SYMBOL(drm_bo_mem_space);
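/*
 * The placement loops above are driven by two driver-supplied priority
 * arrays. A hypothetical driver that prefers VRAM, then TT, then local
 * memory might describe that as follows (sketch; the field names are the
 * bo_driver members dereferenced above):
 */
#if 0
static uint32_t example_mem_type_prio[] = {
	DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
};
/*
 *	.mem_type_prio = example_mem_type_prio,
 *	.num_mem_type_prio = ARRAY_SIZE(example_mem_type_prio),
 *	.mem_busy_prio = example_mem_type_prio,
 *	.num_mem_busy_prio = ARRAY_SIZE(example_mem_type_prio),
 */
#endif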
static int drm_bo_new_mask(drm_buffer_object_t * bo,
			   uint32_t new_mask, uint32_t hint)
{
	uint32_t new_props;

	if (bo->type == drm_bo_type_user) {
		DRM_ERROR("User buffers are not supported yet\n");
		return -EINVAL;
	}
	if (bo->type == drm_bo_type_fake &&
	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		DRM_ERROR("Fake buffers must be pinned.\n");
		return -EINVAL;
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
		     "processes\n");
		return -EPERM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.mask = new_mask;
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
{
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;

	uo = drm_lookup_user_object(priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
	return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be an atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	unsigned long _end = jiffies + 3 * DRM_HZ;

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	do {
		mutex_unlock(&bo->mutex);
		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
			    !drm_bo_check_unfenced(bo));
		mutex_lock(&bo->mutex);
		if (ret == -EINTR)
			return -EAGAIN;
		if (ret) {
			DRM_ERROR
			    ("Error waiting for buffer to become fenced\n");
			return ret;
		}
		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	} while (ret && !time_after_eq(jiffies, _end));
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return ret;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				drm_bo_arg_reply_t * rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->mem.num_pages * PAGE_SIZE;
	rep->offset = bo->offset;
	rep->arg_handle = bo->map_list.user_token;
	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags.
			 * need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);
	} else
		drm_bo_fill_rep_arg(bo, rep);
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
					 drm_ref_t action)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	/*
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}
/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	drm_bo_mem_reg_t mem;

	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	list_add_tail(&bo->lru, &bm->unfenced);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
			_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);

	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

      out_unlock:
	if (ret || !move_unfenced) {
		mutex_lock(&dev->struct_mutex);
		if (mem.mm_node) {
			drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		DRM_WAKEUP(&bo->event_queue);
		list_del(&bo->lru);
		drm_bo_add_to_lru(bo);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_unlock(&bm->evict_mutex);
	return ret;
}
static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
{
	uint32_t flag_diff = (mem->mask ^ mem->flags);

	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (mem->mask & DRM_BO_FLAG_FORCE_CACHING))
		return 0;
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))
		return 0;
	return 1;
}
static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	int type_ok = 0;
	uint32_t mem_type = 0;
	uint32_t cur_flags;

	if (drm_bo_mem_compat(mem))
		return 0;

	BUG_ON(mem->mm_node);

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (type_ok)
			break;
	}

	if (type_ok) {
		mem->mm_node = NULL;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
		return 0;
	}

	DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
	return -EINVAL;
}
/*
 * bo locked.
 */

static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      int move_unfenced, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
		  bo->mem.flags);
	ret =
	    driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type);
	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	if (bo->type == drm_bo_type_fake) {
		ret = drm_bo_check_fake(dev, &bo->mem);
		if (ret)
			return ret;
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE,
					 no_wait, move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */

	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);

		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		}

		mutex_unlock(&dev->struct_mutex);

	} else if (bo->pinned_node != NULL) {

		mutex_lock(&dev->struct_mutex);
		drm_mm_put_block(bo->pinned_node);
		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			return ret;
	}
	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);

	/*
	 * Finally, adjust lru to be sure.
	 */

	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	} else {
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			DRM_WAKEUP(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
				  uint32_t flags, uint32_t mask, uint32_t hint,
				  drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int ret;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;

	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
	ret = drm_bo_new_mask(bo, flags, hint);
	if (ret)
		goto out;

	ret =
	    drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
				       no_wait);
	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
			      drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return 0;
}
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
			      uint32_t hint, drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
int drm_buffer_object_create(drm_file_t * priv,
			     unsigned long size,
			     drm_bo_type_t type,
			     uint32_t mask,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	int ret = 0;
	unsigned long num_pages;

	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
		return -EINVAL;
	}
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->pinned_lru);
	INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->p_mm_list);
	INIT_LIST_HEAD(&bo->vma_list);
#endif
	bo->dev = dev;
	bo->type = type;
	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
	bo->mem.num_pages = num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
	} else {
		bo->buffer_start = buffer_start;
	}
	bo->priv_flags = 0;
	bo->mem.flags = 0;
	bo->mem.mask = 0;
	atomic_inc(&bm->count);
	ret = drm_bo_new_mask(bo, mask, hint);
	if (ret)
		goto out_err;

	if (bo->type == drm_bo_type_dc) {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_setup_vm_locked(bo);
		mutex_unlock(&dev->struct_mutex);
		if (ret)
			goto out_err;
	}
	ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
	if (ret)
		goto out_err;

	mutex_unlock(&bo->mutex);
	*buf_obj = bo;
	return 0;

      out_err:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
				  int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
	LOCK_TEST_WITH_RETURN(dev, filp);
	return 0;
}
int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_bo_arg_t arg;
	drm_bo_arg_request_t *req = &arg.d.req;
	drm_bo_arg_reply_t rep;
	unsigned long next;
	drm_user_object_t *uo;
	drm_buffer_object_t *entry;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	do {
		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

		if (arg.handled) {
			data = arg.next;
			continue;
		}

		rep.ret = 0;
		switch (req->op) {
		case drm_bo_create:
			rep.ret =
			    drm_buffer_object_create(priv, req->size,
						     req->type,
						     req->mask,
						     req->hint,
						     req->page_alignment,
						     req->buffer_start, &entry);
			if (rep.ret)
				break;

			rep.ret =
			    drm_bo_add_user_object(priv, entry,
						   req->mask &
						   DRM_BO_FLAG_SHAREABLE);
			if (rep.ret)
				drm_bo_usage_deref_unlocked(entry);

			if (rep.ret)
				break;

			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unmap:
			rep.ret = drm_buffer_object_unmap(priv, req->handle);
			break;
		case drm_bo_map:
			rep.ret = drm_buffer_object_map(priv, req->handle,
							req->mask,
							req->hint, &rep);
			break;
		case drm_bo_destroy:
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			if (!uo || (uo->type != drm_buffer_type)
			    || uo->owner != priv) {
				mutex_unlock(&dev->struct_mutex);
				rep.ret = -EINVAL;
				break;
			}
			rep.ret = drm_remove_user_object(priv, uo);
			mutex_unlock(&dev->struct_mutex);
			break;
		case drm_bo_reference:
			rep.ret = drm_user_object_ref(priv, req->handle,
						      drm_buffer_type, &uo);
			if (rep.ret)
				break;
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			entry =
			    drm_user_object_entry(uo, drm_buffer_object_t,
						  base);
			atomic_dec(&entry->usage);
			mutex_unlock(&dev->struct_mutex);
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unreference:
			rep.ret = drm_user_object_unref(priv, req->handle,
							drm_buffer_type);
			break;
		case drm_bo_validate:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			rep.ret =
			    drm_bo_handle_validate(priv, req->handle, req->mask,
						   req->arg_handle, req->hint,
						   &rep);
			break;
		case drm_bo_info:
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
			break;
		case drm_bo_wait_idle:
			rep.ret = drm_bo_handle_wait(priv, req->handle,
						     req->hint, &rep);
			break;
		case drm_bo_ref_fence:
			rep.ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			rep.ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */

		if (rep.ret == -EAGAIN)
			break;

		arg.handled = 1;
		arg.d.rep = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}
static int drm_bo_leave_list(drm_buffer_object_t * bo,
			     uint32_t mem_type,
			     int free_pinned, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	ret = drm_bo_expire_fence(bo, allow_errors);
	if (ret)
		goto out;

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (free_pinned) {
		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node == bo->mem.mm_node)
			bo->pinned_node = NULL;
		if (bo->pinned_node != NULL) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
	}

	if (bo->mem.mem_type == mem_type)
		ret = drm_bo_evict(bo, mem_type, 0);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			DRM_ERROR("Cleanup eviction failed\n");
		}
	}

      out:
	mutex_unlock(&bo->mutex);
	mutex_lock(&dev->struct_mutex);
	drm_bo_usage_deref_locked(bo);
	return ret;
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int free_pinned,
				   int allow_errors, int pinned_list)
{
	struct list_head *list, *next;
	drm_buffer_object_t *entry;
	int ret;
	int do_retry;

	/*
	 * We need to restart if a node disappears from under us.
	 * Nodes cannot be added since the hardware lock is needed
	 * for this operation.
	 */

      retry:
	list_for_each_safe(list, next, head) {
		if (pinned_list)
			entry = list_entry(list, drm_buffer_object_t,
					   pinned_lru);
		else
			entry = list_entry(list, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
					allow_errors);

		do_retry = list->next != next;
		drm_bo_usage_deref_locked(entry);

		if (ret)
			return ret;

		if (do_retry)
			goto retry;
	}
	return 0;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	man->use_type = 0;
	man->has_type = 0;

	ret = 0;
	if (mem_type > 0) {
		drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0, 0);
		drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
		drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);

		if (drm_mm_clean(&man->manager)) {
			drm_mm_takedown(&man->manager);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
/*
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
 */

static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1, 0);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);

	return ret;
}
static int drm_bo_init_mm(drm_device_t * dev,
			  unsigned type,
			  unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;
	drm_mem_type_manager_t *man;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}

	man = &bm->man[type];
	if (man->has_type) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return -EINVAL;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = 1;
	man->use_type = 1;

	INIT_LIST_HEAD(&man->lru);
	INIT_LIST_HEAD(&man->pinned);

	return 0;
}
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;
	unsigned i = DRM_BO_MEM_TYPES;
	drm_mem_type_manager_t *man;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;
	ret = 0;

	while (i--) {
		man = &bm->man[i];
		if (man->has_type) {
			man->use_type = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			man->has_type = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->man[0].lru)) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->man[0].pinned)) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, 0, 0, 0);
	if (ret)
		goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);

      out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}

EXPORT_SYMBOL(drm_bo_driver_init);
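/*
 * drm_bo_driver_init() only brings up the DRM_BO_MEM_LOCAL type; the
 * other memory types are initialized later through drm_mm_init_ioctl()
 * below, which ends up in drm_bo_init_mm(). A driver load hook would do
 * (sketch):
 *
 *	ret = drm_bo_driver_init(dev);
 *	if (ret)
 *		return ret;
 *	(TT / VRAM ranges are then set up from user space via the
 *	 mm_init ioctl rather than here.)
 */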
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	int ret = 0;
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	switch (arg.req.op) {
	case mm_init:
		ret = -EINVAL;
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR
			    ("System memory buffers already initialized.\n");
			break;
		}
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		break;
	case mm_takedown:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = -EINVAL;
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for System memory buffers.\n");
			break;
		}
		ret = 0;
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		}
		break;
	case mm_lock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		break;
	case mm_unlock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = 0;
		break;
	default:
		DRM_ERROR("Function not implemented yet\n");
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
/*
 * buffer object vm functions.
 */

int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		if (mem->mem_type == DRM_BO_MEM_LOCAL)
			return 0;

		if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
			return 0;

		if (mem->flags & DRM_BO_FLAG_CACHED)
			return 0;
	}
	return 1;
}

EXPORT_SYMBOL(drm_mem_reg_is_pci);
/**
 * \c Get the PCI offset for the buffer object memory.
 *
 * \param dev The drm device.
 * \param mem The memory region of the buffer object.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
 */

int drm_bo_pci_offset(drm_device_t * dev,
		      drm_bo_mem_reg_t * mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
		return -EINVAL;

	if (drm_mem_reg_is_pci(dev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}
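/*
 * Typical use (sketch): translate a buffer's current placement into a
 * CPU-visible physical range before setting up a kernel or user mapping.
 *
 *	unsigned long bus_base, bus_offset, bus_size;
 *	ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base,
 *				&bus_offset, &bus_size);
 *	if (!ret && bus_size != 0)
 *		... region is at bus_base + bus_offset, bus_size bytes ...
 */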
/**
 * \c Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}

static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	if (list->user_token) {
		drm_ht_remove_item(&dev->map_hash, &list->hash);
		list->user_token = 0;
	}
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (!map)
		return;

	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
	list->map = NULL;
	list->user_token = 0ULL;
	drm_bo_usage_deref_locked(bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->offset = 0;
	map->type = _DRM_TTM;
	map->flags = _DRM_REMOVABLE;
	map->size = bo->mem.num_pages * PAGE_SIZE;
	atomic_inc(&bo->usage);
	map->handle = (void *)bo;

	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
						    bo->mem.num_pages, 0, 0);

	if (!list->file_offset_node) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  bo->mem.num_pages, 0);

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}