/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads,
 * hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those,
 * we need both the bo->mutex and the dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated. When dev->struct_mutex is released to grab
 * bo->mutex, the list traversal will, in general, need to be restarted.
 */
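/*
 * Example (a sketch mirroring drm_bo_mem_force_space() below, not an extra
 * call sequence of its own): to operate on a buffer found on an lru list
 * while holding dev->struct_mutex, take a usage reference first so the
 * object cannot vanish, drop struct_mutex to honor the
 * bo->mutex -> dev->struct_mutex locking order, and restart the traversal
 * afterwards:
 *
 *	entry = list_entry(man->lru.next, drm_buffer_object_t, lru);
 *	atomic_inc(&entry->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&entry->mutex);
 *	... operate on entry ...
 *	mutex_unlock(&entry->mutex);
 *	drm_bo_usage_deref_unlocked(entry);
 *	mutex_lock(&dev->struct_mutex);
 *	... restart the list traversal ...
 */
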
static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}
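/*
 * The memory-type flag for type n lives at bit (24 + n). Assuming the usual
 * drm.h definitions (DRM_BO_MEM_TT == 1, DRM_BO_FLAG_MEM_TT == 1 << 25),
 * this means, for example:
 *
 *	drm_bo_type_flags(DRM_BO_MEM_TT) == DRM_BO_FLAG_MEM_TT
 */
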
/*
 * bo locked. dev->struct_mutex locked.
 */
void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}
void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
	drm_mem_type_manager_t *man;

	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}
static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return 0;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	if (!bo->map_list.map)
		return 0;

	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}
static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}
/*
 * Call bo->mutex locked.
 */
static int drm_bo_add_ttm(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	bo->ttm = NULL;
	switch (bo->type) {
	case drm_bo_type_dc:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_kernel:
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
				  drm_bo_mem_reg_t * mem,
				  int evict, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
	drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci)
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, new_man->flags &
					   DRM_BO_FLAG_CACHED,
					   mem->mm_node->start);
			if (ret)
				goto out_err;
		}
	}

	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {

		drm_bo_mem_reg_t *old_mem = &bo->mem;
		uint64_t save_flags = old_mem->flags;
		uint64_t save_mask = old_mem->mask;

		*old_mem = *mem;
		mem->mm_node = NULL;
		old_mem->mask = save_mask;
		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);

	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {

		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);

	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);

	} else {

		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

	}

	if (ret)
		goto out_err;

	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret =
		    dev->driver->bo_driver->invalidate_caches(dev,
							      bo->mem.flags);
		if (ret)
			DRM_ERROR("Cannot flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;

	return 0;

      out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		int no_wait)
{
	int ret;
	drm_fence_object_t *fence = bo->fence;

	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret = drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					    bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(dev, fence);
		bo->fence = NULL;
	}
	return 0;
}
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
				if (ret && allow_errors)
					return ret;

			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence) {
			drm_fence_usage_deref_unlocked(dev, bo->fence);
			bo->fence = NULL;
		}
	}
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing it from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
		drm_fence_usage_deref_locked(dev, bo->fence);
		bo->fence = NULL;
	}

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage)) {
		goto out;
	}

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

      out:
	mutex_unlock(&bo->mutex);
	return;
}
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);

		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);

	return;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry) {
			atomic_dec(&nentry->usage);
		}
	}
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
#else
	drm_buffer_manager_t *bm =
	    container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}
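
/*
 * Destruction thus happens in two stages (as implemented above): a buffer
 * whose fence has not yet signaled is parked on bm->ddestroy, and the
 * delayed workqueue re-runs drm_bo_delayed_delete() roughly every
 * DRM_HZ / 100 ticks until drm_bo_cleanup_refs() can drop the remaining
 * references and free the object.
 */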
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage)) {
		drm_bo_destroy_locked(bo);
	}
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(bo);
}

static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;

	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head *l;
	LIST_HEAD(f_list);

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_splice_init(list, &f_list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, 0, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		prefetch(l->next);
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		l = f_list.next;
	}
	atomic_add(count, &fence->usage);
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
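
/*
 * Usage sketch (hypothetical driver code, not taken from this file): after
 * submitting a batch that references the buffers on the unfenced list, a
 * driver would typically do
 *
 *	drm_fence_object_t *fence = NULL;
 *	ret = drm_fence_buffer_objects(priv, NULL, fence_flags, NULL, &fence);
 *
 * passing list == NULL to fence everything on bm->unfenced and letting the
 * function create and emit the fence object. Per the note above, the caller
 * is responsible for deregistering usage of the returned fence.
 */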
static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	drm_device_t *dev = bo->dev;
	drm_bo_mem_reg_t evict_mem;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (bo->mem.mem_type != mem_type)
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
		goto out;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	if (bo->type == drm_bo_type_fake) {
		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
		bo->mem.mm_node = NULL;
		goto out1;
	}

	evict_mem = bo->mem;
	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

      out1:
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	list_del(&bo->lru);
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

      out:
	return ret;
}
static int drm_bo_mem_force_space(drm_device_t * dev,
				  drm_bo_mem_reg_t * mem,
				  uint32_t mem_type, int no_wait)
{
	drm_mm_node_t *node;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}
static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
				uint32_t mem_type,
				uint32_t mask, uint32_t * res_mask)
{
	uint32_t cur_flags = drm_bo_type_flags(mem_type);
	uint32_t flag_diff;

	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = (mask ^ cur_flags);
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;

	*res_mask = cur_flags;
	return 1;
}
int drm_bo_mem_space(drm_buffer_object_t * bo,
		     drm_bo_mem_reg_t * mem, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	drm_mm_node_t *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!man->has_type)
			continue;

		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}

EXPORT_SYMBOL(drm_bo_mem_space);
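
/*
 * drm_bo_mem_space() walks the driver-supplied placement priority lists.
 * A driver would typically set these up as (hypothetical example arrays,
 * not defined in this file):
 *
 *	static uint32_t mem_prios[]  = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT,
 *					DRM_BO_MEM_LOCAL};
 *	static uint32_t busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
 *
 * The first list (mem_type_prio) is tried without evicting anything; the
 * second (mem_busy_prio) is used when space must be forced by evicting LRU
 * buffers via drm_bo_mem_force_space().
 */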
static int drm_bo_new_mask(drm_buffer_object_t * bo,
			   uint64_t new_mask, uint32_t hint)
{
	uint32_t new_props;

	if (bo->type == drm_bo_type_user) {
		DRM_ERROR("User buffers are not supported yet\n");
		return -EINVAL;
	}
	if (bo->type == drm_bo_type_fake &&
	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		DRM_ERROR("Fake buffers must be pinned.\n");
		return -EINVAL;
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
		     "processes.\n");
		return -EPERM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.mask = new_mask;
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
{
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;

	uo = drm_lookup_user_object(priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
	return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Unlike drm_bo_busy(), this function doesn't do any fence flushing.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be an atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	ret = 0;
	mutex_unlock(&bo->mutex);
	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    !drm_bo_check_unfenced(bo));
	mutex_lock(&bo->mutex);
	if (ret == -EINTR)
		return -EAGAIN;
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return -EBUSY;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
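
/*
 * The window this guards against looks like (a schematic timeline, not
 * code from this file):
 *
 *	validate  -> buffer marked _DRM_BO_FLAG_UNFENCED, put on bm->unfenced
 *	submit    -> commands referencing the buffer are queued to hardware
 *	fence     -> drm_fence_buffer_objects() attaches the fence and moves
 *	             the buffer back to its normal lru list
 *
 * Between validate and fence the buffer must not be touched, which is what
 * drm_bo_wait_unfenced() enforces.
 */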
/*
 * Fill in the ioctl reply argument with buffer info.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				struct drm_bo_info_rep *rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->mem.num_pages * PAGE_SIZE;
	rep->offset = bo->offset;
	rep->arg_handle = bo->map_list.user_token;
	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 struct drm_bo_info_rep *rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags.
			 * need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);

	} else
		drm_bo_fill_rep_arg(bo, rep);
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
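
/*
 * A sketch of the mapping life cycle implemented above (assuming the usual
 * ref-object semantics of the drm user-object code): drm_buffer_object_map()
 * bumps bo->mapped and adds a _DRM_REF_TYPE1 ref object;
 * drm_buffer_object_unmap() removes the ref object, whose release hook
 * (drm_buffer_user_object_unmap below) decrements bo->mapped and wakes
 * waiters. If the client dies with the buffer mapped, the ref object is
 * destroyed during file-release cleanup and the same hook runs, so the
 * mapping count cannot leak.
 */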
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
					 drm_ref_t action)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	/*
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}
/*
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	drm_bo_mem_reg_t mem;
	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	list_add_tail(&bo->lru, &bm->unfenced);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
			_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);

	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

      out_unlock:
	if (ret || !move_unfenced) {
		mutex_lock(&dev->struct_mutex);
		if (mem.mm_node) {
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		DRM_WAKEUP(&bo->event_queue);
		list_del(&bo->lru);
		drm_bo_add_to_lru(bo);
		mutex_unlock(&dev->struct_mutex);
	}

	mutex_unlock(&bm->evict_mutex);
	return ret;
}
static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
{
	uint32_t flag_diff = (mem->mask ^ mem->flags);

	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
		return 0;
	}
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;
	return 1;
}
static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	int type_ok = 0;
	uint32_t mem_type = 0;
	uint32_t cur_flags;

	if (drm_bo_mem_compat(mem))
		return 0;

	BUG_ON(mem->mm_node);

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
					       &cur_flags);
		if (type_ok)
			break;
	}

	if (type_ok) {
		mem->mm_node = NULL;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
		return 0;
	}

	DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
		  (unsigned long long) mem->mask);
	return -EINVAL;
}
static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      uint32_t fence_class,
				      int move_unfenced, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	uint32_t ftype;
	int ret;

	DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
		  (unsigned long long) bo->mem.mask,
		  (unsigned long long) bo->mem.flags);

	ret = driver->fence_type(bo, &ftype);

	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	/*
	 * We're switching command submission mechanism,
	 * or cannot simply rely on the hardware serializing for us.
	 *
	 * Wait for buffer idle.
	 */

	if ((fence_class != bo->fence_class) ||
	    ((ftype ^ bo->fence_type) & bo->fence_type)) {

		ret = drm_bo_wait(bo, 0, 0, no_wait);

		if (ret)
			return ret;

	}

	bo->fence_class = fence_class;
	bo->fence_type = ftype;
	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	if (bo->type == drm_bo_type_fake) {
		ret = drm_bo_check_fake(dev, &bo->mem);
		if (ret)
			return ret;
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
					 move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */

	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);

		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node != NULL)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		}

		mutex_unlock(&dev->struct_mutex);

	} else if (bo->pinned_node != NULL) {

		mutex_lock(&dev->struct_mutex);

		if (bo->pinned_node != bo->mem.mm_node)
			drm_mm_put_block(bo->pinned_node);

		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);

	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			return ret;
	}
	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);

	/*
	 * Finally, adjust lru to be sure.
	 */

	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	} else {
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			DRM_WAKEUP(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
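
/*
 * Example (a sketch of caller-side flags, using flags defined in drm.h):
 * validating with a mask of
 *
 *	DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_READ | DRM_BO_FLAG_NO_EVICT
 *
 * places the buffer in VRAM and, through the pinned-buffer branch above,
 * moves it to the pinned lru and keeps its mm_node in bo->pinned_node so
 * that eviction and cleanup leave the region alone.
 */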
static int drm_bo_handle_validate(drm_file_t * priv,
				  uint32_t handle,
				  uint32_t fence_class,
				  uint64_t flags, uint64_t mask, uint32_t hint,
				  struct drm_bo_info_rep *rep)
{
	drm_buffer_object_t *bo;
	int ret;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		return -EINVAL;
	}

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);

	if (ret)
		goto out;

	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
	ret = drm_bo_new_mask(bo, flags, hint);
	if (ret)
		goto out;

	ret =
	    drm_buffer_object_validate(bo, fence_class,
				       !(hint & DRM_BO_HINT_DONT_FENCE),
				       no_wait);
	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);

	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_handle_info(drm_file_t *priv, uint32_t handle,
			      struct drm_bo_info_rep *rep)
{
	drm_buffer_object_t *bo;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		return -EINVAL;
	}
	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return 0;
}
static int drm_bo_handle_wait(drm_file_t *priv, uint32_t handle,
			      uint32_t hint,
			      struct drm_bo_info_rep *rep)
{
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		return -EINVAL;
	}

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
int drm_buffer_object_create(drm_device_t *dev,
			     unsigned long size,
			     drm_bo_type_t type,
			     uint64_t mask,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	int ret = 0;
	unsigned long num_pages;

	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
		return -EINVAL;
	}
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->pinned_lru);
	INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->p_mm_list);
	INIT_LIST_HEAD(&bo->vma_list);
#endif
	bo->dev = dev;
	bo->type = type;
	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
	bo->mem.num_pages = num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
	} else {
		bo->buffer_start = buffer_start;
	}
	bo->priv_flags = 0;
	bo->mem.flags = 0ULL;
	bo->mem.mask = 0ULL;
	atomic_inc(&bm->count);
	ret = drm_bo_new_mask(bo, mask, hint);

	if (ret)
		goto out_err;

	if (bo->type == drm_bo_type_dc) {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_setup_vm_locked(bo);
		mutex_unlock(&dev->struct_mutex);
		if (ret)
			goto out_err;
	}
	ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
	if (ret)
		goto out_err;

	mutex_unlock(&bo->mutex);
	*buf_obj = bo;
	return 0;

      out_err:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
				  int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
	LOCK_TEST_WITH_RETURN(dev, filp);
	return 0;
}
int drm_bo_op_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_op_arg arg;
	struct drm_bo_op_req *req = &arg.d.req;
	struct drm_bo_info_rep rep;
	unsigned long next;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	do {
		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

		if (arg.handled) {
			data = arg.next;
			continue;
		}

		ret = 0;
		switch (req->op) {
		case drm_bo_validate:
			ret = drm_bo_lock_test(dev, filp);
			if (ret)
				break;
			ret = drm_bo_handle_validate(priv, req->bo_req.handle,
						     req->bo_req.fence_class,
						     req->bo_req.flags,
						     req->bo_req.mask,
						     req->bo_req.hint,
						     &rep);
			break;
		case drm_bo_fence:
			ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		case drm_bo_ref_fence:
			ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */

		if (ret == -EAGAIN)
			break;

		arg.handled = 1;
		arg.d.rep.ret = ret;
		arg.d.rep.bo_info = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}
int drm_bo_create_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_create_arg arg;
	struct drm_bo_create_req *req = &arg.d.req;
	struct drm_bo_info_rep *rep = &arg.d.rep;
	drm_buffer_object_t *entry;
	int ret = 0;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_bo_lock_test(dev, filp);
	if (ret)
		goto out;

	ret = drm_buffer_object_create(priv->head->dev,
				       req->size, req->type, req->mask,
				       req->hint, req->page_alignment,
				       req->buffer_start, &entry);
	if (ret)
		goto out;

	ret = drm_bo_add_user_object(priv, entry,
				     req->mask & DRM_BO_FLAG_SHAREABLE);
	if (ret) {
		drm_bo_usage_deref_unlocked(entry);
		goto out;
	}

	mutex_lock(&entry->mutex);
	drm_bo_fill_rep_arg(entry, rep);
	mutex_unlock(&entry->mutex);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
      out:
	return ret;
}
int drm_bo_destroy_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_handle_arg arg;
	drm_user_object_t *uo;
	int ret = 0;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, arg.handle);
	if (!uo || (uo->type != drm_buffer_type) || uo->owner != priv) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}
	ret = drm_remove_user_object(priv, uo);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int drm_bo_map_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_map_wait_idle_arg arg;
	struct drm_bo_info_req *req = &arg.d.req;
	struct drm_bo_info_rep *rep = &arg.d.rep;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_buffer_object_map(priv, req->handle, req->mask,
				    req->hint, rep);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
int drm_bo_unmap_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_handle_arg arg;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_buffer_object_unmap(priv, arg.handle);
	return ret;
}
int drm_bo_reference_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_reference_info_arg arg;
	struct drm_bo_handle_arg *req = &arg.d.req;
	struct drm_bo_info_rep *rep = &arg.d.rep;
	drm_user_object_t *uo;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_user_object_ref(priv, req->handle,
				  drm_buffer_type, &uo);
	if (ret)
		return ret;

	ret = drm_bo_handle_info(priv, req->handle, rep);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
int drm_bo_unreference_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_handle_arg arg;
	int ret = 0;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_user_object_unref(priv, arg.handle, drm_buffer_type);
	return ret;
}
int drm_bo_info_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_reference_info_arg arg;
	struct drm_bo_handle_arg *req = &arg.d.req;
	struct drm_bo_info_rep *rep = &arg.d.rep;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_bo_handle_info(priv, req->handle, rep);
	if (ret)
		return ret;
	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_bo_map_wait_idle_arg arg;
	struct drm_bo_info_req *req = &arg.d.req;
	struct drm_bo_info_rep *rep = &arg.d.rep;
	int ret;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	ret = drm_bo_handle_wait(priv, req->handle,
				 req->hint, rep);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
/**
 * Clean the unfenced list and put buffers on the regular LRU.
 * This is part of the memory manager cleanup and should only be
 * called with the DRI lock held.
 * Call dev->struct_mutex locked.
 */

static void drm_bo_clean_unfenced(drm_device_t *dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *head, *list;
	drm_buffer_object_t *entry;

	head = &bm->unfenced;

	list = head->next;
	while (list != head) {
		prefetch(list->next);
		entry = list_entry(list, drm_buffer_object_t, lru);

		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		list_del(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		list = head->next;
	}
}
static int drm_bo_leave_list(drm_buffer_object_t * bo,
			     uint32_t mem_type,
			     int free_pinned, int allow_errors)
{
	drm_device_t *dev = bo->dev;
	int ret = 0;

	mutex_lock(&bo->mutex);

	ret = drm_bo_expire_fence(bo, allow_errors);
	if (ret)
		goto out;

	if (free_pinned) {
		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node == bo->mem.mm_node)
			bo->pinned_node = NULL;
		if (bo->pinned_node != NULL) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
	}

	if (bo->mem.mem_type == mem_type)
		ret = drm_bo_evict(bo, mem_type, 0);

	if (ret) {
		if (allow_errors) {
			goto out;
		} else {
			ret = 0;
			DRM_ERROR("Cleanup eviction failed\n");
		}
	}

      out:
	mutex_unlock(&bo->mutex);
	return ret;
}
static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
					 int pinned_list)
{
	if (pinned_list)
		return list_entry(list, drm_buffer_object_t, pinned_lru);
	else
		return list_entry(list, drm_buffer_object_t, lru);
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int free_pinned,
				   int allow_errors,
				   int pinned_list)
{
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry, *nentry;
	int ret;
	int do_restart;

	/*
	 * The list traversal is a bit odd here, because an item may
	 * disappear from the list when we release the struct_mutex or
	 * when we decrease the usage count. Also we're not guaranteed
	 * to drain pinned lists, so we can't always restart.
	 */

      restart:
	nentry = NULL;
	list_for_each_safe(list, next, head) {
		prev = list->prev;

		entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
		atomic_inc(&entry->usage);
		if (nentry) {
			atomic_dec(&nentry->usage);
			nentry = NULL;
		}

		/*
		 * Protect the next item from destruction, so we can check
		 * its list pointers later on.
		 */

		if (next != head) {
			nentry = drm_bo_entry(next, pinned_list);
			atomic_inc(&nentry->usage);
		}
		mutex_unlock(&dev->struct_mutex);

		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
					allow_errors);
		mutex_lock(&dev->struct_mutex);

		drm_bo_usage_deref_locked(entry);
		if (ret)
			return ret;

		/*
		 * Has the next item disappeared from the list?
		 */

		do_restart = ((next->prev != list) && (next->prev != prev));

		if (nentry != NULL && do_restart) {
			drm_bo_usage_deref_locked(nentry);
			nentry = NULL;
		}

		if (do_restart)
			goto restart;
	}
	return 0;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!man->has_type) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	man->use_type = 0;
	man->has_type = 0;

	ret = 0;
	if (mem_type > 0) {

		drm_bo_clean_unfenced(dev);
		drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
		drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);

		if (drm_mm_clean(&man->manager)) {
			drm_mm_takedown(&man->manager);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
/**
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
 */

static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		DRM_ERROR("Memory type %u has not been initialized.\n",
			  mem_type);
		return 0;
	}

	drm_bo_clean_unfenced(dev);
	ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);

	return ret;
}
int drm_bo_init_mm(drm_device_t * dev,
		   unsigned type,
		   unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;
	drm_mem_type_manager_t *man;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}

	man = &bm->man[type];
	if (man->has_type) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
	if (ret)
		return ret;

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return ret;
		}
		ret = drm_mm_init(&man->manager, p_offset, p_size);
		if (ret)
			return ret;
	}
	man->has_type = 1;
	man->use_type = 1;

	INIT_LIST_HEAD(&man->lru);
	INIT_LIST_HEAD(&man->pinned);

	return 0;
}
EXPORT_SYMBOL(drm_bo_init_mm);
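
/*
 * Usage sketch (hypothetical driver init code, assuming p_offset and p_size
 * are given in pages): after drm_bo_driver_init() has set up
 * DRM_BO_MEM_LOCAL, additional aperture-backed types are registered with an
 * offset and size, e.g.
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_size >> PAGE_SHIFT);
 *
 * where tt_size would be the driver's TT aperture size in bytes.
 */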
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;
	drm_mem_type_manager_t *man;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

	while (i--) {
		man = &bm->man[i];
		if (man->has_type) {
			man->use_type = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			man->has_type = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);

	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->man[0].lru)) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->man[0].pinned)) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, 0, 0, 0);
	if (ret)
		goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
      out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}

EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_mm_init_arg arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret = -EINVAL;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	if (arg.magic != DRM_BO_INIT_MAGIC) {
		DRM_ERROR("You are using an old libdrm that is not compatible with\n"
			  "\tthe kernel DRM module. Please upgrade your libdrm.\n");
		return -EINVAL;
	}
	if (arg.major != DRM_BO_INIT_MAJOR) {
		DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
			  "\tversion don't match. Got %d, expected %d.\n",
			  arg.major, DRM_BO_INIT_MAJOR);
		return -EINVAL;
	}
	if (arg.minor > DRM_BO_INIT_MINOR) {
		DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
			  "\tlibdrm buffer object interface version is %d.%d.\n"
			  "\tkernel DRM buffer object interface version is %d.%d\n",
			  arg.major, arg.minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
		return -EINVAL;
	}

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		DRM_ERROR("DRM memory manager was not initialized.\n");
		goto out;
	}
	if (arg.mem_type == 0) {
		DRM_ERROR("System memory buffers already initialized.\n");
		goto out;
	}
	ret = drm_bo_init_mm(dev, arg.mem_type,
			     arg.p_offset, arg.p_size);

      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_mm_type_arg arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret = -EINVAL;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	LOCK_TEST_WITH_RETURN(dev, filp);
	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		DRM_ERROR("DRM memory manager was not initialized\n");
		goto out;
	}
	if (arg.mem_type == 0) {
		DRM_ERROR("No takedown for System memory buffers.\n");
		goto out;
	}
	ret = 0;
	if (drm_bo_clean_mm(dev, arg.mem_type)) {
		DRM_ERROR("Memory manager type %d not clean. "
			  "Delaying takedown\n", arg.mem_type);
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
int drm_mm_lock_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_mm_type_arg arg;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	LOCK_TEST_WITH_RETURN(dev, filp);
	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	ret = drm_bo_lock_mm(dev, arg.mem_type);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	struct drm_mm_type_arg arg;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
	LOCK_TEST_WITH_RETURN(dev, filp);
	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	ret = 0;
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}
/*
 * buffer object vm functions.
 */

int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		if (mem->mem_type == DRM_BO_MEM_LOCAL)
			return 0;

		if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
			return 0;

		if (mem->flags & DRM_BO_FLAG_CACHED)
			return 0;
	}
	return 1;
}

EXPORT_SYMBOL(drm_mem_reg_is_pci);
/**
 * \c Get the PCI offset for the buffer object memory.
 *
 * \param dev The drm device.
 * \param mem The buffer object memory region.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
 */

int drm_bo_pci_offset(drm_device_t * dev,
		      drm_bo_mem_reg_t * mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];

	*bus_size = 0;
	if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
		return -EINVAL;

	if (drm_mem_reg_is_pci(dev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
	}

	return 0;
}
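
/*
 * Example (a sketch; page_offset is a hypothetical page index within the
 * buffer): callers such as an mmap fault path would combine the returned
 * values to locate a page, roughly
 *
 *	unsigned long bus_base, bus_offset, bus_size;
 *	ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base,
 *				&bus_offset, &bus_size);
 *	if (!ret && bus_size != 0)
 *		pfn = (bus_base + bus_offset) >> PAGE_SHIFT + page_offset;
 *
 * with a bus_size of zero meaning the memory must be accessed through the
 * ttm page list instead of a PCI aperture.
 */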
/**
 * \c Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!dev->dev_mapping)
		return;

	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	if (list->user_token) {
		drm_ht_remove_item(&dev->map_hash, &list->hash);
		list->user_token = 0;
	}
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	}

	map = list->map;
	if (!map)
		return;

	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
	list->map = NULL;
	list->user_token = 0ULL;
	drm_bo_usage_deref_locked(bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
{
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;

	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->offset = 0;
	map->type = _DRM_TTM;
	map->flags = _DRM_REMOVABLE;
	map->size = bo->mem.num_pages * PAGE_SIZE;
	atomic_inc(&bo->usage);
	map->handle = (void *)bo;

	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
						    bo->mem.num_pages, 0, 0);

	if (!list->file_offset_node) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  bo->mem.num_pages, 0);

	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
		drm_bo_takedown_vm_locked(bo);
		return -ENOMEM;
	}

	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;

	return 0;
}