/**************************************************************************
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 **************************************************************************/
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 * Locking may look a bit complicated but isn't really:
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 * dev->struct_mutex also protects all lists and list heads, as well as the
 * hash tables and hash heads.
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those, we need
 * both the bo->mutex and the dev->struct_mutex.
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
 * traversal will, in general, need to be restarted.
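 *
 * A typical pattern for that, used for example by drm_fence_buffer_objects()
 * and drm_bo_force_list_clean() below, is roughly:
 *
 *	atomic_inc(&bo->usage);            (keep the object from disappearing)
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&bo->mutex);
 *	mutex_lock(&dev->struct_mutex);    (and restart the traversal if needed)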
static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
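/*
 * Map a buffer memory type index to its placement flag bit; the
 * memory-type placement flags live in bits 24 and up of the flag word.
 */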
static inline uint32_t drm_bo_type_flags(unsigned type)
	return (1 << (24 + type));
 * bo locked. dev->struct_mutex locked.
void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
	drm_mem_type_manager_t *man;
	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
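/*
 * Put the buffer on the LRU list of its current memory type, unless it is
 * pinned (NO_MOVE / NO_EVICT) in that very memory type, in which case it is
 * kept off the eviction LRU.
 */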
void drm_bo_add_to_lru(drm_buffer_object_t * bo)
	drm_mem_type_manager_t *man;
	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		INIT_LIST_HEAD(&bo->lru);
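/*
 * Tear down user-space virtual mappings before a buffer move. With
 * DRM_ODD_MM_COMPAT the kernel mm mappings are locked and unmapped as well.
 * Called with bo->mutex held.
 */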
static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
#ifdef DRM_ODD_MM_COMPAT
	ret = drm_bo_lock_kmm(bo);
	drm_bo_unmap_virtual(bo);
	drm_bo_finish_unmap(bo);
	drm_bo_unmap_virtual(bo);
static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
#ifdef DRM_ODD_MM_COMPAT
	ret = drm_bo_remap_bound(bo);
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	drm_bo_unlock_kmm(bo);
 * Call bo->mutex locked.
static int drm_bo_add_ttm(drm_buffer_object_t * bo)
	drm_device_t *dev = bo->dev;
		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
	case drm_bo_type_user:
	case drm_bo_type_fake:
		DRM_ERROR("Illegal buffer object type\n");
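/*
 * Move the buffer into the memory region described by @mem: create and bind
 * a TTM if the new memory type needs one, then use the cheapest available
 * move path (TTM move, driver move hook, or memcpy fallback), and finally
 * update the eviction state and the user-visible buffer offset.
 */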
static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
				  drm_bo_mem_reg_t * mem,
				  int evict, int no_wait)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
	drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
	if (old_is_pci || new_is_pci)
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	 * Create and bind a ttm if required.
	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, new_man->flags &
					   mem->mm_node->start);
	if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
		drm_bo_mem_reg_t *old_mem = &bo->mem;
		uint32_t save_flags = old_mem->flags;
		uint32_t save_mask = old_mem->mask;
		old_mem->mask = save_mask;
		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
		   !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);
	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		    dev->driver->bo_driver->invalidate_caches(dev,
			DRM_ERROR("Cannot flush read caches\n");
	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);
		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);
	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
	drm_fence_object_t *fence = bo->fence;
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
		    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
		drm_fence_usage_deref_unlocked(dev, fence);
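/*
 * Poll the buffer's fence for up to roughly three seconds. If it still has
 * not signaled, assume a GPU lockup (or that the fence driver was taken
 * down) and drop the fence reference so the buffer can be evicted or
 * destroyed anyway.
 */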
static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
		unsigned long _end = jiffies + 3 * DRM_HZ;
			ret = drm_bo_wait(bo, 0, 1, 0);
			if (ret && allow_errors)
		} while (ret && !time_after_eq(jiffies, _end));
			DRM_ERROR("Detected GPU lockup or "
				  "fence driver was taken down. "
				  "Evicting buffer.\n");
			drm_fence_usage_deref_unlocked(dev, bo->fence);
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing it from the lru lists and memory managers.
static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);
	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
	if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
		drm_fence_usage_deref_locked(dev, bo->fence);
	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);
	mutex_lock(&dev->struct_mutex);
	if (!atomic_dec_and_test(&bo->usage)) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	mutex_unlock(&bo->mutex);
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
		atomic_dec(&bm->count);
		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	drm_bo_cleanup_refs(bo, 0);
 * Call dev->struct_mutex locked.
static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;
	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
			atomic_inc(&nentry->usage);
		drm_bo_cleanup_refs(entry, remove_all);
			atomic_dec(&nentry->usage);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
static void drm_bo_delayed_workqueue(struct work_struct *work)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_manager_t *bm =
	    container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
	DRM_DEBUG("Delayed delete Worker\n");
	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	mutex_unlock(&dev->struct_mutex);
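/*
 * Drop a usage reference with dev->struct_mutex held; the buffer object is
 * destroyed when the reference count reaches zero.
 */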
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
	if (atomic_dec_and_test(&bo->usage)) {
		drm_bo_destroy_locked(bo);
static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);
	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(bo);
static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
	drm_device_t *dev = bo->dev;
	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(bo);
		mutex_unlock(&dev->struct_mutex);
 * Note: The caller has to register (if applicable)
 * and deregister fence object usage.
int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	mutex_lock(&dev->struct_mutex);
		list = &bm->unfenced;
	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	list_splice_init(list, &f_list);
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, 0, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
		mutex_lock(&dev->struct_mutex);
	while (l != &f_list) {
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
	atomic_add(count, &fence->usage);
	DRM_DEBUG("Fenced %d buffers\n", count);
	mutex_unlock(&dev->struct_mutex);
EXPORT_SYMBOL(drm_fence_buffer_objects);
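/*
 * Evict the buffer from @mem_type: wait for the buffer to become idle, pick
 * an eviction placement from the driver's evict_mask() hook and move the
 * buffer there. Called with bo->mutex held.
 */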
static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
	drm_device_t *dev = bo->dev;
	drm_bo_mem_reg_t evict_mem;
	 * Someone might have modified the buffer before we took the buffer mutex.
	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
	if (bo->mem.mem_type != mem_type)
	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
	evict_mem.mm_node = NULL;
	if (bo->type == drm_bo_type_fake) {
		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
		bo->mem.mm_node = NULL;
	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
			DRM_ERROR("Buffer eviction failed\n");
	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);
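/*
 * Repeatedly evict the least recently used buffer of @mem_type until
 * drm_mm_search_free() can satisfy the request, then claim the freed block
 * for @mem.
 */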
static int drm_bo_mem_force_space(drm_device_t * dev,
				  drm_bo_mem_reg_t * mem,
				  uint32_t mem_type, int no_wait)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	mutex_lock(&dev->struct_mutex);
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (lru->next == lru)
		entry = list_entry(lru->next, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(entry);
		mutex_lock(&dev->struct_mutex);
		mutex_unlock(&dev->struct_mutex);
	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	mem->mem_type = mem_type;
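/*
 * Check whether placing a buffer in @mem_type is compatible with the
 * placement flags in @mask. On success, *res_mask holds the flags the
 * placement would actually give the buffer.
 */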
static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
				uint32_t mask, uint32_t * res_mask)
	uint32_t cur_flags = drm_bo_type_flags(mem_type);
	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
	flag_diff = (mask ^ cur_flags);
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
	*res_mask = cur_flags;
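/*
 * Find space for @mem, trying the driver's memory types in priority order.
 * Free space is tried first; if no type has room, the "busy" priority list
 * is walked and buffers are evicted to make space.
 */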
int drm_bo_mem_space(drm_buffer_object_t * bo,
		     drm_bo_mem_reg_t * mem, int no_wait)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	drm_mm_node_t *node = NULL;
	for (i = 0; i < num_prios; ++i) {
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
		if (mem_type == DRM_BO_MEM_LOCAL)
		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		mutex_unlock(&dev->struct_mutex);
		if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
			mem->mem_type = mem_type;
			mem->flags = cur_flags;
	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;
	for (i = 0; i < num_prios; ++i) {
		man = &bm->man[mem_type];
		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
			mem->flags = cur_flags;
	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
EXPORT_SYMBOL(drm_bo_mem_space);
static int drm_bo_new_mask(drm_buffer_object_t * bo,
			   uint32_t new_mask, uint32_t hint)
	if (bo->type == drm_bo_type_user) {
		DRM_ERROR("User buffers are not supported yet\n");
	if (bo->type == drm_bo_type_fake &&
	    !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
		DRM_ERROR("Fake buffers must be pinned.\n");
	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
		DRM_ERROR("Invalid buffer object rwx properties\n");
	bo->mem.mask = new_mask;
 * Call dev->struct_mutex locked.
drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;
	uo = drm_lookup_user_object(priv, handle);
	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
 * Unlike drm_bo_busy(), this function doesn't do any fence flushing.
static int drm_bo_quick_busy(drm_buffer_object_t * bo)
	drm_fence_object_t *fence = bo->fence;
	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from, 0 otherwise.
static int drm_bo_busy(drm_buffer_object_t * bo)
	drm_fence_object_t *fence = bo->fence;
	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
static int drm_bo_read_cached(drm_buffer_object_t * bo)
	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
 * Wait until a buffer is unmapped.
static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be one atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
		mutex_unlock(&bo->mutex);
		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
			    !drm_bo_check_unfenced(bo));
		mutex_lock(&bo->mutex);
		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
			DRM_ERROR("Timeout waiting for buffer to become fenced\n");
 * Fill in the ioctl reply argument with buffer info.
static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				drm_bo_arg_reply_t * rep)
	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->mem.num_pages * PAGE_SIZE;
	rep->offset = bo->offset;
	rep->arg_handle = bo->map_list.user_token;
	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->page_alignment = bo->mem.page_alignment;
	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically removed.
static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 drm_bo_arg_reply_t * rep)
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	if (atomic_inc_and_test(&bo->mapped)) {
		if (no_wait && drm_bo_busy(bo)) {
			atomic_dec(&bo->mapped);
		ret = drm_bo_wait(bo, 0, 0, no_wait);
			atomic_dec(&bo->mapped);
		if ((map_flags & DRM_BO_FLAG_READ) &&
		    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
		    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
			drm_bo_read_cached(bo);
	} else if ((map_flags & DRM_BO_FLAG_READ) &&
		   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
		   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
		 * We are already mapped with different flags.
		 * We need to wait for an unmap.
		ret = drm_bo_wait_unmapped(bo, no_wait);
	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);
		drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(bo);
	mutex_unlock(&dev->struct_mutex);
 * Call dev->struct_mutex locked.
static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for an unmapped buffer.
	BUG_ON(action != _DRM_REF_TYPE1);
	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
		       int no_wait, int move_unfenced)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_mem_reg_t mem;
	 * Flush outstanding fences.
	 * Wait for outstanding fences.
	ret = drm_bo_wait(bo, 0, 0, no_wait);
	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;
	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_add_tail(&bo->lru, &bm->unfenced);
	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
			_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);
	 * Determine where to move the buffer.
	ret = drm_bo_mem_space(bo, &mem, no_wait);
	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
	if (ret || !move_unfenced) {
		mutex_lock(&dev->struct_mutex);
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		DRM_WAKEUP(&bo->event_queue);
		drm_bo_add_to_lru(bo);
		mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&bm->evict_mutex);
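/*
 * Check whether a memory region's current placement flags already satisfy
 * its mask, i.e. whether a move can be skipped.
 */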
static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
	uint32_t flag_diff = (mem->mask ^ mem->flags);
	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t mem_type = 0;
	if (drm_bo_mem_compat(mem))
	BUG_ON(mem->mm_node);
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];
		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
		mem->mm_node = NULL;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
	DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
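/*
 * Bring a buffer into a placement compatible with bo->mem.mask: move it if
 * necessary, update the pinned (NO_MOVE / NO_EVICT) bookkeeping, add a TTM
 * for local memory if needed, and finally put the buffer on the appropriate
 * LRU or on the unfenced list.
 */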
static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      int move_unfenced, int no_wait)
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
	    driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
		DRM_ERROR("Driver did not support given buffer permissions\n");
	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (bo->type == drm_bo_type_fake) {
		ret = drm_bo_check_fake(dev, &bo->mem);
	 * Check whether we need to move the buffer.
	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
				DRM_ERROR("Failed moving buffer.\n");
	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);
		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node != NULL)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		mutex_unlock(&dev->struct_mutex);
	} else if (bo->pinned_node != NULL) {
		mutex_lock(&dev->struct_mutex);
		drm_mm_put_block(bo->pinned_node);
		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	 * We might need to add a TTM.
	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
	 * Finally, adjust lru to be sure.
	mutex_lock(&dev->struct_mutex);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			DRM_WAKEUP(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&dev->struct_mutex);
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
				  uint32_t flags, uint32_t mask, uint32_t hint,
				  drm_bo_arg_reply_t * rep)
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
	ret = drm_bo_new_mask(bo, flags, hint);
	    drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
			      drm_bo_arg_reply_t * rep)
	drm_buffer_object_t *bo;
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
			      uint32_t hint, drm_bo_arg_reply_t * rep)
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
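/*
 * Allocate and initialize a new buffer object and validate it into its
 * initial placement. On success, *buf_obj holds the only usage reference.
 */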
int drm_buffer_object_create(drm_file_t * priv,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	unsigned long num_pages;
	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);
	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->pinned_lru);
	INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->p_mm_list);
	INIT_LIST_HEAD(&bo->vma_list);
	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
	bo->mem.num_pages = num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
		bo->buffer_start = buffer_start;
	atomic_inc(&bm->count);
	ret = drm_bo_new_mask(bo, mask, hint);
	if (bo->type == drm_bo_type_dc) {
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_setup_vm_locked(bo);
		mutex_unlock(&dev->struct_mutex);
	ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
	mutex_unlock(&bo->mutex);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
	drm_device_t *dev = priv->head->dev;
	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;
	mutex_unlock(&dev->struct_mutex);
static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
	LOCK_TEST_WITH_RETURN(dev, filp);
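/*
 * Top-level ioctl dispatcher for buffer object operations: create, map,
 * unmap, destroy, reference, unreference, validate, info and wait_idle.
 */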
int drm_bo_ioctl(DRM_IOCTL_ARGS)
	drm_bo_arg_request_t *req = &arg.d.req;
	drm_bo_arg_reply_t rep;
	drm_user_object_t *uo;
	drm_buffer_object_t *entry;
	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
			rep.ret = drm_bo_lock_test(dev, filp);
			    drm_buffer_object_create(priv, req->size,
						     req->page_alignment,
						     req->buffer_start, &entry);
			    drm_bo_add_user_object(priv, entry,
						   DRM_BO_FLAG_SHAREABLE);
				drm_bo_usage_deref_unlocked(entry);
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			rep.ret = drm_buffer_object_unmap(priv, req->handle);
			rep.ret = drm_buffer_object_map(priv, req->handle,
		case drm_bo_destroy:
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			if (!uo || (uo->type != drm_buffer_type)
			    || uo->owner != priv) {
				mutex_unlock(&dev->struct_mutex);
			rep.ret = drm_remove_user_object(priv, uo);
			mutex_unlock(&dev->struct_mutex);
		case drm_bo_reference:
			rep.ret = drm_user_object_ref(priv, req->handle,
						      drm_buffer_type, &uo);
			 * Note: The following code is only to
			 * fill in the rep argument. drm_lookup_user_object ups the
			 * refcount which is decreased again when we're done with the bo.
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			    drm_user_object_entry(uo, drm_buffer_object_t,
			atomic_dec(&entry->usage);
			mutex_unlock(&dev->struct_mutex);
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
		case drm_bo_unreference:
			rep.ret = drm_user_object_unref(priv, req->handle,
		case drm_bo_validate:
			rep.ret = drm_bo_lock_test(dev, filp);
			    drm_bo_handle_validate(priv, req->handle, req->mask,
						   req->arg_handle, req->hint,
			rep.ret = drm_bo_lock_test(dev, filp);
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
		case drm_bo_wait_idle:
			rep.ret = drm_bo_handle_wait(priv, req->handle,
		case drm_bo_ref_fence:
			DRM_ERROR("Function is not implemented yet.\n");
		 * A signal interrupted us. Make sure the ioctl is restartable.
		if (rep.ret == -EAGAIN)
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
 * Clean the unfenced list and put its buffers on the regular LRU.
 * This is part of the memory manager cleanup and should only be
 * called with the DRI lock held.
 * Call dev->struct_mutex locked.
static void drm_bo_clean_unfenced(drm_device_t *dev)
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *head, *list;
	drm_buffer_object_t *entry;
	head = &bm->unfenced;
	while (list != head) {
		prefetch(list->next);
		entry = list_entry(list, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
static int drm_bo_leave_list(drm_buffer_object_t * bo,
			     int free_pinned, int allow_errors)
	drm_device_t *dev = bo->dev;
	mutex_lock(&bo->mutex);
	ret = drm_bo_expire_fence(bo, allow_errors);
		DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node == bo->mem.mm_node)
			bo->pinned_node = NULL;
		if (bo->pinned_node != NULL) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
	if (bo->mem.mem_type == mem_type)
		ret = drm_bo_evict(bo, mem_type, 0);
			DRM_ERROR("Cleanup eviction failed\n");
	mutex_unlock(&bo->mutex);
static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
		return list_entry(list, drm_buffer_object_t, pinned_lru);
		return list_entry(list, drm_buffer_object_t, lru);
 * dev->struct_mutex locked.
static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry, *nentry;
	 * The list traversal is a bit odd here, because an item may
	 * disappear from the list when we release the struct_mutex or
	 * when we decrease the usage count. Also we're not guaranteed
	 * to drain pinned lists, so we can't always restart.
	list_for_each_safe(list, next, head) {
		entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
		atomic_inc(&entry->usage);
			atomic_dec(&nentry->usage);
		 * Protect the next item from destruction, so we can check
		 * its list pointers later on.
			nentry = drm_bo_entry(next, pinned_list);
			atomic_inc(&nentry->usage);
		mutex_unlock(&dev->struct_mutex);
		ret = drm_bo_leave_list(entry, mem_type, free_pinned,
		mutex_lock(&dev->struct_mutex);
		drm_bo_usage_deref_locked(entry);
		 * Has the next item disappeared from the list?
		do_restart = ((next->prev != list) && (next->prev != prev));
		if (nentry != NULL && do_restart) {
			drm_bo_usage_deref_locked(nentry);
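/*
 * Take down the memory manager for @mem_type: evict all buffers, including
 * pinned ones, and tear down the underlying drm_mm range manager once it is
 * clean.
 */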
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
	if (!man->has_type) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
	drm_bo_clean_unfenced(dev);
	drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
	drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
	if (drm_mm_clean(&man->manager)) {
		drm_mm_takedown(&man->manager);
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem_type];
	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
	if (!man->has_type) {
		DRM_ERROR("Memory type %u has not been initialized.\n",
	drm_bo_clean_unfenced(dev);
	ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
	ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
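/*
 * Initialize the memory manager for @type. For anything but
 * DRM_BO_MEM_LOCAL this sets up a drm_mm range manager covering p_size
 * pages starting at p_offset.
 */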
int drm_bo_init_mm(drm_device_t * dev,
		   unsigned long p_offset, unsigned long p_size)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man;
	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
	man = &bm->man[type];
	if (man->has_type) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
	ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
	if (type != DRM_BO_MEM_LOCAL) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
		ret = drm_mm_init(&man->manager, p_offset, p_size);
	INIT_LIST_HEAD(&man->lru);
	INIT_LIST_HEAD(&man->pinned);
EXPORT_SYMBOL(drm_bo_init_mm);
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
int drm_bo_driver_finish(drm_device_t * dev)
	drm_buffer_manager_t *bm = &dev->bm;
	unsigned i = DRM_BO_MEM_TYPES;
	drm_mem_type_manager_t *man;
	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized)
	bm->initialized = 0;
		if (man->has_type) {
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
	mutex_unlock(&dev->struct_mutex);
	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	if (list_empty(&bm->man[0].lru)) {
		DRM_DEBUG("Swap list was clean\n");
	if (list_empty(&bm->man[0].pinned)) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
int drm_bo_driver_init(drm_device_t * dev)
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	ret = drm_bo_init_mm(dev, 0, 0, 0);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
	bm->initialized = 1;
	atomic_set(&bm->count, 0);
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
		DRM_ERROR("Buffer objects are not supported by this driver\n");
	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
	switch (arg.req.op) {
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
		if (arg.req.mem_type == 0) {
			    ("System memory buffers already initialized.\n");
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for System memory buffers.\n");
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		DRM_ERROR("Function not implemented yet\n");
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
 * buffer object vm functions.
int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
	if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
		if (mem->mem_type == DRM_BO_MEM_LOCAL)
		if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
		if (mem->flags & DRM_BO_FLAG_CACHED)
EXPORT_SYMBOL(drm_mem_reg_is_pci);
 * \c Get the PCI offset for the buffer object memory.
 * \param bo The buffer object.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero.
int drm_bo_pci_offset(drm_device_t * dev,
		      drm_bo_mem_reg_t * mem,
		      unsigned long *bus_base,
		      unsigned long *bus_offset, unsigned long *bus_size)
	drm_buffer_manager_t *bm = &dev->bm;
	drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
	if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
	if (drm_mem_reg_is_pci(dev, mem)) {
		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
		*bus_size = mem->num_pages << PAGE_SHIFT;
		*bus_base = man->io_offset;
 * \c Kill all user-space virtual mappings of this buffer object.
 * \param bo The buffer object.
 * Call bo->mutex locked.
void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
	drm_device_t *dev = bo->dev;
	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
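/*
 * Remove the buffer's fake mmap offset: drop the map hash entry and the
 * offset-manager block, free the map, and release the usage reference taken
 * by drm_bo_setup_vm_locked().
 */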
static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;
	if (list->user_token) {
		drm_ht_remove_item(&dev->map_hash, &list->hash);
		list->user_token = 0;
	if (list->file_offset_node) {
		drm_mm_put_block(list->file_offset_node);
		list->file_offset_node = NULL;
	drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
	list->user_token = 0ULL;
	drm_bo_usage_deref_locked(bo);
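/*
 * Set up the fake mmap offset for the buffer: allocate a _DRM_TTM map,
 * reserve a range in the device's offset manager and hash it, so user space
 * can mmap the buffer at list->user_token.
 */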
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
	drm_map_list_t *list = &bo->map_list;
	drm_local_map_t *map;
	drm_device_t *dev = bo->dev;
	list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
	map->type = _DRM_TTM;
	map->flags = _DRM_REMOVABLE;
	map->size = bo->mem.num_pages * PAGE_SIZE;
	atomic_inc(&bo->usage);
	map->handle = (void *)bo;
	list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
						    bo->mem.num_pages, 0, 0);
	if (!list->file_offset_node) {
		drm_bo_takedown_vm_locked(bo);
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  bo->mem.num_pages, 0);
	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
		drm_bo_takedown_vm_locked(bo);
	list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;