/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Locking may look a bit complicated but isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * when there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads,
 * as well as hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those,
 * we need both bo->mutex and dev->struct_mutex.
 *
 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated. When dev->struct_mutex is released to grab
 * bo->mutex, the list traversal will, in general, need to be restarted.
 */
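/*
 * Illustrative sketch (assumed shape, not code from this file) of the
 * traversal pattern the above locking order forces; compare
 * drm_bo_force_list_clean() further down:
 *
 *        mutex_lock(&dev->struct_mutex);
 *        list_for_each_safe(list, next, head) {
 *                entry = list_entry(list, drm_buffer_object_t, lru);
 *                atomic_inc(&entry->usage);        -- pin so it can't vanish
 *                mutex_unlock(&dev->struct_mutex); -- bo->mutex comes first
 *                mutex_lock(&entry->mutex);
 *                mutex_lock(&dev->struct_mutex);
 *                ...                               -- restart if the list
 *        }                                         -- changed meanwhile
 *        mutex_unlock(&dev->struct_mutex);
 */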
static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo);
static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo);
static void drm_bo_unmap_virtual(drm_buffer_object_t *bo);
static int drm_bo_mem_space(drm_device_t *dev, drm_bo_mem_reg_t *mem,
                            int no_wait);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
        (_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
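/*
 * Editor's note (explanation, not part of the original file): the XOR
 * identity above copies exactly the _mask bits of _new into _old and
 * leaves all other bits untouched, i.e. it behaves like
 *
 *        (_old) = ((_old) & ~(_mask)) | ((_new) & (_mask));
 *
 * For example, DRM_FLAG_MASKED(flags, 0, _DRM_BO_FLAG_UNFENCED) clears
 * only the UNFENCED bit, as used throughout this file.
 */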
static inline uint32_t drm_bo_type_flags(unsigned type)
{
        return (1 << (24 + type));
}
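/*
 * Editor's note (assumption based on the drm.h of this era): this relies
 * on the DRM_BO_FLAG_MEM_* flags occupying consecutive bits from bit 24
 * upward, in the same order as the DRM_BO_MEM_* type indices, e.g.:
 *
 *        drm_bo_type_flags(DRM_BO_MEM_LOCAL) == DRM_BO_FLAG_MEM_LOCAL
 *        drm_bo_type_flags(DRM_BO_MEM_TT)    == DRM_BO_FLAG_MEM_TT
 *
 * which is why the switch in drm_bo_add_to_lru() below can map memory
 * flags back to memory types.
 */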
/*
 * bo locked. dev->struct_mutex locked.
 */

static void drm_bo_add_to_lru(drm_buffer_object_t * bo,
                              drm_buffer_manager_t * bm)
{
        struct list_head *list;
        drm_mem_type_manager_t *man;

        switch (bo->mem.flags & DRM_BO_MASK_MEM) {
        case DRM_BO_FLAG_MEM_TT:
                bo->mem.mem_type = DRM_BO_MEM_TT;
                break;
        case DRM_BO_FLAG_MEM_VRAM:
                bo->mem.mem_type = DRM_BO_MEM_VRAM;
                break;
        case DRM_BO_FLAG_MEM_LOCAL:
                bo->mem.mem_type = DRM_BO_MEM_LOCAL;
                break;
        }

        man = &bm->man[bo->mem.mem_type];
        list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
                &man->pinned : &man->lru;
        list_add_tail(&bo->lru, list);
}
static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
                                int force_no_move)
{
        drm_device_t *dev = bo->dev;
        int ret;

        if (bo->mem.mm_node) {
#ifdef DRM_ODD_MM_COMPAT
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_lock_kmm(bo);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return ret;
                }
                drm_bo_unmap_virtual(bo);
                drm_bo_finish_unmap(bo);
                drm_bo_unlock_kmm(bo);
#else
                drm_bo_unmap_virtual(bo);
                mutex_lock(&dev->struct_mutex);
#endif
                if (evict)
                        drm_ttm_evict(bo->ttm);
                else
                        drm_ttm_unbind(bo->ttm);

                bo->mem.mem_type = DRM_BO_MEM_LOCAL;
                if (!(bo->mem.flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        bo->mem.flags &= ~DRM_BO_FLAG_MEM_TT;
        bo->mem.flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

        return 0;
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
                       int no_wait)
{
        drm_fence_object_t *fence = bo->fence;
        int ret;

        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                if (no_wait)
                        return -EBUSY;

                ret = drm_fence_object_wait(dev, fence, lazy, ignore_signals,
                                            bo->fence_type);
                if (ret)
                        return ret;

                drm_fence_usage_deref_unlocked(dev, fence);
                bo->fence = NULL;
        }
        return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing it from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        atomic_inc(&bo->usage);
        mutex_unlock(&dev->struct_mutex);
        mutex_lock(&bo->mutex);

        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

        if (bo->fence && drm_fence_object_signaled(bo->fence,
                                                   bo->fence_type)) {
                drm_fence_usage_deref_locked(dev, bo->fence);
                bo->fence = NULL;
        }

        if (bo->fence && remove_all) {
                if (bm->nice_mode) {
                        unsigned long _end = jiffies + 3 * DRM_HZ;
                        int ret;
                        do {
                                ret = drm_bo_wait(bo, 0, 1, 0);
                        } while (ret && !time_after_eq(jiffies, _end));

                        if (bo->fence) {
                                bm->nice_mode = 0;
                                DRM_ERROR("Detected GPU lockup or "
                                          "fence driver was taken down. "
                                          "Evicting waiting buffers.\n");
                        }
                }
                if (bo->fence) {
                        drm_fence_usage_deref_unlocked(dev, bo->fence);
                        bo->fence = NULL;
                }
        }
        mutex_lock(&dev->struct_mutex);

        if (!atomic_dec_and_test(&bo->usage)) {
                goto out;
        }

        if (!bo->fence) {
                list_del_init(&bo->lru);
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                list_del_init(&bo->ddestroy);
                mutex_unlock(&bo->mutex);
                drm_bo_destroy_locked(bo);
                return;
        }

        if (list_empty(&bo->ddestroy)) {
                drm_fence_object_flush(dev, bo->fence, bo->fence_type);
                list_add_tail(&bo->ddestroy, &bm->ddestroy);
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) <
                                       1) ? 1 : DRM_HZ / 100);
        }

out:
        mutex_unlock(&bo->mutex);
}
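/*
 * Editor's note: the usage-pinning idiom used above (sketch, using only
 * the fields that appear in this file):
 *
 *        atomic_inc(&bo->usage);           -- keep bo alive
 *        mutex_unlock(&dev->struct_mutex); -- now safe to drop the lock
 *        mutex_lock(&bo->mutex);
 *        ...                               -- do blocking work
 *        mutex_lock(&dev->struct_mutex);
 *        if (!atomic_dec_and_test(&bo->usage))
 *                return;                   -- someone else still holds it
 *        ...                               -- we held the last reference
 */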
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
            atomic_read(&bo->usage) == 0) {
                BUG_ON(bo->fence != NULL);

#ifdef DRM_ODD_MM_COMPAT
                BUG_ON(!list_empty(&bo->vma_list));
                BUG_ON(!list_empty(&bo->p_mm_list));
#endif

                if (bo->ttm) {
                        drm_ttm_unbind(bo->ttm);
                        drm_destroy_ttm(bo->ttm);
                }

                atomic_dec(&bm->count);

                drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
                return;
        }

        /*
         * Some stuff is still trying to reference the buffer object.
         * Get rid of those references.
         */

        drm_bo_cleanup_refs(bo, 0);
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
        drm_buffer_manager_t *bm = &dev->bm;

        drm_buffer_object_t *entry, *nentry;
        struct list_head *list, *next;

        list_for_each_safe(list, next, &bm->ddestroy) {
                entry = list_entry(list, drm_buffer_object_t, ddestroy);

                nentry = NULL;
                if (next != &bm->ddestroy) {
                        nentry = list_entry(next, drm_buffer_object_t,
                                            ddestroy);
                        atomic_inc(&nentry->usage);
                }

                drm_bo_cleanup_refs(entry, remove_all);

                if (nentry)
                        atomic_dec(&nentry->usage);
        }
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        drm_device_t *dev = (drm_device_t *) data;
        drm_buffer_manager_t *bm = &dev->bm;
#else
        drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
        drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

        DRM_DEBUG("Delayed delete Worker\n");

        mutex_lock(&dev->struct_mutex);
        if (!bm->initialized) {
                mutex_unlock(&dev->struct_mutex);
                return;
        }
        drm_bo_delayed_delete(dev, 0);
        if (bm->initialized && !list_empty(&bm->ddestroy)) {
                schedule_delayed_work(&bm->wq,
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }
        mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
        if (atomic_dec_and_test(&bo->usage)) {
                drm_bo_destroy_locked(bo);
        }
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
        drm_buffer_object_t *bo =
            drm_user_object_entry(uo, drm_buffer_object_t, base);

        drm_bo_takedown_vm_locked(bo);
        drm_bo_usage_deref_locked(bo);
}

static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
        drm_device_t *dev = bo->dev;

        if (atomic_dec_and_test(&bo->usage)) {
                mutex_lock(&dev->struct_mutex);
                if (atomic_read(&bo->usage) == 0)
                        drm_bo_destroy_locked(bo);
                mutex_unlock(&dev->struct_mutex);
        }
}
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
                             struct list_head *list,
                             uint32_t fence_flags,
                             drm_fence_object_t * fence,
                             drm_fence_object_t ** used_fence)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        drm_buffer_object_t *entry;
        uint32_t fence_type = 0;
        int count = 0;
        int ret = 0;
        struct list_head *l;
        LIST_HEAD(f_list);

        mutex_lock(&dev->struct_mutex);

        if (!list)
                list = &bm->unfenced;

        list_for_each_entry(entry, list, lru) {
                BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
                fence_type |= entry->fence_type;
                if (entry->fence_class != 0) {
                        DRM_ERROR("Fence class %d is not implemented yet.\n",
                                  entry->fence_class);
                        ret = -EINVAL;
                        goto out;
                }
                count++;
        }

        if (!count) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * Transfer to a local list before we release the dev->struct_mutex;
         * This is so we don't get any new unfenced objects while fencing
         * the ones we already have.
         */

        list_splice_init(list, &f_list);

        if (fence) {
                if ((fence_type & fence->type) != fence_type) {
                        DRM_ERROR("Given fence doesn't match buffers "
                                  "on unfenced list.\n");
                        ret = -EINVAL;
                        goto out;
                }
        } else {
                mutex_unlock(&dev->struct_mutex);
                ret = drm_fence_object_create(dev, fence_type,
                                              fence_flags | DRM_FENCE_FLAG_EMIT,
                                              &fence);
                mutex_lock(&dev->struct_mutex);
                if (ret)
                        goto out;
        }

        count = 0;
        l = f_list.next;
        while (l != &f_list) {
                entry = list_entry(l, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);
                list_del_init(l);
                if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
                        count++;
                        if (entry->fence)
                                drm_fence_usage_deref_locked(dev, entry->fence);
                        entry->fence = fence;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                        DRM_WAKEUP(&entry->event_queue);
                        drm_bo_add_to_lru(entry, bm);
                }
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(entry);
                l = f_list.next;
        }
        atomic_add(count, &fence->usage);
        DRM_DEBUG("Fenced %d buffers\n", count);
out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
        return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
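/*
 * Hypothetical caller sketch (not from this file), illustrating the note
 * above: with list == NULL the whole bm->unfenced list is fenced, a fence
 * is created and emitted on the caller's behalf, and the caller owns the
 * returned fence reference, which it must drop when done:
 *
 *        drm_fence_object_t *fence = NULL;
 *        int ret;
 *
 *        ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *        if (!ret && fence)
 *                drm_fence_usage_deref_unlocked(dev, fence);
 */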
/*
 * Call bo->mutex locked.
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
                        int no_wait, int force_no_move)
{
        int ret = 0;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_bo_mem_reg_t evict_mem;

        /*
         * Someone might have modified the buffer before we took the
         * buffer mutex.
         */

        if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
                goto out;
        if (!(bo->mem.flags & drm_bo_type_flags(mem_type)))
                goto out;

        ret = drm_bo_wait(bo, 0, 0, no_wait);

        if (ret && ret != -EAGAIN) {
                DRM_ERROR("Failed to expire fence before "
                          "buffer eviction.\n");
                goto out;
        }

        evict_mem = bo->mem;
        evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
        ret = drm_bo_mem_space(dev, &evict_mem, no_wait);

        if (ret && ret != -EAGAIN) {
                DRM_ERROR("Failed to find memory space for "
                          "buffer eviction.\n");
                goto out;
        }

        if ((mem_type != DRM_BO_MEM_TT) &&
            (evict_mem.mem_type != DRM_BO_MEM_LOCAL)) {
                ret = -EINVAL;
                DRM_ERROR("Unsupported memory types for eviction.\n");
                goto out;
        }

        ret = drm_move_tt_to_local(bo, 1, force_no_move);
        if (ret)
                goto out;

        mutex_lock(&dev->struct_mutex);
        list_del_init(&bo->lru);
        drm_bo_add_to_lru(bo, bm);
        mutex_unlock(&dev->struct_mutex);

        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
                        _DRM_BO_FLAG_EVICTED);
out:
        return ret;
}
static int drm_bo_mem_force_space(drm_device_t *dev,
                                  drm_bo_mem_reg_t *mem,
                                  uint32_t mem_type,
                                  int no_wait)
{
        drm_mm_node_t *node;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_buffer_object_t *entry;
        drm_mem_type_manager_t *man = &bm->man[mem_type];
        struct list_head *lru;
        unsigned long num_pages = mem->num_pages;
        int ret;

        mutex_lock(&dev->struct_mutex);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;

                lru = &man->lru;
                if (lru->next == lru)
                        break;

                entry = list_entry(lru->next, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

                ret = drm_bo_evict(entry, mem_type, no_wait, 0);
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_unlocked(entry);
                if (ret)
                        return ret;
                mutex_lock(&dev->struct_mutex);
        } while (1);

        if (!node) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOMEM;
        }

        node = drm_mm_get_block(node, num_pages, mem->page_alignment);
        mutex_unlock(&dev->struct_mutex);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        mem->flags = drm_bo_type_flags(mem_type);
        return 0;
}
static int drm_bo_mem_space(drm_device_t *dev,
                            drm_bo_mem_reg_t *mem,
                            int no_wait)
{
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man;

        uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
        const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
        uint32_t i;
        uint32_t mem_type = DRM_BO_MEM_LOCAL;
        int type_found = 0;
        int type_ok = 0;
        int has_eagain = 0;
        drm_mm_node_t *node = NULL;
        int ret;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                type_ok = drm_bo_type_flags(mem_type) & mem->mask;
                if (!type_ok)
                        continue;

                if (mem_type == DRM_BO_MEM_LOCAL)
                        break;

                man = &bm->man[mem_type];
                mutex_lock(&dev->struct_mutex);
                if (man->has_type && man->use_type) {
                        type_found = 1;
                        node = drm_mm_search_free(&man->manager, mem->num_pages,
                                                  mem->page_alignment, 1);
                        if (node)
                                node = drm_mm_get_block(node, mem->num_pages,
                                                        mem->page_alignment);
                }
                mutex_unlock(&dev->struct_mutex);
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->flags = drm_bo_type_flags(mem_type);
                return 0;
        }

        if (!type_found) {
                DRM_ERROR("Requested memory types are not supported\n");
                return -EINVAL;
        }

        num_prios = dev->driver->bo_driver->num_mem_busy_prio;
        prios = dev->driver->bo_driver->mem_busy_prio;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                if (!(drm_bo_type_flags(mem_type) & mem->mask))
                        continue;

                man = &bm->man[mem_type];
                ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

                if (ret == 0)
                        return 0;
                if (ret == -EAGAIN)
                        has_eagain = 1;
        }

        ret = (has_eagain) ? -EAGAIN : -ENOMEM;
        return ret;
}
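/*
 * Editor's note: the search above is two-pass. Pass one walks the
 * driver's mem_type_prio list looking for free space without disturbing
 * anything; pass two walks mem_busy_prio and calls
 * drm_bo_mem_force_space(), which evicts LRU buffers until the request
 * fits. A driver might express "prefer VRAM, fall back to TT, then
 * system" as (illustrative values only):
 *
 *        static uint32_t prios[] = { DRM_BO_MEM_VRAM, DRM_BO_MEM_TT,
 *                                    DRM_BO_MEM_LOCAL };
 */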
static int drm_move_local_to_tt(drm_buffer_object_t * bo,
                                drm_bo_mem_reg_t * mem,
                                int no_wait)
{
        drm_device_t *dev = bo->dev;
        int ret = 0;

        bo->mem.mm_node = mem->mm_node;

        DRM_DEBUG("Flipping into AGP 0x%08lx 0x%08lx\n",
                  bo->mem.mm_node->start, bo->mem.mm_node->size);

#ifdef DRM_ODD_MM_COMPAT
        mutex_lock(&dev->struct_mutex);
        ret = drm_bo_lock_kmm(bo);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                goto out_err;
        }
#endif
        drm_bo_unmap_virtual(bo);
        ret = drm_bind_ttm(bo->ttm, bo->mem.flags & DRM_BO_FLAG_BIND_CACHED,
                           bo->mem.mm_node->start);
        if (ret) {
#ifdef DRM_ODD_MM_COMPAT
                drm_bo_unlock_kmm(bo);
                mutex_unlock(&dev->struct_mutex);
#endif
                goto out_err;
        }

        if (!(bo->mem.flags & DRM_BO_FLAG_BIND_CACHED))
                bo->mem.flags &= ~DRM_BO_FLAG_CACHED;
        bo->mem.flags &= ~DRM_BO_MASK_MEM;
        bo->mem.flags |= DRM_BO_FLAG_MEM_TT;
        bo->mem.mem_type = DRM_BO_MEM_TT;
        bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;

#ifdef DRM_ODD_MM_COMPAT
        ret = drm_bo_remap_bound(bo);
        drm_bo_unlock_kmm(bo);
        mutex_unlock(&dev->struct_mutex);
#endif

        if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
                ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags);
                if (ret)
                        DRM_ERROR("Could not flush read caches\n");
        }
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);

        return 0;

out_err:
        mutex_lock(&dev->struct_mutex);
        drm_mm_put_block(bo->mem.mm_node);
        bo->mem.mm_node = NULL;
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
static int drm_bo_new_flags(drm_device_t * dev,
                            uint32_t flags, uint32_t new_mask, uint32_t hint,
                            int init, uint32_t * n_flags, uint32_t * n_mask)
{
        uint32_t new_flags = 0;
        uint32_t new_props;
        drm_buffer_manager_t *bm = &dev->bm;
        unsigned i;

        /*
         * First adjust the mask to take away nonexistent memory types.
         */

        for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
                if (!bm->man[i].use_type)
                        new_mask &= ~drm_bo_type_flags(i);
        }

        if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
                DRM_ERROR
                    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
                     "processes\n");
                return -EPERM;
        }
        if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
                if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
                     !(bm->man[DRM_BO_MEM_TT].flags &
                       _DRM_FLAG_MEMTYPE_CACHED) &&
                     ((new_mask & DRM_BO_FLAG_MEM_VRAM)
                      && !(bm->man[DRM_BO_MEM_VRAM].flags &
                           _DRM_FLAG_MEMTYPE_CACHED)))) {
                        new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
                } else {
                        if (!(bm->man[DRM_BO_MEM_TT].flags &
                              _DRM_FLAG_MEMTYPE_CACHED))
                                new_flags &= DRM_BO_FLAG_MEM_TT;
                        if (!(bm->man[DRM_BO_MEM_VRAM].flags &
                              _DRM_FLAG_MEMTYPE_CACHED))
                                new_flags &= DRM_BO_FLAG_MEM_VRAM;
                }
        }

        if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
            !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
                if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
                    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
                        DRM_ERROR
                            ("Cannot read cached from a pinned VRAM / TT buffer\n");
                        return -EINVAL;
                }
        }

        /*
         * Determine new memory location:
         */

        if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {

                new_flags = new_mask & DRM_BO_MASK_MEM;

                if (!new_flags) {
                        DRM_ERROR("Invalid buffer object memory flags\n");
                        return -EINVAL;
                }

                if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
                        if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
                            new_flags & (DRM_BO_FLAG_MEM_VRAM |
                                         DRM_BO_FLAG_MEM_TT)) {
                                new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
                        } else {
                                new_flags = DRM_BO_FLAG_MEM_LOCAL;
                        }
                }
                if (new_flags & DRM_BO_FLAG_MEM_TT) {
                        if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
                            new_flags & DRM_BO_FLAG_MEM_VRAM) {
                                new_flags = DRM_BO_FLAG_MEM_VRAM;
                        } else {
                                new_flags = DRM_BO_FLAG_MEM_TT;
                        }
                }
        } else {
                new_flags = flags & DRM_BO_MASK_MEM;
        }

        new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
                                DRM_BO_FLAG_READ);

        if (!new_props) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }

        new_flags |= new_mask & ~DRM_BO_MASK_MEM;

        if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
            (new_flags & DRM_BO_FLAG_NO_EVICT) &&
            (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
                if (!(flags & DRM_BO_FLAG_CACHED)) {
                        DRM_ERROR
                            ("Cannot change caching policy of pinned buffer\n");
                        return -EINVAL;
                } else {
                        new_flags &= ~DRM_BO_FLAG_CACHED;
                }
        }

        *n_flags = new_flags;
        *n_mask = new_mask;
        return 0;
}
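/*
 * Editor's note, an illustrative resolution (values assumed, not from the
 * original): with flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED,
 * new_mask = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE
 * and hint = 0, flags and new_mask share no memory bits, so the function
 * takes the "determine new memory location" branch and resolves
 *
 *        new_flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ
 *                    | DRM_BO_FLAG_WRITE;
 *
 * assuming the TT memory type manager is enabled.
 */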
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
                                              uint32_t handle, int check_owner)
{
        drm_user_object_t *uo;
        drm_buffer_object_t *bo;

        uo = drm_lookup_user_object(priv, handle);

        if (!uo || (uo->type != drm_buffer_type)) {
                DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
                return NULL;
        }

        if (check_owner && priv != uo->owner) {
                if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
                        return NULL;
        }

        bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
        atomic_inc(&bo->usage);
        return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing, as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                return 1;
        }
        return 0;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
        drm_fence_object_t *fence = bo->fence;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
                drm_device_t *dev = bo->dev;
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
                if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(dev, fence);
                        bo->fence = NULL;
                        return 0;
                }
                return 1;
        }
        return 0;
}
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
        int ret = 0;

        BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (bo->mem.mm_node)
                ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
        return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->mapped) >= 0) && no_wait)
                return -EBUSY;

        DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                    atomic_read(&bo->mapped) == -1);

        if (ret == -EINTR)
                ret = -EAGAIN;

        return ret;
}
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
        int ret;

        mutex_lock(&bo->mutex);
        ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        mutex_unlock(&bo->mutex);
        return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be an atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
                                int eagain_if_wait)
{
        int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        unsigned long _end = jiffies + 3 * DRM_HZ;

        if (ret && no_wait)
                return -EBUSY;
        else if (!ret)
                return 0;

        do {
                mutex_unlock(&bo->mutex);
                DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
                            !drm_bo_check_unfenced(bo));
                mutex_lock(&bo->mutex);
                if (ret == -EINTR)
                        return -EAGAIN;
                if (ret) {
                        DRM_ERROR
                            ("Error waiting for buffer to become fenced\n");
                        return ret;
                }
                ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        } while (ret && !time_after_eq(jiffies, _end));
        if (ret) {
                DRM_ERROR("Timeout waiting for buffer to become fenced\n");
                return ret;
        }
        if (eagain_if_wait)
                return -EAGAIN;

        return 0;
}
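/*
 * The intended lifecycle, for reference (editor's summary of the comment
 * above; the command-submission step happens in the driver, not here):
 *
 *        drm_buffer_object_validate(bo, ..., 1, ...);
 *                -- puts bo on bm->unfenced
 *        ... driver emits commands referencing bo ...
 *        drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *                -- fences the whole unfenced list and moves the
 *                -- buffers back onto their LRU lists
 */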
/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
                                drm_bo_arg_reply_t * rep)
{
        rep->handle = bo->base.hash.key;
        rep->flags = bo->mem.flags;
        rep->size = bo->mem.num_pages * PAGE_SIZE;
        rep->offset = bo->offset;
        rep->arg_handle = bo->map_list.user_token;
        rep->mask = bo->mem.mask;
        rep->buffer_start = bo->buffer_start;
        rep->fence_flags = bo->fence_type;
        rep->rep_flags = 0;
        rep->page_alignment = bo->mem.page_alignment;

        if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
                DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
                                DRM_BO_REP_BUSY);
        }
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically removed.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
                                 uint32_t map_flags, unsigned hint,
                                 drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret = 0;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

        mutex_lock(&dev->struct_mutex);
        bo = drm_lookup_buffer_object(priv, handle, 1);
        mutex_unlock(&dev->struct_mutex);

        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
                ret = drm_bo_wait_unfenced(bo, no_wait, 0);
                if (ret)
                        goto out;
        }

        /*
         * If this returns true, we are currently unmapped.
         * We need to do this test, because unmapping can
         * be done without the bo->mutex held.
         */

        while (1) {
                if (atomic_inc_and_test(&bo->mapped)) {
                        if (no_wait && drm_bo_busy(bo)) {
                                atomic_dec(&bo->mapped);
                                ret = -EBUSY;
                                goto out;
                        }
                        ret = drm_bo_wait(bo, 0, 0, no_wait);
                        if (ret) {
                                atomic_dec(&bo->mapped);
                                goto out;
                        }

                        if ((map_flags & DRM_BO_FLAG_READ) &&
                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
                                drm_bo_read_cached(bo);
                        }
                        break;
                } else if ((map_flags & DRM_BO_FLAG_READ) &&
                           (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
                           (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {

                        /*
                         * We are already mapped with different flags.
                         * We need to wait for unmap.
                         */

                        ret = drm_bo_wait_unmapped(bo, no_wait);
                        if (ret)
                                goto out;

                        continue;
                }
                break;
        }

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                if (atomic_add_negative(-1, &bo->mapped))
                        DRM_WAKEUP(&bo->event_queue);
        } else
                drm_bo_fill_rep_arg(bo, rep);
out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}
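/*
 * Editor's note on the bo->mapped convention used above: the counter
 * starts at -1 (unmapped; see drm_buffer_object_create() below), so
 *
 *        atomic_inc_and_test(&bo->mapped)      -- first mapper: -1 -> 0
 *        atomic_add_negative(-1, &bo->mapped)  -- last unmapper: 0 -> -1,
 *                                              -- wakes event_queue waiters
 *
 * and drm_bo_wait_unmapped() waits for the counter to read -1 again.
 */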
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_object_t *bo;
        drm_ref_object_t *ro;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo) {
                ret = -EINVAL;
                goto out;
        }

        ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
        if (!ro) {
                ret = -EINVAL;
                goto out;
        }

        drm_remove_ref_object(priv, ro);
        drm_bo_usage_deref_locked(bo);
out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
                                         drm_user_object_t * uo,
                                         drm_ref_t action)
{
        drm_buffer_object_t *bo =
            drm_user_object_entry(uo, drm_buffer_object_t, base);

        /*
         * We DON'T want to take the bo->mutex here, because we want to
         * hold it when we wait for an unmapped buffer.
         */

        BUG_ON(action != _DRM_REF_TYPE1);

        if (atomic_add_negative(-1, &bo->mapped))
                DRM_WAKEUP(&bo->event_queue);
}
/*
 * bo->mutex locked.
 */

static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
                              int no_wait, int force_no_move)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = 0;
        drm_bo_mem_reg_t mem;

        /*
         * Flush outstanding fences.
         */

        drm_bo_busy(bo);

        /*
         * Make sure we're not mapped.
         */

        ret = drm_bo_wait_unmapped(bo, no_wait);
        if (ret)
                return ret;

        /*
         * Wait for outstanding fences.
         */

        ret = drm_bo_wait(bo, 0, 0, no_wait);
        if (ret)
                return ret;

        mem.num_pages = bo->mem.num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.mask = new_mem_flags;
        mem.page_alignment = bo->mem.page_alignment;

        mutex_lock(&bm->evict_mutex);
        mutex_lock(&dev->struct_mutex);
        list_del(&bo->lru);
        list_add_tail(&bo->lru, &bm->unfenced);
        DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                        _DRM_BO_FLAG_UNFENCED);
        mutex_unlock(&dev->struct_mutex);

        /*
         * Determine where to move the buffer.
         */

        ret = drm_bo_mem_space(dev, &mem, no_wait);
        mutex_unlock(&bm->evict_mutex);

        if (ret)
                return ret;

        if (mem.mem_type == DRM_BO_MEM_TT) {
                ret = drm_move_local_to_tt(bo, &mem, no_wait);
                if (ret) {
                        mutex_lock(&dev->struct_mutex);
                        list_del_init(&bo->lru);
                        drm_bo_add_to_lru(bo, bm);
                        mutex_unlock(&dev->struct_mutex);
                        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                }
        } else {
                drm_move_tt_to_local(bo, 0, force_no_move);
        }

        return ret;
}
static int drm_buffer_object_validate(drm_buffer_object_t * bo,
                                      uint32_t new_flags,
                                      int move_unfenced, int no_wait)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        uint32_t flag_diff = (new_flags ^ bo->mem.flags);
        drm_bo_driver_t *driver = dev->driver->bo_driver;
        int ret;

        if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
                DRM_ERROR("Vram support not implemented yet\n");
                return -EINVAL;
        }

        DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->mem.flags);
        ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
        if (ret) {
                DRM_ERROR("Driver did not support given buffer permissions\n");
                return ret;
        }

        /*
         * Move out if we need to change caching policy.
         */

        if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
            !(bo->mem.flags & DRM_BO_FLAG_MEM_LOCAL)) {
                if (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                        DRM_ERROR("Cannot change caching policy of "
                                  "pinned buffer.\n");
                        return -EINVAL;
                }
                ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }
        DRM_MASK_VAL(bo->mem.flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
        flag_diff = (new_flags ^ bo->mem.flags);

        /*
         * Check whether we dropped no_move policy, and in that case,
         * release reserved manager regions.
         */

        if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
            !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
                mutex_lock(&dev->struct_mutex);
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                mutex_unlock(&dev->struct_mutex);
        }

        /*
         * Check whether we need to move the buffer.
         */

        if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
                ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
                        return ret;
                }
        }

        if (move_unfenced) {

                /*
                 * Place on unfenced list.
                 */

                DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
                                _DRM_BO_FLAG_UNFENCED);
                mutex_lock(&dev->struct_mutex);
                list_del(&bo->lru);
                list_add_tail(&bo->lru, &bm->unfenced);
                mutex_unlock(&dev->struct_mutex);
        } else {
                DRM_FLAG_MASKED(bo->priv_flags, 0,
                                _DRM_BO_FLAG_UNFENCED);
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->lru);
                drm_bo_add_to_lru(bo, bm);
                mutex_unlock(&dev->struct_mutex);
        }

        bo->mem.flags = new_flags;
        return 0;
}
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
                                  uint32_t flags, uint32_t mask, uint32_t hint,
                                  drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        drm_device_t *dev = priv->head->dev;
        int ret;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        uint32_t new_flags;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;

        ret = drm_bo_new_flags(dev, bo->mem.flags,
                               (flags & mask) | (bo->mem.mask & ~mask), hint,
                               0, &new_flags, &bo->mem.mask);
        if (ret)
                goto out;

        ret =
            drm_buffer_object_validate(bo, new_flags,
                                       !(hint & DRM_BO_HINT_DONT_FENCE),
                                       no_wait);
        drm_bo_fill_rep_arg(bo, rep);

out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
                              drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
                (void)drm_bo_busy(bo);
        drm_bo_fill_rep_arg(bo, rep);
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return 0;
}
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
                              uint32_t hint, drm_bo_arg_reply_t * rep)
{
        drm_buffer_object_t *bo;
        int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
        int ret;

        bo = drm_lookup_buffer_object(priv, handle, 1);
        if (!bo)
                return -EINVAL;

        mutex_lock(&bo->mutex);
        ret = drm_bo_wait_unfenced(bo, no_wait, 0);
        if (ret)
                goto out;
        ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
        if (ret)
                goto out;

        drm_bo_fill_rep_arg(bo, rep);
out:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
{
        drm_device_t *dev = bo->dev;
        int ret = 0;

        bo->ttm = NULL;
        bo->map_list.user_token = 0ULL;

        switch (bo->type) {
        case drm_bo_type_dc:
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_setup_vm_locked(bo);
                mutex_unlock(&dev->struct_mutex);
                if (ret)
                        break;
                bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
                if (!bo->ttm)
                        ret = -ENOMEM;
                break;
        case drm_bo_type_user:
        case drm_bo_type_fake:
                break;
        default:
                DRM_ERROR("Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call bo->mutex locked.
 */

int drm_buffer_object_transfer(drm_buffer_object_t *bo,
                               drm_buffer_object_t **new_obj)
{
        drm_buffer_object_t *fbo;
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        list_splice_init(&bo->lru, &fbo->lru);

        bo->mem.mm_node = NULL;

        fbo->mem.mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}
int drm_buffer_object_create(drm_file_t * priv,
                             unsigned long size,
                             drm_bo_type_t type,
                             uint32_t mask,
                             uint32_t hint,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             drm_buffer_object_t ** buf_obj)
{
        drm_device_t *dev = priv->head->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_buffer_object_t *bo;
        int ret = 0;
        uint32_t new_flags;
        unsigned long num_pages;

        if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
                DRM_ERROR("Invalid buffer object start.\n");
                return -EINVAL;
        }
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                DRM_ERROR("Illegal buffer object size.\n");
                return -EINVAL;
        }

        bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
        if (!bo)
                return -ENOMEM;

        mutex_init(&bo->mutex);
        mutex_lock(&bo->mutex);

        atomic_set(&bo->usage, 1);
        atomic_set(&bo->mapped, -1);
        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&bo->p_mm_list);
        INIT_LIST_HEAD(&bo->vma_list);
#endif
        bo->dev = dev;
        bo->type = type;
        bo->mem.num_pages = num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        if (bo->type == drm_bo_type_fake) {
                bo->offset = buffer_start;
                bo->buffer_start = 0;
        } else {
                bo->buffer_start = buffer_start;
        }

        bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
        atomic_inc(&bm->count);
        ret = drm_bo_new_flags(dev, bo->mem.flags, mask, hint,
                               1, &new_flags, &bo->mem.mask);
        if (ret)
                goto out_err;
        ret = drm_bo_add_ttm(priv, bo);
        if (ret)
                goto out_err;

        ret = drm_buffer_object_validate(bo, new_flags, 0,
                                         hint & DRM_BO_HINT_DONT_BLOCK);
        if (ret)
                goto out_err;

        mutex_unlock(&bo->mutex);
        *buf_obj = bo;
        return 0;

out_err:
        mutex_unlock(&bo->mutex);
        drm_bo_usage_deref_unlocked(bo);
        return ret;
}
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
                                  int shareable)
{
        drm_device_t *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_add_user_object(priv, &bo->base, shareable);
        if (ret)
                goto out;

        bo->base.remove = drm_bo_base_deref_locked;
        bo->base.type = drm_buffer_type;
        bo->base.ref_struct_locked = NULL;
        bo->base.unref = drm_buffer_user_object_unmap;
out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
        LOCK_TEST_WITH_RETURN(dev, filp);
        return 0;
}
int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_bo_arg_t arg;
        drm_bo_arg_request_t *req = &arg.d.req;
        drm_bo_arg_reply_t rep;
        unsigned long next;
        drm_user_object_t *uo;
        drm_buffer_object_t *entry;

        if (!dev->bm.initialized) {
                DRM_ERROR("Buffer object manager is not initialized.\n");
                return -EINVAL;
        }

        do {
                DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
                rep.ret = 0;
                switch (req->op) {
                case drm_bo_create:
                        rep.ret =
                            drm_buffer_object_create(priv, req->size,
                                                     req->type, req->mask,
                                                     req->hint,
                                                     req->page_alignment,
                                                     req->buffer_start, &entry);
                        if (rep.ret)
                                break;
                        rep.ret =
                            drm_bo_add_user_object(priv, entry, req->mask &
                                                   DRM_BO_FLAG_SHAREABLE);
                        if (rep.ret) {
                                drm_bo_usage_deref_unlocked(entry);
                                break;
                        }
                        mutex_lock(&entry->mutex);
                        drm_bo_fill_rep_arg(entry, &rep);
                        mutex_unlock(&entry->mutex);
                        break;
                case drm_bo_unmap:
                        rep.ret = drm_buffer_object_unmap(priv, req->handle);
                        break;
                case drm_bo_map:
                        rep.ret = drm_buffer_object_map(priv, req->handle,
                                                        req->mask, req->hint,
                                                        &rep);
                        break;
                case drm_bo_destroy:
                        mutex_lock(&dev->struct_mutex);
                        uo = drm_lookup_user_object(priv, req->handle);
                        if (!uo || (uo->type != drm_buffer_type)
                            || uo->owner != priv) {
                                mutex_unlock(&dev->struct_mutex);
                                rep.ret = -EINVAL;
                                break;
                        }
                        rep.ret = drm_remove_user_object(priv, uo);
                        mutex_unlock(&dev->struct_mutex);
                        break;
                case drm_bo_reference:
                        rep.ret = drm_user_object_ref(priv, req->handle,
                                                      drm_buffer_type, &uo);
                        if (rep.ret)
                                break;
                        mutex_lock(&dev->struct_mutex);
                        uo = drm_lookup_user_object(priv, req->handle);
                        entry =
                            drm_user_object_entry(uo, drm_buffer_object_t,
                                                  base);
                        atomic_dec(&entry->usage);
                        mutex_unlock(&dev->struct_mutex);
                        mutex_lock(&entry->mutex);
                        drm_bo_fill_rep_arg(entry, &rep);
                        mutex_unlock(&entry->mutex);
                        break;
                case drm_bo_unreference:
                        rep.ret = drm_user_object_unref(priv, req->handle,
                                                        drm_buffer_type);
                        break;
                case drm_bo_validate:
                        rep.ret = drm_bo_lock_test(dev, filp);
                        if (rep.ret)
                                break;
                        rep.ret =
                            drm_bo_handle_validate(priv, req->handle, req->mask,
                                                   req->arg_handle, req->hint,
                                                   &rep);
                        break;
                case drm_bo_fence:
                        rep.ret = drm_bo_lock_test(dev, filp);
                        if (rep.ret)
                                break;
                        break;
                case drm_bo_info:
                        rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
                        break;
                case drm_bo_wait_idle:
                        rep.ret = drm_bo_handle_wait(priv, req->handle,
                                                     req->hint, &rep);
                        break;
                case drm_bo_ref_fence:
                        rep.ret = -EINVAL;
                        DRM_ERROR("Function is not implemented yet.\n");
                        break;
                default:
                        rep.ret = -EINVAL;
                        break;
                }
                next = arg.next;

                /*
                 * A signal interrupted us. Make sure the ioctl is restartable.
                 */

                if (rep.ret == -EAGAIN)
                        return -EAGAIN;

                arg.handled = 1;
                arg.d.rep = rep;
                DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
                data = next;
        } while (data);
        return 0;
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
                                   struct list_head *head,
                                   unsigned mem_type,
                                   int force_no_move, int allow_errors)
{
        drm_buffer_manager_t *bm = &dev->bm;
        struct list_head *list, *next, *prev;
        drm_buffer_object_t *entry;
        int ret;

retry:
        list_for_each_safe(list, next, head) {
                prev = list->prev;
                entry = list_entry(list, drm_buffer_object_t, lru);
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
                mutex_lock(&dev->struct_mutex);

                if (prev != list->prev || next != list->next) {
                        mutex_unlock(&entry->mutex);
                        drm_bo_usage_deref_locked(entry);
                        goto retry;
                }
                if (entry->mem.mm_node) {

                        /*
                         * Expire the fence.
                         */

                        mutex_unlock(&dev->struct_mutex);
                        if (entry->fence && bm->nice_mode) {
                                unsigned long _end = jiffies + 3 * DRM_HZ;
                                do {
                                        ret = drm_bo_wait(entry, 0, 1, 0);
                                        if (ret && allow_errors) {
                                                if (ret == -EINTR)
                                                        ret = -EAGAIN;
                                                goto out_err;
                                        }
                                } while (ret && !time_after_eq(jiffies, _end));

                                if (entry->fence) {
                                        bm->nice_mode = 0;
                                        DRM_ERROR("Detected GPU hang or "
                                                  "fence manager was taken down. "
                                                  "Evicting waiting buffers\n");
                                }
                        }
                        if (entry->fence) {
                                drm_fence_usage_deref_unlocked(dev,
                                                               entry->fence);
                                entry->fence = NULL;
                        }

                        DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
                                     0);

                        if (force_no_move) {
                                DRM_MASK_VAL(entry->mem.flags, DRM_BO_FLAG_NO_MOVE,
                                             0);
                        }
                        if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) {
                                DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
                                          "cleanup. Removing flag and evicting.\n");
                                entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
                                entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
                        }

                        ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
                        if (ret) {
                                if (allow_errors)
                                        goto out_err;
                                DRM_ERROR("Aargh. Eviction failed.\n");
                        }
                        mutex_lock(&dev->struct_mutex);
                }
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_locked(entry);
                if (prev != list->prev || next != list->next)
                        goto retry;
        }
        return 0;

out_err:
        mutex_unlock(&entry->mutex);
        drm_bo_usage_deref_unlocked(entry);
        mutex_lock(&dev->struct_mutex);
        return ret;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[mem_type];
        drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL];
        int ret = -EINVAL;

        if (mem_type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %d\n", mem_type);
                return ret;
        }

        if (!man->has_type) {
                DRM_ERROR("Trying to take down uninitialized "
                          "memory manager type\n");
                return ret;
        }
        man->use_type = 0;
        man->has_type = 0;

        ret = 0;
        if (mem_type > 0) {

                /*
                 * Throw out unfenced buffers.
                 */

                drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);

                /*
                 * Throw out evicted no-move buffers.
                 */

                drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0);
                drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0);
                drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0);

                if (drm_mm_clean(&man->manager)) {
                        drm_mm_takedown(&man->manager);
                } else {
                        ret = -EBUSY;
                }
        }

        return ret;
}
static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
        int ret;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[mem_type];

        if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
                return -EINVAL;
        }

        ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
        if (ret)
                return ret;
        ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1);
        if (ret)
                return ret;
        ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1);
        return ret;
}
static int drm_bo_init_mm(drm_device_t * dev,
                          unsigned type,
                          unsigned long p_offset, unsigned long p_size)
{
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = -EINVAL;
        drm_mem_type_manager_t *man;

        if (type >= DRM_BO_MEM_TYPES) {
                DRM_ERROR("Illegal memory type %d\n", type);
                return ret;
        }

        man = &bm->man[type];
        if (man->has_type) {
                DRM_ERROR("Memory manager already initialized for type %d\n",
                          type);
                return ret;
        }

        ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != DRM_BO_MEM_LOCAL) {
                if (!p_size) {
                        DRM_ERROR("Zero size memory manager type %d\n", type);
                        return -EINVAL;
                }
                ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = 1;
        man->use_type = 1;

        INIT_LIST_HEAD(&man->lru);
        INIT_LIST_HEAD(&man->pinned);

        return 0;
}
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = -EINVAL;
        unsigned i = DRM_BO_MEM_TYPES;
        drm_mem_type_manager_t *man;

        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);

        if (!bm->initialized)
                goto out;
        bm->initialized = 0;

        while (i--) {
                man = &bm->man[i];
                if (man->has_type) {
                        man->use_type = 0;
                        if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
                                ret = -EBUSY;
                                DRM_ERROR("DRM memory manager type %d "
                                          "is not clean.\n", i);
                        }
                        man->has_type = 0;
                }
        }
        mutex_unlock(&dev->struct_mutex);
        if (!cancel_delayed_work(&bm->wq)) {
                flush_scheduled_work();
        }
        mutex_lock(&dev->struct_mutex);
        drm_bo_delayed_delete(dev, 1);
        if (list_empty(&bm->ddestroy)) {
                DRM_DEBUG("Delayed destroy list was clean\n");
        }
        if (list_empty(&bm->man[0].lru)) {
                DRM_DEBUG("Swap list was clean\n");
        }
        if (list_empty(&bm->man[0].pinned)) {
                DRM_DEBUG("NO_MOVE list was clean\n");
        }
        if (list_empty(&bm->unfenced)) {
                DRM_DEBUG("Unfenced list was clean\n");
        }
out:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
        drm_bo_driver_t *driver = dev->driver->bo_driver;
        drm_buffer_manager_t *bm = &dev->bm;
        int ret = -EINVAL;

        mutex_lock(&dev->bm.init_mutex);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */

        ret = drm_bo_init_mm(dev, 0, 0, 0);
        if (ret)
                goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
        INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif

        bm->initialized = 1;
        bm->nice_mode = 1;
        atomic_set(&bm->count, 0);

        INIT_LIST_HEAD(&bm->unfenced);
        INIT_LIST_HEAD(&bm->ddestroy);
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        return ret;
}

EXPORT_SYMBOL(drm_bo_driver_init);
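/*
 * Hypothetical driver-side usage (not from this file): a driver calls
 * drm_bo_driver_init() at load time, which sets up only the system
 * (DRM_BO_MEM_LOCAL) type; further types are initialized from user space
 * through drm_mm_init_ioctl() below, roughly (field and request names
 * assumed from the drm.h of this tree):
 *
 *        drm_mm_init_arg_t arg = { 0 };
 *        arg.req.op = mm_init;
 *        arg.req.mem_type = DRM_BO_MEM_TT;
 *        arg.req.p_offset = 0;             -- first managed page
 *        arg.req.p_size = aperture_pages;  -- hypothetical size
 *        ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
 */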
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;

        int ret = 0;
        drm_mm_init_arg_t arg;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_bo_driver_t *driver = dev->driver->bo_driver;

        if (!driver) {
                DRM_ERROR("Buffer objects are not supported by this driver\n");
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

        switch (arg.req.op) {
        case mm_init:
                ret = -EINVAL;
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                if (!bm->initialized) {
                        DRM_ERROR("DRM memory manager was not initialized.\n");
                        break;
                }
                if (arg.req.mem_type == 0) {
                        DRM_ERROR
                            ("System memory buffers already initialized.\n");
                        break;
                }
                ret = drm_bo_init_mm(dev, arg.req.mem_type,
                                     arg.req.p_offset, arg.req.p_size);
                break;
        case mm_takedown:
                LOCK_TEST_WITH_RETURN(dev, filp);
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                ret = -EINVAL;
                if (!bm->initialized) {
                        DRM_ERROR("DRM memory manager was not initialized\n");
                        break;
                }
                if (arg.req.mem_type == 0) {
                        DRM_ERROR("No takedown for System memory buffers.\n");
                        break;
                }
                ret = 0;
                if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
                        DRM_ERROR("Memory manager type %d not clean. "
                                  "Delaying takedown\n", arg.req.mem_type);
                }
                break;
        case mm_lock:
                LOCK_TEST_WITH_RETURN(dev, filp);
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                ret = drm_bo_lock_mm(dev, arg.req.mem_type);
                break;
        case mm_unlock:
                LOCK_TEST_WITH_RETURN(dev, filp);
                mutex_lock(&dev->bm.init_mutex);
                mutex_lock(&dev->struct_mutex);
                ret = 0;
                break;
        default:
                DRM_ERROR("Function not implemented yet\n");
                return -EINVAL;
        }

        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->bm.init_mutex);
        if (ret)
                return ret;

        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return 0;
}
/*
 * buffer object vm functions.
 */

/**
 * \c Get the PCI offset for the buffer object memory.
 *
 * \param bo The buffer object.
 * \param bus_base On return the base of the PCI region
 * \param bus_offset On return the byte offset into the PCI region
 * \param bus_size On return the byte size of the buffer object or zero if
 *     the buffer object memory is not accessible through a PCI region.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Otherwise returns zero. Call bo->mutex locked.
 */

int drm_bo_pci_offset(const drm_buffer_object_t *bo,
                      unsigned long *bus_base,
                      unsigned long *bus_offset,
                      unsigned long *bus_size)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type];

        *bus_size = 0;
        if (bo->type != drm_bo_type_dc)
                return -EINVAL;

        if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
                return -EINVAL;

        if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
                drm_ttm_t *ttm = bo->ttm;

                if (!bo->ttm)
                        return -EINVAL;

                drm_ttm_fixup_caching(ttm);

                if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
                        return 0;
                if (ttm->be->flags & DRM_BE_FLAG_CMA)
                        return 0;
                *bus_base = ttm->be->aperture_base;
        } else {
                *bus_base = man->io_offset;
        }

        *bus_offset = bo->mem.mm_node->start << PAGE_SHIFT;
        *bus_size = bo->mem.num_pages << PAGE_SHIFT;
        return 0;
}
/**
 * \c Return a kernel virtual address to the buffer object PCI memory.
 *
 * \param bo The buffer object.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, bo->iomap contains the virtual address, or NULL
 * if the buffer object content is not accessible through PCI space.
 * Call bo->mutex locked.
 */

int drm_bo_ioremap(drm_buffer_object_t *bo)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm = &dev->bm;
        drm_mem_type_manager_t *man = &bm->man[bo->mem.mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;

        ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                bo->iomap = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                bo->iomap = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (bo->iomap == NULL)
                        ret = -ENOMEM;
        }

        return ret;
}
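/*
 * Sketch of a kernel-side user of the two helpers above and below
 * (illustrative only; assumes bo->mutex is held, as the comments require,
 * and that src/len are a hypothetical source buffer and byte count):
 *
 *        if (drm_bo_ioremap(bo) == 0 && bo->iomap) {
 *                memcpy_toio(bo->iomap, src, len);  -- access PCI memory
 *                drm_bo_iounmap(bo);
 *        }
 */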
/**
 * \c Unmap mapping obtained using drm_bo_ioremap
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

void drm_bo_iounmap(drm_buffer_object_t *bo)
{
        drm_device_t *dev = bo->dev;
        drm_buffer_manager_t *bm;
        drm_mem_type_manager_t *man;

        bm = &dev->bm;
        man = &bm->man[bo->mem.mem_type];

        if (bo->iomap && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                iounmap(bo->iomap);

        bo->iomap = NULL;
}
/**
 * \c Kill all user-space virtual mappings of this buffer object.
 *
 * \param bo The buffer object.
 *
 * Call bo->mutex locked.
 */

static void drm_bo_unmap_virtual(drm_buffer_object_t *bo)
{
        drm_device_t *dev = bo->dev;
        loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo)
{
        drm_map_list_t *list = &bo->map_list;
        drm_local_map_t *map;
        drm_device_t *dev = bo->dev;

        if (list->user_token) {
                drm_ht_remove_item(&dev->map_hash, &list->hash);
                list->user_token = 0;
        }
        if (list->file_offset_node) {
                drm_mm_put_block(list->file_offset_node);
                list->file_offset_node = NULL;
        }

        map = list->map;
        if (!map)
                return;

        drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
        list->map = NULL;
        list->user_token = 0ULL;
        drm_bo_usage_deref_locked(bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo)
{
        drm_map_list_t *list = &bo->map_list;
        drm_local_map_t *map;
        drm_device_t *dev = bo->dev;

        list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->offset = 0;
        map->type = _DRM_TTM;
        map->flags = _DRM_REMOVABLE;
        map->size = bo->mem.num_pages * PAGE_SIZE;
        atomic_inc(&bo->usage);
        map->handle = (void *)bo;

        list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
                                                    bo->mem.num_pages, 0, 0);

        if (!list->file_offset_node) {
                drm_bo_takedown_vm_locked(bo);
                return -ENOMEM;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  bo->mem.num_pages, 0);

        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
                drm_bo_takedown_vm_locked(bo);
                return -ENOMEM;
        }

        list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;

        return 0;
}