1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads, as well as hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44 * both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47 * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48 * traversal will, in general, need to be restarted.
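/*
 * A minimal sketch (illustration only, not driver code) of the reference and
 * locking dance described above, as used by the list-walking functions in
 * this file: take a usage reference, drop dev->struct_mutex to honour the
 * bo->mutex -> dev->struct_mutex ordering, then re-take dev->struct_mutex and
 * restart the traversal if the list changed underneath us:
 *
 *	atomic_inc(&entry->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&entry->mutex);
 *	mutex_lock(&dev->struct_mutex);
 *	... operate on the buffer and its list heads ...
 *	mutex_unlock(&entry->mutex);
 *	drm_bo_usage_deref_locked(entry);
 */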
54 static void drm_bo_destroy_locked(drm_buffer_object_t *bo);
55 static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo);
56 static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo);
57 static void drm_bo_unmap_virtual(drm_buffer_object_t *bo);
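/*
 * Per-memory-type flag for a memory type index. This assumes the usual layout
 * of the buffer flag word, with one DRM_BO_FLAG_MEM_* bit per memory type
 * starting at bit 24, so e.g. drm_bo_type_flags(DRM_BO_MEM_TT) would equal
 * DRM_BO_FLAG_MEM_TT; see the DRM_BO_MASK_MEM tests below.
 */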
59 static inline uint32_t drm_bo_type_flags(unsigned type)
61 return (1 << (24 + type));
65 * bo locked. dev->struct_mutex locked.
68 void drm_bo_add_to_lru(drm_buffer_object_t * bo,
69 drm_buffer_manager_t * bm)
71 struct list_head *list;
72 drm_mem_type_manager_t *man;
74 man = &bm->man[bo->mem.mem_type];
75 list = (bo->mem.flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
76 &man->pinned : &man->lru;
77 list_add_tail(&bo->lru, list);
81 static int drm_bo_vm_pre_move(drm_buffer_object_t *bo,
84 #ifdef DRM_ODD_MM_COMPAT
87 ret = drm_bo_lock_kmm(bo);
93 drm_bo_unmap_virtual(bo);
95 drm_bo_finish_unmap(bo);
97 drm_bo_unmap_virtual(bo);
102 static void drm_bo_vm_post_move(drm_buffer_object_t *bo)
104 #ifdef DRM_ODD_MM_COMPAT
107 ret = drm_bo_remap_bound(bo);
109 DRM_ERROR("Failed to remap a bound buffer object.\n"
110 "\tThis might cause a sigbus later.\n");
112 drm_bo_unlock_kmm(bo);
117 * Call bo->mutex locked.
120 static int drm_bo_add_ttm(drm_buffer_object_t * bo)
122 drm_device_t *dev = bo->dev;
128 bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
132 case drm_bo_type_user:
133 case drm_bo_type_fake:
136 DRM_ERROR("Illegal buffer object type\n");
145 static int drm_bo_handle_move_mem(drm_buffer_object_t *bo,
146 drm_bo_mem_reg_t *mem,
150 drm_device_t *dev = bo->dev;
151 drm_buffer_manager_t *bm = &dev->bm;
152 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
153 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
154 drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
155 drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
159 if (old_is_pci || new_is_pci)
160 ret = drm_bo_vm_pre_move(bo, old_is_pci);
164 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
166 ret = drm_bo_add_ttm(bo);
170 if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
171 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
172 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
173 } else if (dev->driver->bo_driver->move) {
174 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
176 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
179 if (old_is_pci || new_is_pci)
180 drm_bo_vm_post_move(bo);
185 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
186 ret = dev->driver->bo_driver->invalidate_caches(dev, bo->mem.flags);
188 DRM_ERROR("Can not flush read caches\n");
191 DRM_FLAG_MASKED(bo->priv_flags,
192 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
193 _DRM_BO_FLAG_EVICTED);
196 bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
202 * Call bo->mutex locked.
203 * Wait until the buffer is idle.
206 int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
210 drm_fence_object_t *fence = bo->fence;
214 drm_device_t *dev = bo->dev;
215 if (drm_fence_object_signaled(fence, bo->fence_type)) {
216 drm_fence_usage_deref_unlocked(dev, fence);
224 drm_fence_object_wait(dev, fence, lazy, ignore_signals,
229 drm_fence_usage_deref_unlocked(dev, fence);
237 * Call dev->struct_mutex locked.
238 * Attempts to remove all private references to a buffer by expiring its
239 * fence object and removing it from the lru lists and memory managers.
243 static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
245 drm_device_t *dev = bo->dev;
246 drm_buffer_manager_t *bm = &dev->bm;
248 atomic_inc(&bo->usage);
249 mutex_unlock(&dev->struct_mutex);
250 mutex_lock(&bo->mutex);
252 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
254 if (bo->fence && drm_fence_object_signaled(bo->fence,
256 drm_fence_usage_deref_locked(dev, bo->fence);
260 if (bo->fence && remove_all) {
262 unsigned long _end = jiffies + 3 * DRM_HZ;
265 ret = drm_bo_wait(bo, 0, 1, 0);
266 } while (ret && !time_after_eq(jiffies, _end));
270 DRM_ERROR("Detected GPU lockup or "
271 "fence driver was taken down. "
272 "Evicting waiting buffers.\n");
275 drm_fence_usage_deref_unlocked(dev, bo->fence);
280 mutex_lock(&dev->struct_mutex);
282 if (!atomic_dec_and_test(&bo->usage)) {
287 list_del_init(&bo->lru);
288 if (bo->mem.mm_node) {
289 drm_mm_put_block(bo->mem.mm_node);
290 bo->mem.mm_node = NULL;
292 list_del_init(&bo->ddestroy);
293 mutex_unlock(&bo->mutex);
294 drm_bo_destroy_locked(bo);
298 if (list_empty(&bo->ddestroy)) {
299 drm_fence_object_flush(dev, bo->fence, bo->fence_type);
300 list_add_tail(&bo->ddestroy, &bm->ddestroy);
301 schedule_delayed_work(&bm->wq,
303 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
307 mutex_unlock(&bo->mutex);
313 * Verify that refcount is 0 and that there are no internal references
314 * to the buffer object. Then destroy it.
317 static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
319 drm_device_t *dev = bo->dev;
320 drm_buffer_manager_t *bm = &dev->bm;
322 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL && atomic_read(&bo->usage) == 0) {
323 BUG_ON(bo->fence != NULL);
325 #ifdef DRM_ODD_MM_COMPAT
326 BUG_ON(!list_empty(&bo->vma_list));
327 BUG_ON(!list_empty(&bo->p_mm_list));
331 drm_ttm_unbind(bo->ttm);
332 drm_destroy_ttm(bo->ttm);
336 atomic_dec(&bm->count);
338 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
344 * Some stuff is still trying to reference the buffer object.
345 * Get rid of those references.
348 drm_bo_cleanup_refs(bo, 0);
355 * Call dev->struct_mutex locked.
358 static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
360 drm_buffer_manager_t *bm = &dev->bm;
362 drm_buffer_object_t *entry, *nentry;
363 struct list_head *list, *next;
365 list_for_each_safe(list, next, &bm->ddestroy) {
366 entry = list_entry(list, drm_buffer_object_t, ddestroy);
369 if (next != &bm->ddestroy) {
370 nentry = list_entry(next, drm_buffer_object_t, ddestroy);
372 atomic_inc(&nentry->usage);
375 drm_bo_cleanup_refs(entry, remove_all);
378 atomic_dec(&nentry->usage);
384 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
385 static void drm_bo_delayed_workqueue(void *data)
387 static void drm_bo_delayed_workqueue(struct work_struct *work)
390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
391 drm_device_t *dev = (drm_device_t *) data;
392 drm_buffer_manager_t *bm = &dev->bm;
394 drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
395 drm_device_t *dev = container_of(bm, drm_device_t, bm);
399 DRM_DEBUG("Delayed delete Worker\n");
401 mutex_lock(&dev->struct_mutex);
402 if (!bm->initialized) {
403 mutex_unlock(&dev->struct_mutex);
406 drm_bo_delayed_delete(dev, 0);
407 if (bm->initialized && !list_empty(&bm->ddestroy)) {
408 schedule_delayed_work(&bm->wq,
409 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
411 mutex_unlock(&dev->struct_mutex);
414 void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
416 if (atomic_dec_and_test(&bo->usage)) {
417 drm_bo_destroy_locked(bo);
421 static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
423 drm_buffer_object_t *bo =
424 drm_user_object_entry(uo, drm_buffer_object_t, base);
426 drm_bo_takedown_vm_locked(bo);
427 drm_bo_usage_deref_locked(bo);
430 static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
432 drm_device_t *dev = bo->dev;
434 if (atomic_dec_and_test(&bo->usage)) {
435 mutex_lock(&dev->struct_mutex);
436 if (atomic_read(&bo->usage) == 0)
437 drm_bo_destroy_locked(bo);
438 mutex_unlock(&dev->struct_mutex);
443 * Note. The caller has to register (if applicable)
444 * and deregister fence object usage.
447 int drm_fence_buffer_objects(drm_file_t * priv,
448 struct list_head *list,
449 uint32_t fence_flags,
450 drm_fence_object_t * fence,
451 drm_fence_object_t ** used_fence)
453 drm_device_t *dev = priv->head->dev;
454 drm_buffer_manager_t *bm = &dev->bm;
456 drm_buffer_object_t *entry;
457 uint32_t fence_type = 0;
463 mutex_lock(&dev->struct_mutex);
466 list = &bm->unfenced;
468 list_for_each_entry(entry, list, lru) {
469 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
470 fence_type |= entry->fence_type;
471 if (entry->fence_class != 0) {
472 DRM_ERROR("Fence class %d is not implemented yet.\n",
486 * Transfer to a local list before we release the dev->struct_mutex,
487 * so that we don't get any new unfenced objects while fencing
488 * the ones we already have.
491 list_splice_init(list, &f_list);
494 if ((fence_type & fence->type) != fence_type) {
495 DRM_ERROR("Given fence doesn't match buffers "
496 "on unfenced list.\n");
501 mutex_unlock(&dev->struct_mutex);
502 ret = drm_fence_object_create(dev, fence_type,
503 fence_flags | DRM_FENCE_FLAG_EMIT,
505 mutex_lock(&dev->struct_mutex);
512 while (l != &f_list) {
513 entry = list_entry(l, drm_buffer_object_t, lru);
514 atomic_inc(&entry->usage);
515 mutex_unlock(&dev->struct_mutex);
516 mutex_lock(&entry->mutex);
517 mutex_lock(&dev->struct_mutex);
519 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
522 drm_fence_usage_deref_locked(dev, entry->fence);
523 entry->fence = fence;
524 DRM_FLAG_MASKED(entry->priv_flags, 0,
525 _DRM_BO_FLAG_UNFENCED);
526 DRM_WAKEUP(&entry->event_queue);
527 drm_bo_add_to_lru(entry, bm);
529 mutex_unlock(&entry->mutex);
530 drm_bo_usage_deref_locked(entry);
533 atomic_add(count, &fence->usage);
534 DRM_DEBUG("Fenced %d buffers\n", count);
536 mutex_unlock(&dev->struct_mutex);
541 EXPORT_SYMBOL(drm_fence_buffer_objects);
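#if 0
/*
 * Illustrative sketch only (kept disabled): a guess at how a driver might
 * call the exported helper above after validating buffers and emitting
 * commands. Passing list == NULL fences everything on bm->unfenced, and
 * passing fence == NULL asks for a fence object to be created and emitted.
 * The fence actually used comes back in used_fence; per the note above, the
 * caller is responsible for dropping that usage when it is done with it.
 */
static int example_fence_after_submit(drm_file_t * priv)
{
	drm_fence_object_t *used_fence = NULL;
	int ret;

	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &used_fence);
	if (ret)
		return ret;

	drm_fence_usage_deref_unlocked(priv->head->dev, used_fence);
	return 0;
}
#endif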
547 static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
548 int no_wait, int force_no_move)
551 drm_device_t *dev = bo->dev;
552 drm_buffer_manager_t *bm = &dev->bm;
553 drm_bo_mem_reg_t evict_mem;
556 * Someone might have modified the buffer before we took the buffer mutex.
559 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
561 if (bo->mem.mem_type != mem_type)
564 ret = drm_bo_wait(bo, 0, 0, no_wait);
566 if (ret && ret != -EAGAIN) {
567 DRM_ERROR("Failed to expire fence before "
568 "buffer eviction.\n");
572 if (bo->type != drm_bo_type_dc)
576 evict_mem.mask = dev->driver->bo_driver->evict_flags(dev, mem_type);
577 ret = drm_bo_mem_space(dev, &evict_mem, no_wait);
581 DRM_ERROR("Failed to find memory space for "
582 "buffer eviction.\n");
586 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
590 DRM_ERROR("Buffer eviction failed\n");
595 mutex_lock(&dev->struct_mutex);
596 if (evict_mem.mm_node) {
597 drm_mm_put_block(evict_mem.mm_node);
598 evict_mem.mm_node = NULL;
601 drm_bo_add_to_lru(bo, bm);
602 mutex_unlock(&dev->struct_mutex);
604 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
605 _DRM_BO_FLAG_EVICTED);
613 static int drm_bo_mem_force_space(drm_device_t *dev,
614 drm_bo_mem_reg_t *mem,
619 drm_buffer_manager_t *bm = &dev->bm;
620 drm_buffer_object_t *entry;
621 drm_mem_type_manager_t *man = &bm->man[mem_type];
622 struct list_head *lru;
623 unsigned long num_pages = mem->num_pages;
626 mutex_lock(&dev->struct_mutex);
628 node = drm_mm_search_free(&man->manager, num_pages,
629 mem->page_alignment, 1);
634 if (lru->next == lru)
637 entry = list_entry(lru->next, drm_buffer_object_t, lru);
638 atomic_inc(&entry->usage);
639 mutex_unlock(&dev->struct_mutex);
640 mutex_lock(&entry->mutex);
641 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
643 ret = drm_bo_evict(entry, mem_type, no_wait, 0);
644 mutex_unlock(&entry->mutex);
645 drm_bo_usage_deref_unlocked(entry);
648 mutex_lock(&dev->struct_mutex);
652 mutex_unlock(&dev->struct_mutex);
656 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
657 mutex_unlock(&dev->struct_mutex);
659 mem->mem_type = mem_type;
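/*
 * Worked example for the compatibility check below (assuming the usual flag
 * definitions): for a TT manager with _DRM_FLAG_MEMTYPE_CACHED and
 * _DRM_FLAG_MEMTYPE_MAPPABLE set, and a request mask of
 * DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE, cur_flags becomes
 * DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE. The
 * memory-type bits overlap, and the only difference (CACHED) is not forced
 * by the mask, so the type is accepted and *res_mask reports cur_flags.
 */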
664 static int drm_bo_mt_compatible(drm_mem_type_manager_t *man,
669 uint32_t cur_flags = drm_bo_type_flags(mem_type);
672 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
673 cur_flags |= DRM_BO_FLAG_CACHED;
674 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
675 cur_flags |= DRM_BO_FLAG_MAPPABLE;
676 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
677 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
679 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0) {
682 flag_diff = (mask ^ cur_flags);
683 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
684 (mask & DRM_BO_FLAG_FORCE_CACHING)) {
687 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
688 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) {
692 *res_mask = cur_flags;
697 int drm_bo_mem_space(drm_device_t *dev,
698 drm_bo_mem_reg_t *mem,
701 drm_buffer_manager_t *bm = &dev->bm;
702 drm_mem_type_manager_t *man;
704 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
705 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
707 uint32_t mem_type = DRM_BO_MEM_LOCAL;
712 drm_mm_node_t *node = NULL;
715 for (i=0; i<num_prios; ++i) {
717 man = &bm->man[mem_type];
719 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
725 if (mem_type == DRM_BO_MEM_LOCAL)
728 mutex_lock(&dev->struct_mutex);
729 if (man->has_type && man->use_type) {
731 node = drm_mm_search_free(&man->manager, mem->num_pages,
732 mem->page_alignment, 1);
734 node = drm_mm_get_block(node, mem->num_pages,
735 mem->page_alignment);
737 mutex_unlock(&dev->struct_mutex);
742 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
744 mem->mem_type = mem_type;
745 mem->flags = cur_flags;
752 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
753 prios = dev->driver->bo_driver->mem_busy_prio;
755 for (i=0; i<num_prios; ++i) {
757 man = &bm->man[mem_type];
759 if (!drm_bo_mt_compatible(man, mem_type, mem->mask,
763 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
766 mem->flags = cur_flags;
774 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
777 EXPORT_SYMBOL(drm_bo_mem_space);
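#if 0
/*
 * Illustrative sketch only (kept disabled): the calling convention assumed
 * by drm_bo_move_buffer() below. The caller fills in the size, alignment and
 * mask of a drm_bo_mem_reg_t and lets drm_bo_mem_space() pick a placement,
 * possibly evicting other buffers. On success, mem.mem_type / mem.flags /
 * mem.mm_node describe the chosen placement; an unused mm_node must later be
 * released with drm_mm_put_block() under dev->struct_mutex.
 */
static int example_find_space(drm_device_t * dev, drm_buffer_object_t * bo,
			      uint32_t new_mem_flags, int no_wait)
{
	drm_bo_mem_reg_t mem;
	int ret;

	mem.num_pages = bo->mem.num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	ret = drm_bo_mem_space(dev, &mem, no_wait);
	if (ret)
		return ret;

	/* ... move the buffer into "mem", or put back mem.mm_node ... */
	return 0;
}
#endif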
780 static int drm_bo_new_mask(drm_buffer_object_t *bo,
781 uint32_t new_mask, uint32_t hint)
785 if (bo->type == drm_bo_type_user) {
786 DRM_ERROR("User buffers are not supported yet\n");
789 if (bo->type == drm_bo_type_fake &&
790 !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
791 DRM_ERROR("Fake buffers must be pinned.\n");
795 if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
797 ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
802 new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
806 DRM_ERROR("Invalid buffer object rwx properties\n");
811 * FIXME: Check what can be done about pinned buffers here.
814 bo->mem.mask = new_mask;
819 * Call dev->struct_mutex locked.
822 drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
823 uint32_t handle, int check_owner)
825 drm_user_object_t *uo;
826 drm_buffer_object_t *bo;
828 uo = drm_lookup_user_object(priv, handle);
830 if (!uo || (uo->type != drm_buffer_type)) {
831 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
835 if (check_owner && priv != uo->owner) {
836 if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
840 bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
841 atomic_inc(&bo->usage);
846 * Call bo->mutex locked.
847 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
848 * Unlike the drm_bo_busy function, it doesn't do any fence flushing.
851 static int drm_bo_quick_busy(drm_buffer_object_t * bo)
853 drm_fence_object_t *fence = bo->fence;
855 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
857 drm_device_t *dev = bo->dev;
858 if (drm_fence_object_signaled(fence, bo->fence_type)) {
859 drm_fence_usage_deref_unlocked(dev, fence);
869 * Call bo->mutex locked.
870 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
873 static int drm_bo_busy(drm_buffer_object_t * bo)
875 drm_fence_object_t *fence = bo->fence;
877 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
879 drm_device_t *dev = bo->dev;
880 if (drm_fence_object_signaled(fence, bo->fence_type)) {
881 drm_fence_usage_deref_unlocked(dev, fence);
885 drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
886 if (drm_fence_object_signaled(fence, bo->fence_type)) {
887 drm_fence_usage_deref_unlocked(dev, fence);
896 static int drm_bo_read_cached(drm_buffer_object_t * bo)
900 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
902 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
907 * Wait until a buffer is unmapped.
910 static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
914 if ((atomic_read(&bo->mapped) >= 0) && no_wait)
917 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
918 atomic_read(&bo->mapped) == -1);
926 static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
930 mutex_lock(&bo->mutex);
931 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
932 mutex_unlock(&bo->mutex);
937 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
938 * Until then, we cannot really do anything with it except delete it.
939 * The unfenced list is a PITA, and the operations
941 * 1) validating, 2) submitting commands, and 3) fencing
943 * should really be one atomic operation.
944 * We now "solve" this problem by keeping
945 * the buffer "unfenced" after validating, but before fencing.
948 static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
951 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
952 unsigned long _end = jiffies + 3 * DRM_HZ;
960 mutex_unlock(&bo->mutex);
961 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
962 !drm_bo_check_unfenced(bo));
963 mutex_lock(&bo->mutex);
968 ("Error waiting for buffer to become fenced\n");
971 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
972 } while (ret && !time_after_eq(jiffies, _end));
974 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
984 * Fill in the ioctl reply argument with buffer info.
988 static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
989 drm_bo_arg_reply_t * rep)
991 rep->handle = bo->base.hash.key;
992 rep->flags = bo->mem.flags;
993 rep->size = bo->mem.num_pages * PAGE_SIZE;
994 rep->offset = bo->offset;
995 rep->arg_handle = bo->map_list.user_token;
996 rep->mask = bo->mem.mask;
997 rep->buffer_start = bo->buffer_start;
998 rep->fence_flags = bo->fence_type;
1000 rep->page_alignment = bo->mem.page_alignment;
1002 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1003 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1009 * Wait for buffer idle and register that we've mapped the buffer.
1010 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1011 * so that if the client dies, the mapping is automatically removed.
1015 static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
1016 uint32_t map_flags, unsigned hint,
1017 drm_bo_arg_reply_t * rep)
1019 drm_buffer_object_t *bo;
1020 drm_device_t *dev = priv->head->dev;
1022 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1024 mutex_lock(&dev->struct_mutex);
1025 bo = drm_lookup_buffer_object(priv, handle, 1);
1026 mutex_unlock(&dev->struct_mutex);
1031 mutex_lock(&bo->mutex);
1032 if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1033 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1039 * If this returns true, we are currently unmapped.
1040 * We need to do this test, because unmapping can
1041 * be done without the bo->mutex held.
1045 if (atomic_inc_and_test(&bo->mapped)) {
1046 if (no_wait && drm_bo_busy(bo)) {
1047 atomic_dec(&bo->mapped);
1051 ret = drm_bo_wait(bo, 0, 0, no_wait);
1053 atomic_dec(&bo->mapped);
1057 if ((map_flags & DRM_BO_FLAG_READ) &&
1058 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1059 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1060 drm_bo_read_cached(bo);
1063 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1064 (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1065 (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1068 * We are already mapped with different flags.
1069 * We need to wait for unmap.
1072 ret = drm_bo_wait_unmapped(bo, no_wait);
1081 mutex_lock(&dev->struct_mutex);
1082 ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1083 mutex_unlock(&dev->struct_mutex);
1085 if (atomic_add_negative(-1, &bo->mapped))
1086 DRM_WAKEUP(&bo->event_queue);
1089 drm_bo_fill_rep_arg(bo, rep);
1091 mutex_unlock(&bo->mutex);
1092 drm_bo_usage_deref_unlocked(bo);
1096 static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
1098 drm_device_t *dev = priv->head->dev;
1099 drm_buffer_object_t *bo;
1100 drm_ref_object_t *ro;
1103 mutex_lock(&dev->struct_mutex);
1105 bo = drm_lookup_buffer_object(priv, handle, 1);
1111 ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
1117 drm_remove_ref_object(priv, ro);
1118 drm_bo_usage_deref_locked(bo);
1120 mutex_unlock(&dev->struct_mutex);
1125 * Call dev->struct_mutex locked.
1128 static void drm_buffer_user_object_unmap(drm_file_t * priv,
1129 drm_user_object_t * uo,
1132 drm_buffer_object_t *bo =
1133 drm_user_object_entry(uo, drm_buffer_object_t, base);
1136 * We DON'T want to take bo->mutex here, because we want to be able to
1137 * hold it while we wait for the buffer to become unmapped.
1140 BUG_ON(action != _DRM_REF_TYPE1);
1142 if (atomic_add_negative(-1, &bo->mapped))
1143 DRM_WAKEUP(&bo->event_queue);
1150 static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
1151 int no_wait, int force_no_move, int move_unfenced)
1153 drm_device_t *dev = bo->dev;
1154 drm_buffer_manager_t *bm = &dev->bm;
1156 drm_bo_mem_reg_t mem;
1158 * Flush outstanding fences.
1164 * Wait for outstanding fences.
1167 ret = drm_bo_wait(bo, 0, 0, no_wait);
1172 mem.num_pages = bo->mem.num_pages;
1173 mem.size = mem.num_pages << PAGE_SHIFT;
1174 mem.mask = new_mem_flags;
1175 mem.page_alignment = bo->mem.page_alignment;
1177 mutex_lock(&bm->evict_mutex);
1178 mutex_lock(&dev->struct_mutex);
1180 list_add_tail(&bo->lru, &bm->unfenced);
1181 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1182 _DRM_BO_FLAG_UNFENCED);
1183 mutex_unlock(&dev->struct_mutex);
1186 * Determine where to move the buffer.
1188 ret = drm_bo_mem_space(dev, &mem, no_wait);
1193 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1196 if (ret || !move_unfenced) {
1197 mutex_lock(&dev->struct_mutex);
1199 drm_mm_put_block(mem.mm_node);
1202 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1204 drm_bo_add_to_lru(bo, bm);
1205 mutex_unlock(&dev->struct_mutex);
1208 mutex_unlock(&bm->evict_mutex);
1213 static int drm_bo_mem_compat(drm_bo_mem_reg_t *mem)
1216 flag_diff = (mem->mask ^ mem->flags);
1218 if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1220 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1221 (mem->mask & DRM_BO_FLAG_FORCE_CACHING))
1223 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1224 (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))
1229 static int drm_bo_check_fake(drm_device_t *dev, drm_bo_mem_reg_t *mem)
1231 drm_buffer_manager_t *bm = &dev->bm;
1232 drm_mem_type_manager_t *man;
1233 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1234 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1237 uint32_t mem_type = 0;
1240 if (drm_bo_mem_compat(mem))
1243 BUG_ON(mem->mm_node);
1245 for (i=0; i<num_prios; ++i) {
1246 mem_type = prios[i];
1247 man = &bm->man[mem_type];
1248 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1255 mem->mm_node = NULL;
1256 mem->mem_type = mem_type;
1257 mem->flags = cur_flags;
1258 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1262 DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
1270 static int drm_buffer_object_validate(drm_buffer_object_t * bo,
1271 int move_unfenced, int no_wait)
1273 drm_device_t *dev = bo->dev;
1274 drm_buffer_manager_t *bm = &dev->bm;
1275 uint32_t flag_diff = (bo->mem.mask ^ bo->mem.flags);
1276 drm_bo_driver_t *driver = dev->driver->bo_driver;
1280 DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
1282 ret = driver->fence_type(bo->mem.mask, &bo->fence_class, &bo->fence_type);
1284 DRM_ERROR("Driver did not support given buffer permissions\n");
1288 ret = drm_bo_wait_unmapped(bo, no_wait);
1292 if (bo->type == drm_bo_type_fake) {
1293 ret = drm_bo_check_fake(dev, &bo->mem);
1299 * Check whether we dropped the no_move policy and, in that case,
1300 * release reserved manager regions if we're evicted.
1303 if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
1304 !(bo->mem.mask & DRM_BO_FLAG_NO_MOVE)) {
1310 * Check whether we need to move the buffer.
1312 if (!drm_bo_mem_compat(&bo->mem)) {
1313 ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE,
1314 no_wait, 1, move_unfenced);
1317 DRM_ERROR("Failed moving buffer.\n");
1323 * We might need to add a TTM.
1326 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1327 ret = drm_bo_add_ttm(bo);
1332 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1337 static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
1338 uint32_t flags, uint32_t mask, uint32_t hint,
1339 drm_bo_arg_reply_t * rep)
1341 drm_buffer_object_t *bo;
1343 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1345 bo = drm_lookup_buffer_object(priv, handle, 1);
1350 mutex_lock(&bo->mutex);
1351 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1356 DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1357 ret = drm_bo_new_mask(bo, flags, hint);
1362 drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
1364 drm_bo_fill_rep_arg(bo, rep);
1368 mutex_unlock(&bo->mutex);
1369 drm_bo_usage_deref_unlocked(bo);
1373 static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
1374 drm_bo_arg_reply_t * rep)
1376 drm_buffer_object_t *bo;
1378 bo = drm_lookup_buffer_object(priv, handle, 1);
1382 mutex_lock(&bo->mutex);
1383 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1384 (void)drm_bo_busy(bo);
1385 drm_bo_fill_rep_arg(bo, rep);
1386 mutex_unlock(&bo->mutex);
1387 drm_bo_usage_deref_unlocked(bo);
1391 static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
1392 uint32_t hint, drm_bo_arg_reply_t * rep)
1394 drm_buffer_object_t *bo;
1395 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1398 bo = drm_lookup_buffer_object(priv, handle, 1);
1403 mutex_lock(&bo->mutex);
1404 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1407 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1411 drm_bo_fill_rep_arg(bo, rep);
1414 mutex_unlock(&bo->mutex);
1415 drm_bo_usage_deref_unlocked(bo);
1419 int drm_buffer_object_create(drm_file_t * priv,
1424 uint32_t page_alignment,
1425 unsigned long buffer_start,
1426 drm_buffer_object_t ** buf_obj)
1428 drm_device_t *dev = priv->head->dev;
1429 drm_buffer_manager_t *bm = &dev->bm;
1430 drm_buffer_object_t *bo;
1432 unsigned long num_pages;
1434 if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1435 DRM_ERROR("Invalid buffer object start.\n");
1438 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1439 if (num_pages == 0) {
1440 DRM_ERROR("Illegal buffer object size.\n");
1444 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1449 mutex_init(&bo->mutex);
1450 mutex_lock(&bo->mutex);
1452 atomic_set(&bo->usage, 1);
1453 atomic_set(&bo->mapped, -1);
1454 DRM_INIT_WAITQUEUE(&bo->event_queue);
1455 INIT_LIST_HEAD(&bo->lru);
1456 INIT_LIST_HEAD(&bo->ddestroy);
1457 #ifdef DRM_ODD_MM_COMPAT
1458 INIT_LIST_HEAD(&bo->p_mm_list);
1459 INIT_LIST_HEAD(&bo->vma_list);
1463 bo->mem.num_pages = num_pages;
1464 bo->mem.mm_node = NULL;
1465 bo->mem.page_alignment = page_alignment;
1466 if (bo->type == drm_bo_type_fake) {
1467 bo->offset = buffer_start;
1468 bo->buffer_start = 0;
1470 bo->buffer_start = buffer_start;
1473 bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
1474 bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
1475 atomic_inc(&bm->count);
1476 ret = drm_bo_new_mask(bo, mask, hint);
1481 if (bo->type == drm_bo_type_dc) {
1482 mutex_lock(&dev->struct_mutex);
1483 ret = drm_bo_setup_vm_locked(bo);
1484 mutex_unlock(&dev->struct_mutex);
1488 ret = drm_buffer_object_validate(bo, 0,
1489 hint & DRM_BO_HINT_DONT_BLOCK);
1493 mutex_unlock(&bo->mutex);
1498 mutex_unlock(&bo->mutex);
1499 drm_bo_usage_deref_unlocked(bo);
1503 static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
1506 drm_device_t *dev = priv->head->dev;
1509 mutex_lock(&dev->struct_mutex);
1510 ret = drm_add_user_object(priv, &bo->base, shareable);
1514 bo->base.remove = drm_bo_base_deref_locked;
1515 bo->base.type = drm_buffer_type;
1516 bo->base.ref_struct_locked = NULL;
1517 bo->base.unref = drm_buffer_user_object_unmap;
1520 mutex_unlock(&dev->struct_mutex);
1524 static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
1526 LOCK_TEST_WITH_RETURN(dev, filp);
1530 int drm_bo_ioctl(DRM_IOCTL_ARGS)
1534 drm_bo_arg_request_t *req = &arg.d.req;
1535 drm_bo_arg_reply_t rep;
1537 drm_user_object_t *uo;
1538 drm_buffer_object_t *entry;
1540 if (!dev->bm.initialized) {
1541 DRM_ERROR("Buffer object manager is not initialized.\n");
1546 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1557 drm_buffer_object_create(priv, req->size,
1561 req->page_alignment,
1562 req->buffer_start, &entry);
1567 drm_bo_add_user_object(priv, entry,
1570 DRM_BO_FLAG_SHAREABLE);
1572 drm_bo_usage_deref_unlocked(entry);
1577 mutex_lock(&entry->mutex);
1578 drm_bo_fill_rep_arg(entry, &rep);
1579 mutex_unlock(&entry->mutex);
1582 rep.ret = drm_buffer_object_unmap(priv, req->handle);
1585 rep.ret = drm_buffer_object_map(priv, req->handle,
1589 case drm_bo_destroy:
1590 mutex_lock(&dev->struct_mutex);
1591 uo = drm_lookup_user_object(priv, req->handle);
1592 if (!uo || (uo->type != drm_buffer_type)
1593 || uo->owner != priv) {
1594 mutex_unlock(&dev->struct_mutex);
1598 rep.ret = drm_remove_user_object(priv, uo);
1599 mutex_unlock(&dev->struct_mutex);
1601 case drm_bo_reference:
1602 rep.ret = drm_user_object_ref(priv, req->handle,
1603 drm_buffer_type, &uo);
1606 mutex_lock(&dev->struct_mutex);
1607 uo = drm_lookup_user_object(priv, req->handle);
1609 drm_user_object_entry(uo, drm_buffer_object_t,
1611 atomic_dec(&entry->usage);
1612 mutex_unlock(&dev->struct_mutex);
1613 mutex_lock(&entry->mutex);
1614 drm_bo_fill_rep_arg(entry, &rep);
1615 mutex_unlock(&entry->mutex);
1617 case drm_bo_unreference:
1618 rep.ret = drm_user_object_unref(priv, req->handle,
1621 case drm_bo_validate:
1622 rep.ret = drm_bo_lock_test(dev, filp);
1627 drm_bo_handle_validate(priv, req->handle, req->mask,
1628 req->arg_handle, req->hint,
1632 rep.ret = drm_bo_lock_test(dev, filp);
1637 rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
1639 case drm_bo_wait_idle:
1640 rep.ret = drm_bo_handle_wait(priv, req->handle,
1643 case drm_bo_ref_fence:
1645 DRM_ERROR("Function is not implemented yet.\n");
1652 * A signal interrupted us. Make sure the ioctl is restartable.
1655 if (rep.ret == -EAGAIN)
1660 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
1667 * dev->struct_mutex locked.
1670 static int drm_bo_force_list_clean(drm_device_t * dev,
1671 struct list_head *head,
1673 int force_no_move, int allow_errors)
1675 drm_buffer_manager_t *bm = &dev->bm;
1676 struct list_head *list, *next, *prev;
1677 drm_buffer_object_t *entry;
1683 list_for_each_safe(list, next, head) {
1685 entry = list_entry(list, drm_buffer_object_t, lru);
1686 atomic_inc(&entry->usage);
1687 mutex_unlock(&dev->struct_mutex);
1688 mutex_lock(&entry->mutex);
1689 mutex_lock(&dev->struct_mutex);
1691 if (prev != list->prev || next != list->next) {
1692 mutex_unlock(&entry->mutex);
1693 drm_bo_usage_deref_locked(entry);
1696 if (entry->mem.mm_node) {
1703 mutex_unlock(&dev->struct_mutex);
1704 if (entry->fence && bm->nice_mode) {
1705 unsigned long _end = jiffies + 3 * DRM_HZ;
1707 ret = drm_bo_wait(entry, 0, 1, 0);
1708 if (ret && allow_errors)
1711 } while (ret && !time_after_eq(jiffies, _end));
1715 DRM_ERROR("Detected GPU hang or "
1716 "fence manager was taken down. "
1717 "Evicting waiting buffers\n");
1721 drm_fence_usage_deref_unlocked(dev,
1723 entry->fence = NULL;
1726 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1728 if (force_no_move) {
1729 DRM_FLAG_MASKED(entry->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1731 if (entry->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1732 DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
1733 "cleanup. Removing flag and evicting.\n");
1734 entry->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1735 entry->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
1738 ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
1743 DRM_ERROR("Aargh. Eviction failed.\n");
1746 mutex_lock(&dev->struct_mutex);
1748 mutex_unlock(&entry->mutex);
1749 drm_bo_usage_deref_locked(entry);
1750 if (prev != list->prev || next != list->next) {
1758 mutex_unlock(&entry->mutex);
1759 drm_bo_usage_deref_unlocked(entry);
1760 mutex_lock(&dev->struct_mutex);
1764 int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
1766 drm_buffer_manager_t *bm = &dev->bm;
1767 drm_mem_type_manager_t *man = &bm->man[mem_type];
1768 drm_mem_type_manager_t *local_man = &bm->man[DRM_BO_MEM_LOCAL];
1771 if (mem_type >= DRM_BO_MEM_TYPES) {
1772 DRM_ERROR("Illegal memory type %d\n", mem_type);
1776 if (!man->has_type) {
1777 DRM_ERROR("Trying to take down uninitialized "
1778 "memory manager type\n");
1788 * Throw out unfenced buffers.
1791 drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
1794 * Throw out evicted no-move buffers.
1797 drm_bo_force_list_clean(dev, &local_man->pinned, mem_type, 1, 0);
1798 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0);
1799 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0);
1801 if (drm_mm_clean(&man->manager)) {
1802 drm_mm_takedown(&man->manager);
1811 static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
1814 drm_buffer_manager_t *bm = &dev->bm;
1815 drm_mem_type_manager_t *man = &bm->man[mem_type];
1817 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1818 DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type);
1822 ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
1825 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1);
1829 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1);
1833 static int drm_bo_init_mm(drm_device_t * dev,
1835 unsigned long p_offset, unsigned long p_size)
1837 drm_buffer_manager_t *bm = &dev->bm;
1839 drm_mem_type_manager_t *man;
1841 if (type >= DRM_BO_MEM_TYPES) {
1842 DRM_ERROR("Illegal memory type %d\n", type);
1846 man = &bm->man[type];
1847 if (man->has_type) {
1848 DRM_ERROR("Memory manager already initialized for type %d\n",
1853 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
1858 if (type != DRM_BO_MEM_LOCAL) {
1860 DRM_ERROR("Zero size memory manager type %d\n", type);
1863 ret = drm_mm_init(&man->manager, p_offset, p_size);
1870 INIT_LIST_HEAD(&man->lru);
1871 INIT_LIST_HEAD(&man->pinned);
1877 * This is called from lastclose, so we don't need to worry about
1878 * any clients still running when we set the initialized flag to zero.
1881 int drm_bo_driver_finish(drm_device_t * dev)
1883 drm_buffer_manager_t *bm = &dev->bm;
1885 unsigned i = DRM_BO_MEM_TYPES;
1886 drm_mem_type_manager_t *man;
1888 mutex_lock(&dev->bm.init_mutex);
1889 mutex_lock(&dev->struct_mutex);
1891 if (!bm->initialized)
1893 bm->initialized = 0;
1897 if (man->has_type) {
1899 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
1901 DRM_ERROR("DRM memory manager type %d "
1902 "is not clean.\n", i);
1907 mutex_unlock(&dev->struct_mutex);
1908 if (!cancel_delayed_work(&bm->wq)) {
1909 flush_scheduled_work();
1911 mutex_lock(&dev->struct_mutex);
1912 drm_bo_delayed_delete(dev, 1);
1913 if (list_empty(&bm->ddestroy)) {
1914 DRM_DEBUG("Delayed destroy list was clean\n");
1916 if (list_empty(&bm->man[0].lru)) {
1917 DRM_DEBUG("Swap list was clean\n");
1919 if (list_empty(&bm->man[0].pinned)) {
1920 DRM_DEBUG("NO_MOVE list was clean\n");
1922 if (list_empty(&bm->unfenced)) {
1923 DRM_DEBUG("Unfenced list was clean\n");
1926 mutex_unlock(&dev->struct_mutex);
1927 mutex_unlock(&dev->bm.init_mutex);
1931 int drm_bo_driver_init(drm_device_t * dev)
1933 drm_bo_driver_t *driver = dev->driver->bo_driver;
1934 drm_buffer_manager_t *bm = &dev->bm;
1937 mutex_lock(&dev->bm.init_mutex);
1938 mutex_lock(&dev->struct_mutex);
1943 * Initialize the system memory buffer type.
1944 * Other types need to be driver / IOCTL initialized.
1947 ret = drm_bo_init_mm(dev, 0, 0, 0);
1951 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1952 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
1954 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
1956 bm->initialized = 1;
1958 atomic_set(&bm->count, 0);
1960 INIT_LIST_HEAD(&bm->unfenced);
1961 INIT_LIST_HEAD(&bm->ddestroy);
1963 mutex_unlock(&dev->struct_mutex);
1964 mutex_unlock(&dev->bm.init_mutex);
1968 EXPORT_SYMBOL(drm_bo_driver_init);
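#if 0
/*
 * Illustrative sketch only (kept disabled): an assumed driver load hook for
 * a driver that supplies dev->driver->bo_driver. drm_bo_driver_init() sets
 * up the DRM_BO_MEM_LOCAL type; additional memory types are initialized
 * separately (typically from user space through the drm_mm_init ioctl
 * below), and everything is torn down by drm_bo_driver_finish() at
 * lastclose.
 */
static int example_driver_load(drm_device_t * dev)
{
	return drm_bo_driver_init(dev);
}
#endif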
1970 int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
1975 drm_mm_init_arg_t arg;
1976 drm_buffer_manager_t *bm = &dev->bm;
1977 drm_bo_driver_t *driver = dev->driver->bo_driver;
1980 DRM_ERROR("Buffer objects are not supported by this driver\n");
1984 DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
1986 switch (arg.req.op) {
1989 mutex_lock(&dev->bm.init_mutex);
1990 mutex_lock(&dev->struct_mutex);
1991 if (!bm->initialized) {
1992 DRM_ERROR("DRM memory manager was not initialized.\n");
1995 if (arg.req.mem_type == 0) {
1997 ("System memory buffers already initialized.\n");
2000 ret = drm_bo_init_mm(dev, arg.req.mem_type,
2001 arg.req.p_offset, arg.req.p_size);
2004 LOCK_TEST_WITH_RETURN(dev, filp);
2005 mutex_lock(&dev->bm.init_mutex);
2006 mutex_lock(&dev->struct_mutex);
2008 if (!bm->initialized) {
2009 DRM_ERROR("DRM memory manager was not initialized\n");
2012 if (arg.req.mem_type == 0) {
2013 DRM_ERROR("No takedown for System memory buffers.\n");
2017 if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
2018 DRM_ERROR("Memory manager type %d not clean. "
2019 "Delaying takedown\n", arg.req.mem_type);
2023 LOCK_TEST_WITH_RETURN(dev, filp);
2024 mutex_lock(&dev->bm.init_mutex);
2025 mutex_lock(&dev->struct_mutex);
2026 ret = drm_bo_lock_mm(dev, arg.req.mem_type);
2029 LOCK_TEST_WITH_RETURN(dev, filp);
2030 mutex_lock(&dev->bm.init_mutex);
2031 mutex_lock(&dev->struct_mutex);
2035 DRM_ERROR("Function not implemented yet\n");
2039 mutex_unlock(&dev->struct_mutex);
2040 mutex_unlock(&dev->bm.init_mutex);
2044 DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
2049 * buffer object vm functions.
2052 int drm_mem_reg_is_pci(drm_device_t *dev, drm_bo_mem_reg_t *mem)
2054 drm_buffer_manager_t *bm = &dev->bm;
2055 drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2057 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2058 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2061 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2064 if (mem->flags & DRM_BO_FLAG_CACHED)
2069 EXPORT_SYMBOL(drm_mem_reg_is_pci);
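/*
 * As drm_bo_handle_move_mem() above shows, whether the old and the new
 * placement are PCI-accessible decides if the user-space virtual mappings
 * of a buffer need to be torn down around a move:
 *
 *	if (old_is_pci || new_is_pci)
 *		ret = drm_bo_vm_pre_move(bo, old_is_pci);
 */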
2072 * Get the PCI offset for the buffer object memory.
2074 * \param bo The buffer object.
2075 * \param bus_base On return the base of the PCI region
2076 * \param bus_offset On return the byte offset into the PCI region
2077 * \param bus_size On return the byte size of the buffer object or zero if
2078 * the buffer object memory is not accessible through a PCI region.
2079 * \return Failure indication.
2081 * Returns -EINVAL if the buffer object is currently not mappable.
2082 * Otherwise returns zero.
2085 int drm_bo_pci_offset(drm_device_t *dev,
2086 drm_bo_mem_reg_t *mem,
2087 unsigned long *bus_base,
2088 unsigned long *bus_offset,
2089 unsigned long *bus_size)
2091 drm_buffer_manager_t *bm = &dev->bm;
2092 drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
2095 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2098 if (drm_mem_reg_is_pci(dev, mem)) {
2099 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2100 *bus_size = mem->num_pages << PAGE_SHIFT;
2101 *bus_base = man->io_offset;
2109 * Kill all user-space virtual mappings of this buffer object.
2111 * \param bo The buffer object.
2113 * Call bo->mutex locked.
2116 void drm_bo_unmap_virtual(drm_buffer_object_t *bo)
2118 drm_device_t *dev = bo->dev;
2119 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2120 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2122 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2125 static void drm_bo_takedown_vm_locked(drm_buffer_object_t *bo)
2127 drm_map_list_t *list = &bo->map_list;
2128 drm_local_map_t *map;
2129 drm_device_t *dev = bo->dev;
2131 if (list->user_token) {
2132 drm_ht_remove_item(&dev->map_hash, &list->hash);
2133 list->user_token = 0;
2135 if (list->file_offset_node) {
2136 drm_mm_put_block(list->file_offset_node);
2137 list->file_offset_node = NULL;
2144 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2146 list->user_token = 0ULL;
2147 drm_bo_usage_deref_locked(bo);
2150 static int drm_bo_setup_vm_locked(drm_buffer_object_t *bo)
2152 drm_map_list_t *list = &bo->map_list;
2153 drm_local_map_t *map;
2154 drm_device_t *dev = bo->dev;
2156 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2162 map->type = _DRM_TTM;
2163 map->flags = _DRM_REMOVABLE;
2164 map->size = bo->mem.num_pages * PAGE_SIZE;
2165 atomic_inc(&bo->usage);
2166 map->handle = (void *) bo;
2168 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2169 bo->mem.num_pages, 0, 0);
2171 if (!list->file_offset_node) {
2172 drm_bo_takedown_vm_locked(bo);
2176 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2177 bo->mem.num_pages, 0);
2179 list->hash.key = list->file_offset_node->start;
2180 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2181 drm_bo_takedown_vm_locked(bo);
2185 list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
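/*
 * The user_token computed above is assumed to be the fake mmap offset that
 * user space receives in the ioctl reply (rep->arg_handle, filled in by
 * drm_bo_fill_rep_arg() above) and then passes back to mmap() on the DRM
 * device file to map the buffer, e.g.:
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, rep.arg_handle);
 */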