1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads,
 * hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those,
44 * we need both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48 * the list traversal will, in general, need to be restarted.
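 *
 * Illustrative sketch (not part of the original code): a traversal that
 * honours the bo->mutex -> dev->struct_mutex order described above. The
 * buffer is pinned with its usage counter so it cannot vanish while
 * dev->struct_mutex is dropped, and the walk restarts after every
 * release because the list may have changed meanwhile. The helper name
 * is hypothetical.
 *
 *	static void example_walk(struct drm_device *dev, struct list_head *lru)
 *	{
 *	restart:
 *		mutex_lock(&dev->struct_mutex);
 *		if (!list_empty(lru)) {
 *			struct drm_buffer_object *bo =
 *			    list_entry(lru->next, struct drm_buffer_object, lru);
 *
 *			atomic_inc(&bo->usage);
 *			mutex_unlock(&dev->struct_mutex);
 *			mutex_lock(&bo->mutex);
 *			mutex_lock(&dev->struct_mutex);
 *			(operate on bo here)
 *			mutex_unlock(&bo->mutex);
 *			drm_bo_usage_deref_locked(&bo);
 *			mutex_unlock(&dev->struct_mutex);
 *			goto restart;
 *		}
 *		mutex_unlock(&dev->struct_mutex);
 *	}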
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
57 static inline uint64_t drm_bo_type_flags(unsigned type)
59 return (1ULL << (24 + type));
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
	if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
94 #ifdef DRM_ODD_MM_COMPAT
97 if (!bo->map_list.map)
100 ret = drm_bo_lock_kmm(bo);
103 drm_bo_unmap_virtual(bo);
105 drm_bo_finish_unmap(bo);
107 if (!bo->map_list.map)
110 drm_bo_unmap_virtual(bo);
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
117 #ifdef DRM_ODD_MM_COMPAT
120 if (!bo->map_list.map)
123 ret = drm_bo_remap_bound(bo);
125 DRM_ERROR("Failed to remap a bound buffer object.\n"
126 "\tThis might cause a sigbus later.\n");
128 drm_bo_unlock_kmm(bo);
133 * Call bo->mutex locked.
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
138 struct drm_device *dev = bo->dev;
140 uint32_t page_flags = 0;
142 DRM_ASSERT_LOCKED(&bo->mutex);
145 if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146 page_flags |= DRM_TTM_PAGE_WRITE;
149 case drm_bo_type_device:
150 case drm_bo_type_kernel:
151 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
152 page_flags, dev->bm.dummy_read_page);
156 case drm_bo_type_user:
157 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158 page_flags | DRM_TTM_PAGE_USER,
159 dev->bm.dummy_read_page);
163 ret = drm_ttm_set_user(bo->ttm, current,
171 DRM_ERROR("Illegal buffer object type\n");
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180 struct drm_bo_mem_reg *mem,
181 int evict, int no_wait)
183 struct drm_device *dev = bo->dev;
184 struct drm_buffer_manager *bm = &dev->bm;
185 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
191 if (old_is_pci || new_is_pci ||
192 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193 ret = drm_bo_vm_pre_move(bo, old_is_pci);
198 * Create and bind a ttm if required.
201 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202 ret = drm_bo_add_ttm(bo);
206 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207 ret = drm_ttm_bind(bo->ttm, mem);
213 if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
215 struct drm_bo_mem_reg *old_mem = &bo->mem;
216 uint64_t save_flags = old_mem->flags;
217 uint64_t save_proposed_flags = old_mem->proposed_flags;
221 old_mem->proposed_flags = save_proposed_flags;
222 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
224 } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
225 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
227 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
229 } else if (dev->driver->bo_driver->move) {
230 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
234 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
241 if (old_is_pci || new_is_pci)
242 drm_bo_vm_post_move(bo);
244 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
246 dev->driver->bo_driver->invalidate_caches(dev,
			DRM_ERROR("Cannot flush read caches\n");
252 DRM_FLAG_MASKED(bo->priv_flags,
253 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
254 _DRM_BO_FLAG_EVICTED);
257 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
258 bm->man[bo->mem.mem_type].gpu_offset;
264 if (old_is_pci || new_is_pci)
265 drm_bo_vm_post_move(bo);
267 new_man = &bm->man[bo->mem.mem_type];
268 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
269 drm_ttm_unbind(bo->ttm);
270 drm_ttm_destroy(bo->ttm);
278 * Call bo->mutex locked.
279 * Wait until the buffer is idle.
282 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
287 DRM_ASSERT_LOCKED(&bo->mutex);
290 if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
291 drm_fence_usage_deref_unlocked(&bo->fence);
297 ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
302 drm_fence_usage_deref_unlocked(&bo->fence);
306 EXPORT_SYMBOL(drm_bo_wait);
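
/*
 * Example use of drm_bo_wait() (illustrative sketch only): block until
 * the GPU is done with a buffer before touching it from the CPU. Per
 * the comment above, bo->mutex must be held. With lazy, ignore_signals
 * and no_wait all zero, the call simply blocks until the buffer's fence
 * has signaled.
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 0, 0);
 *	if (ret == 0)
 *		(buffer is idle; its backing store may be accessed)
 *	mutex_unlock(&bo->mutex);
 */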
308 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
310 struct drm_device *dev = bo->dev;
311 struct drm_buffer_manager *bm = &dev->bm;
315 unsigned long _end = jiffies + 3 * DRM_HZ;
318 ret = drm_bo_wait(bo, 0, 1, 0);
319 if (ret && allow_errors)
322 } while (ret && !time_after_eq(jiffies, _end));
326 DRM_ERROR("Detected GPU lockup or "
327 "fence driver was taken down. "
328 "Evicting buffer.\n");
332 drm_fence_usage_deref_unlocked(&bo->fence);
338 * Call dev->struct_mutex locked.
339 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing it from lru lists and memory managers.
343 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
345 struct drm_device *dev = bo->dev;
346 struct drm_buffer_manager *bm = &dev->bm;
348 DRM_ASSERT_LOCKED(&dev->struct_mutex);
350 atomic_inc(&bo->usage);
351 mutex_unlock(&dev->struct_mutex);
352 mutex_lock(&bo->mutex);
354 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
356 if (bo->fence && drm_fence_object_signaled(bo->fence,
358 drm_fence_usage_deref_unlocked(&bo->fence);
360 if (bo->fence && remove_all)
361 (void)drm_bo_expire_fence(bo, 0);
363 mutex_lock(&dev->struct_mutex);
365 if (!atomic_dec_and_test(&bo->usage))
369 list_del_init(&bo->lru);
370 if (bo->mem.mm_node) {
371 drm_mm_put_block(bo->mem.mm_node);
372 if (bo->pinned_node == bo->mem.mm_node)
373 bo->pinned_node = NULL;
374 bo->mem.mm_node = NULL;
376 list_del_init(&bo->pinned_lru);
377 if (bo->pinned_node) {
378 drm_mm_put_block(bo->pinned_node);
379 bo->pinned_node = NULL;
381 list_del_init(&bo->ddestroy);
382 mutex_unlock(&bo->mutex);
383 drm_bo_destroy_locked(bo);
387 if (list_empty(&bo->ddestroy)) {
388 drm_fence_object_flush(bo->fence, bo->fence_type);
389 list_add_tail(&bo->ddestroy, &bm->ddestroy);
390 schedule_delayed_work(&bm->wq,
391 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
395 mutex_unlock(&bo->mutex);
400 * Verify that refcount is 0 and that there are no internal references
401 * to the buffer object. Then destroy it.
404 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
406 struct drm_device *dev = bo->dev;
407 struct drm_buffer_manager *bm = &dev->bm;
409 DRM_ASSERT_LOCKED(&dev->struct_mutex);
411 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
412 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
413 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
414 if (bo->fence != NULL) {
415 DRM_ERROR("Fence was non-zero.\n");
416 drm_bo_cleanup_refs(bo, 0);
420 #ifdef DRM_ODD_MM_COMPAT
421 BUG_ON(!list_empty(&bo->vma_list));
422 BUG_ON(!list_empty(&bo->p_mm_list));
426 drm_ttm_unbind(bo->ttm);
427 drm_ttm_destroy(bo->ttm);
431 atomic_dec(&bm->count);
433 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
 * Something is still trying to reference the buffer object.
440 * Get rid of those references.
443 drm_bo_cleanup_refs(bo, 0);
449 * Call dev->struct_mutex locked.
452 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
454 struct drm_buffer_manager *bm = &dev->bm;
456 struct drm_buffer_object *entry, *nentry;
457 struct list_head *list, *next;
459 list_for_each_safe(list, next, &bm->ddestroy) {
460 entry = list_entry(list, struct drm_buffer_object, ddestroy);
463 if (next != &bm->ddestroy) {
464 nentry = list_entry(next, struct drm_buffer_object,
466 atomic_inc(&nentry->usage);
469 drm_bo_cleanup_refs(entry, remove_all);
472 atomic_dec(&nentry->usage);
476 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
477 static void drm_bo_delayed_workqueue(void *data)
479 static void drm_bo_delayed_workqueue(struct work_struct *work)
482 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
483 struct drm_device *dev = (struct drm_device *) data;
484 struct drm_buffer_manager *bm = &dev->bm;
486 struct drm_buffer_manager *bm =
487 container_of(work, struct drm_buffer_manager, wq.work);
488 struct drm_device *dev = container_of(bm, struct drm_device, bm);
	DRM_DEBUG("Delayed delete worker\n");
493 mutex_lock(&dev->struct_mutex);
494 if (!bm->initialized) {
495 mutex_unlock(&dev->struct_mutex);
498 drm_bo_delayed_delete(dev, 0);
499 if (bm->initialized && !list_empty(&bm->ddestroy)) {
500 schedule_delayed_work(&bm->wq,
501 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
503 mutex_unlock(&dev->struct_mutex);
506 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
508 struct drm_buffer_object *tmp_bo = *bo;
511 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
513 if (atomic_dec_and_test(&tmp_bo->usage))
514 drm_bo_destroy_locked(tmp_bo);
516 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
518 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
519 struct drm_user_object *uo)
521 struct drm_buffer_object *bo =
522 drm_user_object_entry(uo, struct drm_buffer_object, base);
524 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
526 drm_bo_takedown_vm_locked(bo);
527 drm_bo_usage_deref_locked(&bo);
530 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
532 struct drm_buffer_object *tmp_bo = *bo;
533 struct drm_device *dev = tmp_bo->dev;
	if (atomic_dec_and_test(&tmp_bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		/*
		 * Recheck under dev->struct_mutex: another thread may have
		 * taken a new reference while we acquired the lock.
		 */
		if (atomic_read(&tmp_bo->usage) == 0)
			drm_bo_destroy_locked(tmp_bo);
		mutex_unlock(&dev->struct_mutex);
	}
543 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
545 void drm_putback_buffer_objects(struct drm_device *dev)
547 struct drm_buffer_manager *bm = &dev->bm;
548 struct list_head *list = &bm->unfenced;
549 struct drm_buffer_object *entry, *next;
551 mutex_lock(&dev->struct_mutex);
552 list_for_each_entry_safe(entry, next, list, lru) {
553 atomic_inc(&entry->usage);
554 mutex_unlock(&dev->struct_mutex);
556 mutex_lock(&entry->mutex);
557 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
558 mutex_lock(&dev->struct_mutex);
560 list_del_init(&entry->lru);
561 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
562 wake_up_all(&entry->event_queue);
565 * FIXME: Might want to put back on head of list
566 * instead of tail here.
569 drm_bo_add_to_lru(entry);
570 mutex_unlock(&entry->mutex);
571 drm_bo_usage_deref_locked(&entry);
573 mutex_unlock(&dev->struct_mutex);
575 EXPORT_SYMBOL(drm_putback_buffer_objects);
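
/*
 * Usage note (illustrative sketch; the emit function is hypothetical):
 * this is the failure-path counterpart of drm_fence_buffer_objects()
 * below. If command submission fails after buffers have been validated
 * onto the unfenced list, put them back on their normal LRU lists
 * instead of fencing them:
 *
 *	ret = my_driver_emit(dev);
 *	if (ret)
 *		drm_putback_buffer_objects(dev);
 */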
 * Note: the caller has to register (if applicable)
 * and deregister fence object usage.
583 int drm_fence_buffer_objects(struct drm_device *dev,
584 struct list_head *list,
585 uint32_t fence_flags,
586 struct drm_fence_object *fence,
587 struct drm_fence_object **used_fence)
589 struct drm_buffer_manager *bm = &dev->bm;
590 struct drm_buffer_object *entry;
591 uint32_t fence_type = 0;
592 uint32_t fence_class = ~0;
597 mutex_lock(&dev->struct_mutex);
600 list = &bm->unfenced;
603 fence_class = fence->fence_class;
605 list_for_each_entry(entry, list, lru) {
606 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
607 fence_type |= entry->new_fence_type;
608 if (fence_class == ~0)
609 fence_class = entry->new_fence_class;
610 else if (entry->new_fence_class != fence_class) {
			DRM_ERROR("Mismatched fence classes on unfenced list: "
614 entry->new_fence_class);
627 if ((fence_type & fence->type) != fence_type ||
628 (fence->fence_class != fence_class)) {
629 DRM_ERROR("Given fence doesn't match buffers "
630 "on unfenced list.\n");
635 mutex_unlock(&dev->struct_mutex);
636 ret = drm_fence_object_create(dev, fence_class, fence_type,
637 fence_flags | DRM_FENCE_FLAG_EMIT,
639 mutex_lock(&dev->struct_mutex);
648 entry = list_entry(l, struct drm_buffer_object, lru);
649 atomic_inc(&entry->usage);
650 mutex_unlock(&dev->struct_mutex);
651 mutex_lock(&entry->mutex);
652 mutex_lock(&dev->struct_mutex);
654 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
657 drm_fence_usage_deref_locked(&entry->fence);
658 entry->fence = drm_fence_reference_locked(fence);
659 entry->fence_class = entry->new_fence_class;
660 entry->fence_type = entry->new_fence_type;
661 DRM_FLAG_MASKED(entry->priv_flags, 0,
662 _DRM_BO_FLAG_UNFENCED);
663 wake_up_all(&entry->event_queue);
664 drm_bo_add_to_lru(entry);
666 mutex_unlock(&entry->mutex);
667 drm_bo_usage_deref_locked(&entry);
670 DRM_DEBUG("Fenced %d buffers\n", count);
672 mutex_unlock(&dev->struct_mutex);
676 EXPORT_SYMBOL(drm_fence_buffer_objects);
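
/*
 * Example (illustrative sketch only): fencing the unfenced list after a
 * successful submission. A NULL list argument falls back to the
 * device-wide bm->unfenced list (see above), and since no fence is
 * passed in, one is created and emitted. The reference returned in
 * used_fence must be dropped by the caller, per the note above.
 *
 *	struct drm_fence_object *fence = NULL;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (ret == 0)
 *		drm_fence_usage_deref_unlocked(&fence);
 */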
682 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
686 struct drm_device *dev = bo->dev;
687 struct drm_bo_mem_reg evict_mem;
 * Someone might have modified the buffer before we took the
 * buffer mutex.
694 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
696 if (bo->mem.mem_type != mem_type)
699 ret = drm_bo_wait(bo, 0, 0, no_wait);
701 if (ret && ret != -EAGAIN) {
702 DRM_ERROR("Failed to expire fence before "
703 "buffer eviction.\n");
708 evict_mem.mm_node = NULL;
711 evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
712 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
716 DRM_ERROR("Failed to find memory space for "
717 "buffer 0x%p eviction.\n", bo);
721 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
725 DRM_ERROR("Buffer eviction failed\n");
729 mutex_lock(&dev->struct_mutex);
730 if (evict_mem.mm_node) {
731 if (evict_mem.mm_node != bo->pinned_node)
732 drm_mm_put_block(evict_mem.mm_node);
733 evict_mem.mm_node = NULL;
736 drm_bo_add_to_lru(bo);
737 mutex_unlock(&dev->struct_mutex);
739 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
740 _DRM_BO_FLAG_EVICTED);
747 * Repeatedly evict memory from the LRU for @mem_type until we create enough
748 * space, or we've evicted everything and there isn't enough space.
750 static int drm_bo_mem_force_space(struct drm_device *dev,
751 struct drm_bo_mem_reg *mem,
752 uint32_t mem_type, int no_wait)
754 struct drm_mm_node *node;
755 struct drm_buffer_manager *bm = &dev->bm;
756 struct drm_buffer_object *entry;
757 struct drm_mem_type_manager *man = &bm->man[mem_type];
758 struct list_head *lru;
759 unsigned long num_pages = mem->num_pages;
762 mutex_lock(&dev->struct_mutex);
764 node = drm_mm_search_free(&man->manager, num_pages,
765 mem->page_alignment, 1);
		if (lru->next == lru)
			break;
773 entry = list_entry(lru->next, struct drm_buffer_object, lru);
774 atomic_inc(&entry->usage);
775 mutex_unlock(&dev->struct_mutex);
776 mutex_lock(&entry->mutex);
777 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
779 ret = drm_bo_evict(entry, mem_type, no_wait);
780 mutex_unlock(&entry->mutex);
781 drm_bo_usage_deref_unlocked(&entry);
784 mutex_lock(&dev->struct_mutex);
788 mutex_unlock(&dev->struct_mutex);
792 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
793 mutex_unlock(&dev->struct_mutex);
795 mem->mem_type = mem_type;
799 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
802 uint64_t mask, uint32_t *res_mask)
804 uint64_t cur_flags = drm_bo_type_flags(mem_type);
807 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
809 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
810 cur_flags |= DRM_BO_FLAG_CACHED;
811 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
812 cur_flags |= DRM_BO_FLAG_MAPPABLE;
813 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
814 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
816 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
819 if (mem_type == DRM_BO_MEM_LOCAL) {
820 *res_mask = cur_flags;
824 flag_diff = (mask ^ cur_flags);
825 if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
826 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
828 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
829 (!(mask & DRM_BO_FLAG_CACHED) ||
830 (mask & DRM_BO_FLAG_FORCE_CACHING)))
833 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
834 ((mask & DRM_BO_FLAG_MAPPABLE) ||
835 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
838 *res_mask = cur_flags;
843 * Creates space for memory region @mem according to its type.
845 * This function first searches for free space in compatible memory types in
846 * the priority order defined by the driver. If free space isn't found, then
 * drm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
850 int drm_bo_mem_space(struct drm_buffer_object *bo,
851 struct drm_bo_mem_reg *mem, int no_wait)
853 struct drm_device *dev = bo->dev;
854 struct drm_buffer_manager *bm = &dev->bm;
855 struct drm_mem_type_manager *man;
857 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
858 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
860 uint32_t mem_type = DRM_BO_MEM_LOCAL;
865 struct drm_mm_node *node = NULL;
869 for (i = 0; i < num_prios; ++i) {
871 man = &bm->man[mem_type];
873 type_ok = drm_bo_mt_compatible(man,
874 bo->type == drm_bo_type_user,
875 mem_type, mem->proposed_flags,
881 if (mem_type == DRM_BO_MEM_LOCAL)
884 if ((mem_type == bo->pinned_mem_type) &&
885 (bo->pinned_node != NULL)) {
886 node = bo->pinned_node;
890 mutex_lock(&dev->struct_mutex);
891 if (man->has_type && man->use_type) {
893 node = drm_mm_search_free(&man->manager, mem->num_pages,
894 mem->page_alignment, 1);
896 node = drm_mm_get_block(node, mem->num_pages,
897 mem->page_alignment);
899 mutex_unlock(&dev->struct_mutex);
904 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
906 mem->mem_type = mem_type;
907 mem->flags = cur_flags;
914 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
915 prios = dev->driver->bo_driver->mem_busy_prio;
917 for (i = 0; i < num_prios; ++i) {
919 man = &bm->man[mem_type];
924 if (!drm_bo_mt_compatible(man,
925 bo->type == drm_bo_type_user,
931 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
933 if (ret == 0 && mem->mm_node) {
934 mem->flags = cur_flags;
942 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
945 EXPORT_SYMBOL(drm_bo_mem_space);
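
/*
 * Example (illustrative sketch only): asking for space for a proposed
 * placement, mirroring the use in drm_bo_move_buffer() below. On
 * success, mem.mm_node, mem.mem_type and mem.flags describe the space
 * found; flag values are the DRM_BO_FLAG_* ones from drm.h.
 *
 *	struct drm_bo_mem_reg mem;
 *
 *	mem.num_pages = bo->num_pages;
 *	mem.size = mem.num_pages << PAGE_SHIFT;
 *	mem.proposed_flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
 *	mem.page_alignment = bo->mem.page_alignment;
 *	ret = drm_bo_mem_space(bo, &mem, 0);
 */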
 * drm_bo_modify_proposed_flags:
950 * @bo: the buffer object getting new flags
952 * @new_flags: the new set of proposed flag bits
954 * @new_mask: the mask of bits changed in new_flags
 * Modify the proposed_flags bits in @bo
958 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
959 uint64_t new_flags, uint64_t new_mask)
963 /* Copy unchanging bits from existing proposed_flags */
964 DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
966 if (bo->type == drm_bo_type_user &&
967 ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
968 (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
969 DRM_ERROR("User buffers require cache-coherent memory.\n");
973 if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
978 if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
979 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
983 new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
986 if (new_access == 0) {
987 DRM_ERROR("Invalid buffer object rwx properties\n");
991 bo->mem.proposed_flags = new_flags;
996 * Call dev->struct_mutex locked.
999 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1000 uint32_t handle, int check_owner)
1002 struct drm_user_object *uo;
1003 struct drm_buffer_object *bo;
1005 uo = drm_lookup_user_object(file_priv, handle);
1007 if (!uo || (uo->type != drm_buffer_type)) {
1008 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1012 if (check_owner && file_priv != uo->owner) {
1013 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1017 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1018 atomic_inc(&bo->usage);
1021 EXPORT_SYMBOL(drm_lookup_buffer_object);
1024 * Call bo->mutex locked.
1025 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Unlike drm_bo_busy(), doesn't do any fence flushing.
1029 static int drm_bo_quick_busy(struct drm_buffer_object *bo)
1031 struct drm_fence_object *fence = bo->fence;
1033 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1035 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1036 drm_fence_usage_deref_unlocked(&bo->fence);
1045 * Call bo->mutex locked.
1046 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1049 static int drm_bo_busy(struct drm_buffer_object *bo)
1051 struct drm_fence_object *fence = bo->fence;
1053 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1055 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1056 drm_fence_usage_deref_unlocked(&bo->fence);
1059 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1060 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1061 drm_fence_usage_deref_unlocked(&bo->fence);
1069 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1073 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1074 if (bo->mem.mm_node)
1075 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1079 EXPORT_SYMBOL(drm_bo_evict_cached);
1081 * Wait until a buffer is unmapped.
1084 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;
1091 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1092 atomic_read(&bo->mapped) == -1);
1100 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
1104 mutex_lock(&bo->mutex);
1105 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1106 mutex_unlock(&bo->mutex);
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1112 * Until then, we cannot really do anything with it except delete it.
1115 static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
1118 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1126 mutex_unlock(&bo->mutex);
1127 DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
1128 !drm_bo_check_unfenced(bo));
1129 mutex_lock(&bo->mutex);
1132 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1134 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1144 * Fill in the ioctl reply argument with buffer info.
1148 static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1149 struct drm_bo_info_rep *rep)
1154 rep->handle = bo->base.hash.key;
1155 rep->flags = bo->mem.flags;
1156 rep->size = bo->num_pages * PAGE_SIZE;
1157 rep->offset = bo->offset;
1160 * drm_bo_type_device buffers have user-visible
1161 * handles which can be used to share across
1162 * processes. Hand that back to the application
1164 if (bo->type == drm_bo_type_device)
1165 rep->arg_handle = bo->map_list.user_token;
1167 rep->arg_handle = 0;
1169 rep->proposed_flags = bo->mem.proposed_flags;
1170 rep->buffer_start = bo->buffer_start;
1171 rep->fence_flags = bo->fence_type;
1173 rep->page_alignment = bo->mem.page_alignment;
1175 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1176 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1182 * Wait for buffer idle and register that we've mapped the buffer.
1183 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically unregistered.
1188 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1189 uint32_t map_flags, unsigned hint,
1190 struct drm_bo_info_rep *rep)
1192 struct drm_buffer_object *bo;
1193 struct drm_device *dev = file_priv->minor->dev;
1195 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1197 mutex_lock(&dev->struct_mutex);
1198 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1199 mutex_unlock(&dev->struct_mutex);
1204 mutex_lock(&bo->mutex);
1205 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1210 * If this returns true, we are currently unmapped.
1211 * We need to do this test, because unmapping can
1212 * be done without the bo->mutex held.
1216 if (atomic_inc_and_test(&bo->mapped)) {
1217 if (no_wait && drm_bo_busy(bo)) {
1218 atomic_dec(&bo->mapped);
1222 ret = drm_bo_wait(bo, 0, 0, no_wait);
1224 atomic_dec(&bo->mapped);
1228 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1229 drm_bo_evict_cached(bo);
1232 } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
1235 * We are already mapped with different flags.
 * We need to wait for unmap.
1239 ret = drm_bo_wait_unmapped(bo, no_wait);
1248 mutex_lock(&dev->struct_mutex);
1249 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1250 mutex_unlock(&dev->struct_mutex);
1252 if (atomic_add_negative(-1, &bo->mapped))
1253 wake_up_all(&bo->event_queue);
1256 drm_bo_fill_rep_arg(bo, rep);
1258 mutex_unlock(&bo->mutex);
1259 drm_bo_usage_deref_unlocked(&bo);
1263 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1265 struct drm_device *dev = file_priv->minor->dev;
1266 struct drm_buffer_object *bo;
1267 struct drm_ref_object *ro;
1270 mutex_lock(&dev->struct_mutex);
1272 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1278 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1284 drm_remove_ref_object(file_priv, ro);
1285 drm_bo_usage_deref_locked(&bo);
1287 mutex_unlock(&dev->struct_mutex);
 * Call dev->struct_mutex locked.
1295 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1296 struct drm_user_object *uo,
1297 enum drm_ref_type action)
1299 struct drm_buffer_object *bo =
1300 drm_user_object_entry(uo, struct drm_buffer_object, base);
1303 * We DON'T want to take the bo->lock here, because we want to
1304 * hold it when we wait for unmapped buffer.
1307 BUG_ON(action != _DRM_REF_TYPE1);
1309 if (atomic_add_negative(-1, &bo->mapped))
1310 wake_up_all(&bo->event_queue);
1315 * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1318 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1319 int no_wait, int move_unfenced)
1321 struct drm_device *dev = bo->dev;
1322 struct drm_buffer_manager *bm = &dev->bm;
1324 struct drm_bo_mem_reg mem;
1326 * Flush outstanding fences.
1332 * Wait for outstanding fences.
1335 ret = drm_bo_wait(bo, 0, 0, no_wait);
1339 mem.num_pages = bo->num_pages;
1340 mem.size = mem.num_pages << PAGE_SHIFT;
1341 mem.proposed_flags = new_mem_flags;
1342 mem.page_alignment = bo->mem.page_alignment;
1344 mutex_lock(&bm->evict_mutex);
1345 mutex_lock(&dev->struct_mutex);
1346 list_del_init(&bo->lru);
1347 mutex_unlock(&dev->struct_mutex);
1350 * Determine where to move the buffer.
1352 ret = drm_bo_mem_space(bo, &mem, no_wait);
1356 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1359 mutex_lock(&dev->struct_mutex);
1360 if (ret || !move_unfenced) {
1362 if (mem.mm_node != bo->pinned_node)
1363 drm_mm_put_block(mem.mm_node);
1366 drm_bo_add_to_lru(bo);
1367 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1368 wake_up_all(&bo->event_queue);
1369 DRM_FLAG_MASKED(bo->priv_flags, 0,
1370 _DRM_BO_FLAG_UNFENCED);
1373 list_add_tail(&bo->lru, &bm->unfenced);
1374 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1375 _DRM_BO_FLAG_UNFENCED);
1377 mutex_unlock(&dev->struct_mutex);
1378 mutex_unlock(&bm->evict_mutex);
1382 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1384 uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1386 if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1388 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1389 (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1390 (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1393 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1394 ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1395 (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1401 * drm_buffer_object_validate:
1403 * @bo: the buffer object to modify
1405 * @fence_class: the new fence class covering this buffer
1407 * @move_unfenced: a boolean indicating whether switching the
1408 * memory space of this buffer should cause the buffer to
1409 * be placed on the unfenced list.
1411 * @no_wait: whether this function should return -EBUSY instead
1414 * Change buffer access parameters. This can involve moving
1415 * the buffer to the correct memory type, pinning the buffer
1416 * or changing the class/type of fence covering this buffer
1418 * Must be called with bo locked.
1421 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1422 uint32_t fence_class,
1423 int move_unfenced, int no_wait)
1425 struct drm_device *dev = bo->dev;
1426 struct drm_buffer_manager *bm = &dev->bm;
1427 struct drm_bo_driver *driver = dev->driver->bo_driver;
1431 DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1432 (unsigned long long) bo->mem.proposed_flags,
1433 (unsigned long long) bo->mem.flags);
1435 ret = driver->fence_type(bo, &fence_class, &ftype);
1438 DRM_ERROR("Driver did not support given buffer permissions\n");
1443 * We're switching command submission mechanism,
1444 * or cannot simply rely on the hardware serializing for us.
 * Insert a driver-dependent barrier or wait for buffer idle.
1449 if ((fence_class != bo->fence_class) ||
1450 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1453 if (driver->command_stream_barrier) {
1454 ret = driver->command_stream_barrier(bo,
1460 ret = drm_bo_wait(bo, 0, 0, no_wait);
1467 bo->new_fence_class = fence_class;
1468 bo->new_fence_type = ftype;
1470 ret = drm_bo_wait_unmapped(bo, no_wait);
1472 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1477 * Check whether we need to move buffer.
1480 if (!drm_bo_mem_compat(&bo->mem)) {
1481 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1485 DRM_ERROR("Failed moving buffer.\n");
1494 if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1495 bo->pinned_mem_type = bo->mem.mem_type;
1496 mutex_lock(&dev->struct_mutex);
1497 list_del_init(&bo->pinned_lru);
1498 drm_bo_add_to_pinned_lru(bo);
1500 if (bo->pinned_node != bo->mem.mm_node) {
1501 if (bo->pinned_node != NULL)
1502 drm_mm_put_block(bo->pinned_node);
1503 bo->pinned_node = bo->mem.mm_node;
1506 mutex_unlock(&dev->struct_mutex);
1508 } else if (bo->pinned_node != NULL) {
1510 mutex_lock(&dev->struct_mutex);
1512 if (bo->pinned_node != bo->mem.mm_node)
1513 drm_mm_put_block(bo->pinned_node);
1515 list_del_init(&bo->pinned_lru);
1516 bo->pinned_node = NULL;
1517 mutex_unlock(&dev->struct_mutex);
1522 * We might need to add a TTM.
1525 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1526 ret = drm_bo_add_ttm(bo);
 * Validation has succeeded, move the access and other
 * non-mapping-related flag bits from the proposed flags to
 * the actual flags.
1536 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1539 * Finally, adjust lru to be sure.
1542 mutex_lock(&dev->struct_mutex);
1544 if (move_unfenced) {
1545 list_add_tail(&bo->lru, &bm->unfenced);
1546 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1547 _DRM_BO_FLAG_UNFENCED);
1549 drm_bo_add_to_lru(bo);
1550 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1551 wake_up_all(&bo->event_queue);
1552 DRM_FLAG_MASKED(bo->priv_flags, 0,
1553 _DRM_BO_FLAG_UNFENCED);
1556 mutex_unlock(&dev->struct_mutex);
1562 * drm_bo_do_validate:
1564 * @bo: the buffer object
1566 * @flags: access rights, mapping parameters and cacheability. See
1567 * the DRM_BO_FLAG_* values in drm.h
1569 * @mask: Which flag values to change; this allows callers to modify
1570 * things without knowing the current state of other flags.
 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1575 * @fence_class: a driver-specific way of doing fences. Presumably,
1576 * this would be used if the driver had more than one submission and
1577 * fencing mechanism. At this point, there isn't any use of this
1578 * from the user mode code.
1580 * @rep: To be stuffed with the reply from validation
1582 * 'validate' a buffer object. This changes where the buffer is
1583 * located, along with changing access modes.
1586 int drm_bo_do_validate(struct drm_buffer_object *bo,
1587 uint64_t flags, uint64_t mask, uint32_t hint,
1588 uint32_t fence_class,
1589 struct drm_bo_info_rep *rep)
1592 int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1594 mutex_lock(&bo->mutex);
1595 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1600 ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1604 ret = drm_buffer_object_validate(bo,
1606 !(hint & DRM_BO_HINT_DONT_FENCE),
1610 drm_bo_fill_rep_arg(bo, rep);
1612 mutex_unlock(&bo->mutex);
1615 EXPORT_SYMBOL(drm_bo_do_validate);
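
/*
 * Example (illustrative sketch only): pinning a buffer in VRAM, e.g.
 * for scanout. Only the bits set in the mask argument are changed, so
 * unrelated flags keep their current values. Flag names are the
 * DRM_BO_FLAG_* values from drm.h; the memory type is a driver choice.
 *
 *	struct drm_bo_info_rep rep;
 *
 *	ret = drm_bo_do_validate(bo,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
 *				 DRM_BO_HINT_DONT_FENCE, 0, &rep);
 */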
1618 * drm_bo_handle_validate
1620 * @file_priv: the drm file private, used to get a handle to the user context
1622 * @handle: the buffer object handle
1624 * @flags: access rights, mapping parameters and cacheability. See
1625 * the DRM_BO_FLAG_* values in drm.h
1627 * @mask: Which flag values to change; this allows callers to modify
1628 * things without knowing the current state of other flags.
 * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1633 * @fence_class: a driver-specific way of doing fences. Presumably,
1634 * this would be used if the driver had more than one submission and
1635 * fencing mechanism. At this point, there isn't any use of this
1636 * from the user mode code.
1638 * @use_old_fence_class: don't change fence class, pull it from the buffer object
1640 * @rep: To be stuffed with the reply from validation
 * @bo_rep: To be stuffed with the buffer object pointer
1644 * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
1645 * Some permissions checking is done on the parameters, otherwise this
1646 * is a thin wrapper.
1649 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1650 uint64_t flags, uint64_t mask,
1652 uint32_t fence_class,
1653 int use_old_fence_class,
1654 struct drm_bo_info_rep *rep,
1655 struct drm_buffer_object **bo_rep)
1657 struct drm_device *dev = file_priv->minor->dev;
1658 struct drm_buffer_object *bo;
1661 mutex_lock(&dev->struct_mutex);
1662 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1663 mutex_unlock(&dev->struct_mutex);
1668 if (use_old_fence_class)
1669 fence_class = bo->fence_class;
1672 * Only allow creator to change shared buffer mask.
1675 if (bo->base.owner != file_priv)
1676 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1679 ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1684 drm_bo_usage_deref_unlocked(&bo);
1688 EXPORT_SYMBOL(drm_bo_handle_validate);
1690 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1691 struct drm_bo_info_rep *rep)
1693 struct drm_device *dev = file_priv->minor->dev;
1694 struct drm_buffer_object *bo;
1696 mutex_lock(&dev->struct_mutex);
1697 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1698 mutex_unlock(&dev->struct_mutex);
1703 mutex_lock(&bo->mutex);
1704 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1705 (void)drm_bo_busy(bo);
1706 drm_bo_fill_rep_arg(bo, rep);
1707 mutex_unlock(&bo->mutex);
1708 drm_bo_usage_deref_unlocked(&bo);
1712 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1714 struct drm_bo_info_rep *rep)
1716 struct drm_device *dev = file_priv->minor->dev;
1717 struct drm_buffer_object *bo;
1718 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1721 mutex_lock(&dev->struct_mutex);
1722 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1723 mutex_unlock(&dev->struct_mutex);
1728 mutex_lock(&bo->mutex);
1729 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1732 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1736 drm_bo_fill_rep_arg(bo, rep);
1739 mutex_unlock(&bo->mutex);
1740 drm_bo_usage_deref_unlocked(&bo);
1744 int drm_buffer_object_create(struct drm_device *dev,
1746 enum drm_bo_type type,
1749 uint32_t page_alignment,
1750 unsigned long buffer_start,
1751 struct drm_buffer_object **buf_obj)
1753 struct drm_buffer_manager *bm = &dev->bm;
1754 struct drm_buffer_object *bo;
1756 unsigned long num_pages;
1758 size += buffer_start & ~PAGE_MASK;
1759 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1760 if (num_pages == 0) {
1761 DRM_ERROR("Illegal buffer object size.\n");
1765 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1770 mutex_init(&bo->mutex);
1771 mutex_lock(&bo->mutex);
1773 atomic_set(&bo->usage, 1);
1774 atomic_set(&bo->mapped, -1);
1775 DRM_INIT_WAITQUEUE(&bo->event_queue);
1776 INIT_LIST_HEAD(&bo->lru);
1777 INIT_LIST_HEAD(&bo->pinned_lru);
1778 INIT_LIST_HEAD(&bo->ddestroy);
1779 #ifdef DRM_ODD_MM_COMPAT
1780 INIT_LIST_HEAD(&bo->p_mm_list);
1781 INIT_LIST_HEAD(&bo->vma_list);
1785 bo->num_pages = num_pages;
1786 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1787 bo->mem.num_pages = bo->num_pages;
1788 bo->mem.mm_node = NULL;
1789 bo->mem.page_alignment = page_alignment;
1790 bo->buffer_start = buffer_start & PAGE_MASK;
1792 bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1793 DRM_BO_FLAG_MAPPABLE);
1794 bo->mem.proposed_flags = 0;
1795 atomic_inc(&bm->count);
1797 * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1799 ret = drm_bo_modify_proposed_flags (bo, flags, flags);
1804 * For drm_bo_type_device buffers, allocate
1805 * address space from the device so that applications
1806 * can mmap the buffer from there
1808 if (bo->type == drm_bo_type_device) {
1809 mutex_lock(&dev->struct_mutex);
1810 ret = drm_bo_setup_vm_locked(bo);
1811 mutex_unlock(&dev->struct_mutex);
1816 ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1820 mutex_unlock(&bo->mutex);
1825 mutex_unlock(&bo->mutex);
1827 drm_bo_usage_deref_unlocked(&bo);
1830 EXPORT_SYMBOL(drm_buffer_object_create);
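
/*
 * Example (illustrative sketch only): creating a kernel-internal buffer
 * in cached system memory. The object is returned with one usage
 * reference held, which the caller drops when done with the buffer.
 *
 *	struct drm_buffer_object *bo;
 *
 *	ret = drm_buffer_object_create(dev, PAGE_SIZE, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_LOCAL |
 *				       DRM_BO_FLAG_CACHED,
 *				       0, 0, 0, &bo);
 *	...
 *	drm_bo_usage_deref_unlocked(&bo);
 */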
1833 static int drm_bo_add_user_object(struct drm_file *file_priv,
1834 struct drm_buffer_object *bo, int shareable)
1836 struct drm_device *dev = file_priv->minor->dev;
1839 mutex_lock(&dev->struct_mutex);
1840 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1844 bo->base.remove = drm_bo_base_deref_locked;
1845 bo->base.type = drm_buffer_type;
1846 bo->base.ref_struct_locked = NULL;
1847 bo->base.unref = drm_buffer_user_object_unmap;
1850 mutex_unlock(&dev->struct_mutex);
1854 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1856 struct drm_bo_create_arg *arg = data;
1857 struct drm_bo_create_req *req = &arg->d.req;
1858 struct drm_bo_info_rep *rep = &arg->d.rep;
1859 struct drm_buffer_object *entry;
1860 enum drm_bo_type bo_type;
1863 DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1864 (int)(req->size / 1024), req->page_alignment * 4);
1866 if (!dev->bm.initialized) {
1867 DRM_ERROR("Buffer object manager is not initialized.\n");
 * If the buffer creation request comes in with a starting address, it
 * points at the desired user pages to map. Otherwise, create a
 * drm_bo_type_device buffer, which uses pages allocated from the kernel.
1876 bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1879 * User buffers cannot be shared
1881 if (bo_type == drm_bo_type_user)
1882 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1884 ret = drm_buffer_object_create(file_priv->minor->dev,
1885 req->size, bo_type, req->flags,
1886 req->hint, req->page_alignment,
1887 req->buffer_start, &entry);
1891 ret = drm_bo_add_user_object(file_priv, entry,
1892 req->flags & DRM_BO_FLAG_SHAREABLE);
1894 drm_bo_usage_deref_unlocked(&entry);
1898 mutex_lock(&entry->mutex);
1899 drm_bo_fill_rep_arg(entry, rep);
1900 mutex_unlock(&entry->mutex);
1906 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1907 void *data, struct drm_file *file_priv)
1909 struct drm_bo_map_wait_idle_arg *arg = data;
1910 struct drm_bo_info_req *req = &arg->d.req;
1911 struct drm_bo_info_rep *rep = &arg->d.rep;
1914 if (!dev->bm.initialized) {
1915 DRM_ERROR("Buffer object manager is not initialized.\n");
1919 ret = drm_bo_read_lock(&dev->bm.bm_lock);
 * Validate the buffer. Note that 'fence_class' will be unused
1925 * as we pass use_old_fence_class=1 here. Note also that
1926 * the libdrm API doesn't pass fence_class to the kernel,
1927 * so it's a good thing it isn't used here.
1929 ret = drm_bo_handle_validate(file_priv, req->handle,
1932 req->hint | DRM_BO_HINT_DONT_FENCE,
1933 req->fence_class, 1,
1936 (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1943 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1945 struct drm_bo_map_wait_idle_arg *arg = data;
1946 struct drm_bo_info_req *req = &arg->d.req;
1947 struct drm_bo_info_rep *rep = &arg->d.rep;
1949 if (!dev->bm.initialized) {
1950 DRM_ERROR("Buffer object manager is not initialized.\n");
1954 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1962 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1964 struct drm_bo_handle_arg *arg = data;
1966 if (!dev->bm.initialized) {
1967 DRM_ERROR("Buffer object manager is not initialized.\n");
1971 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1976 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1978 struct drm_bo_reference_info_arg *arg = data;
1979 struct drm_bo_handle_arg *req = &arg->d.req;
1980 struct drm_bo_info_rep *rep = &arg->d.rep;
1981 struct drm_user_object *uo;
1984 if (!dev->bm.initialized) {
1985 DRM_ERROR("Buffer object manager is not initialized.\n");
1989 ret = drm_user_object_ref(file_priv, req->handle,
1990 drm_buffer_type, &uo);
1994 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2001 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2003 struct drm_bo_handle_arg *arg = data;
2006 if (!dev->bm.initialized) {
2007 DRM_ERROR("Buffer object manager is not initialized.\n");
2011 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2015 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2017 struct drm_bo_reference_info_arg *arg = data;
2018 struct drm_bo_handle_arg *req = &arg->d.req;
2019 struct drm_bo_info_rep *rep = &arg->d.rep;
2022 if (!dev->bm.initialized) {
2023 DRM_ERROR("Buffer object manager is not initialized.\n");
2027 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2034 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2036 struct drm_bo_map_wait_idle_arg *arg = data;
2037 struct drm_bo_info_req *req = &arg->d.req;
2038 struct drm_bo_info_rep *rep = &arg->d.rep;
2040 if (!dev->bm.initialized) {
2041 DRM_ERROR("Buffer object manager is not initialized.\n");
2045 ret = drm_bo_handle_wait(file_priv, req->handle,
2053 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2058 struct drm_device *dev = bo->dev;
2061 mutex_lock(&bo->mutex);
2063 ret = drm_bo_expire_fence(bo, allow_errors);
2068 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2069 mutex_lock(&dev->struct_mutex);
2070 list_del_init(&bo->pinned_lru);
2071 if (bo->pinned_node == bo->mem.mm_node)
2072 bo->pinned_node = NULL;
2073 if (bo->pinned_node != NULL) {
2074 drm_mm_put_block(bo->pinned_node);
2075 bo->pinned_node = NULL;
2077 mutex_unlock(&dev->struct_mutex);
2080 if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
		DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
			  "cleanup. Removing flag and evicting.\n");
2083 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2084 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2087 if (bo->mem.mem_type == mem_type)
2088 ret = drm_bo_evict(bo, mem_type, 0);
2095 DRM_ERROR("Cleanup eviction failed\n");
2100 mutex_unlock(&bo->mutex);
2105 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2109 return list_entry(list, struct drm_buffer_object, pinned_lru);
2111 return list_entry(list, struct drm_buffer_object, lru);
2115 * dev->struct_mutex locked.
2118 static int drm_bo_force_list_clean(struct drm_device *dev,
2119 struct list_head *head,
2125 struct list_head *list, *next, *prev;
2126 struct drm_buffer_object *entry, *nentry;
2131 * The list traversal is a bit odd here, because an item may
2132 * disappear from the list when we release the struct_mutex or
2133 * when we decrease the usage count. Also we're not guaranteed
2134 * to drain pinned lists, so we can't always restart.
2139 list_for_each_safe(list, next, head) {
2142 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2143 atomic_inc(&entry->usage);
2145 atomic_dec(&nentry->usage);
2150 * Protect the next item from destruction, so we can check
2151 * its list pointers later on.
2155 nentry = drm_bo_entry(next, pinned_list);
2156 atomic_inc(&nentry->usage);
2158 mutex_unlock(&dev->struct_mutex);
2160 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2162 mutex_lock(&dev->struct_mutex);
2164 drm_bo_usage_deref_locked(&entry);
2169 * Has the next item disappeared from the list?
2172 do_restart = ((next->prev != list) && (next->prev != prev));
2174 if (nentry != NULL && do_restart)
2175 drm_bo_usage_deref_locked(&nentry);
2183 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
2185 struct drm_buffer_manager *bm = &dev->bm;
2186 struct drm_mem_type_manager *man = &bm->man[mem_type];
2189 if (mem_type >= DRM_BO_MEM_TYPES) {
2190 DRM_ERROR("Illegal memory type %d\n", mem_type);
2194 if (!man->has_type) {
2195 DRM_ERROR("Trying to take down uninitialized "
2196 "memory manager type %u\n", mem_type);
2204 BUG_ON(!list_empty(&bm->unfenced));
2205 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2206 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2208 if (drm_mm_clean(&man->manager)) {
2209 drm_mm_takedown(&man->manager);
2217 EXPORT_SYMBOL(drm_bo_clean_mm);
 * Evict all buffers of a particular mem_type, but leave memory manager
 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
 * point since we have the hardware lock.
2225 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2228 struct drm_buffer_manager *bm = &dev->bm;
2229 struct drm_mem_type_manager *man = &bm->man[mem_type];
2231 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2232 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2236 if (!man->has_type) {
2237 DRM_ERROR("Memory type %u has not been initialized.\n",
2242 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2245 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2250 int drm_bo_init_mm(struct drm_device *dev,
2252 unsigned long p_offset, unsigned long p_size)
2254 struct drm_buffer_manager *bm = &dev->bm;
2256 struct drm_mem_type_manager *man;
2258 if (type >= DRM_BO_MEM_TYPES) {
2259 DRM_ERROR("Illegal memory type %d\n", type);
2263 man = &bm->man[type];
2264 if (man->has_type) {
2265 DRM_ERROR("Memory manager already initialized for type %d\n",
2270 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2275 if (type != DRM_BO_MEM_LOCAL) {
2277 DRM_ERROR("Zero size memory manager type %d\n", type);
2280 ret = drm_mm_init(&man->manager, p_offset, p_size);
2288 INIT_LIST_HEAD(&man->lru);
2289 INIT_LIST_HEAD(&man->pinned);
2293 EXPORT_SYMBOL(drm_bo_init_mm);
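
/*
 * Example (illustrative sketch; the aperture size is hypothetical): a
 * driver bringing up an aperture-backed memory type on load.
 * DRM_BO_MEM_LOCAL is initialized by drm_bo_driver_init() below; other
 * types are set up by the driver or via drm_mm_init_ioctl(). p_offset
 * and p_size are expressed in pages.
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
 *			     aperture_size >> PAGE_SHIFT);
 */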
2296 * This function is intended to be called on drm driver unload.
2297 * If you decide to call it from lastclose, you must protect the call
2298 * from a potentially racing drm_bo_driver_init in firstopen.
2299 * (This may happen on X server restart).
2302 int drm_bo_driver_finish(struct drm_device *dev)
2304 struct drm_buffer_manager *bm = &dev->bm;
2306 unsigned i = DRM_BO_MEM_TYPES;
2307 struct drm_mem_type_manager *man;
2309 mutex_lock(&dev->struct_mutex);
2311 if (!bm->initialized)
2313 bm->initialized = 0;
2317 if (man->has_type) {
2319 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2321 DRM_ERROR("DRM memory manager type %d "
2322 "is not clean.\n", i);
2327 mutex_unlock(&dev->struct_mutex);
2329 if (!cancel_delayed_work(&bm->wq))
2330 flush_scheduled_work();
2332 mutex_lock(&dev->struct_mutex);
2333 drm_bo_delayed_delete(dev, 1);
2334 if (list_empty(&bm->ddestroy))
2335 DRM_DEBUG("Delayed destroy list was clean\n");
2337 if (list_empty(&bm->man[0].lru))
2338 DRM_DEBUG("Swap list was clean\n");
2340 if (list_empty(&bm->man[0].pinned))
2341 DRM_DEBUG("NO_MOVE list was clean\n");
2343 if (list_empty(&bm->unfenced))
2344 DRM_DEBUG("Unfenced list was clean\n");
2346 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2347 ClearPageReserved(bm->dummy_read_page);
2349 __free_page(bm->dummy_read_page);
2352 mutex_unlock(&dev->struct_mutex);
2357 * This function is intended to be called on drm driver load.
2358 * If you decide to call it from firstopen, you must protect the call
2359 * from a potentially racing drm_bo_driver_finish in lastclose.
2360 * (This may happen on X server restart).
2363 int drm_bo_driver_init(struct drm_device *dev)
2365 struct drm_bo_driver *driver = dev->driver->bo_driver;
2366 struct drm_buffer_manager *bm = &dev->bm;
2369 bm->dummy_read_page = NULL;
2370 drm_bo_init_lock(&bm->bm_lock);
2371 mutex_lock(&dev->struct_mutex);
2375 bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2376 if (!bm->dummy_read_page) {
2381 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2382 SetPageReserved(bm->dummy_read_page);
2386 * Initialize the system memory buffer type.
2387 * Other types need to be driver / IOCTL initialized.
2389 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2393 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2394 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2396 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2398 bm->initialized = 1;
2400 atomic_set(&bm->count, 0);
2402 INIT_LIST_HEAD(&bm->unfenced);
2403 INIT_LIST_HEAD(&bm->ddestroy);
2405 mutex_unlock(&dev->struct_mutex);
2408 EXPORT_SYMBOL(drm_bo_driver_init);
2410 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2412 struct drm_mm_init_arg *arg = data;
2413 struct drm_buffer_manager *bm = &dev->bm;
2414 struct drm_bo_driver *driver = dev->driver->bo_driver;
2418 DRM_ERROR("Buffer objects are not supported by this driver\n");
2422 ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2427 if (arg->magic != DRM_BO_INIT_MAGIC) {
2428 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2429 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2432 if (arg->major != DRM_BO_INIT_MAJOR) {
2433 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2434 "\tversion don't match. Got %d, expected %d.\n",
2435 arg->major, DRM_BO_INIT_MAJOR);
2439 mutex_lock(&dev->struct_mutex);
2440 if (!bm->initialized) {
2441 DRM_ERROR("DRM memory manager was not initialized.\n");
2444 if (arg->mem_type == 0) {
2445 DRM_ERROR("System memory buffers already initialized.\n");
2448 ret = drm_bo_init_mm(dev, arg->mem_type,
2449 arg->p_offset, arg->p_size);
2452 mutex_unlock(&dev->struct_mutex);
2453 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2461 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2463 struct drm_mm_type_arg *arg = data;
2464 struct drm_buffer_manager *bm = &dev->bm;
2465 struct drm_bo_driver *driver = dev->driver->bo_driver;
2469 DRM_ERROR("Buffer objects are not supported by this driver\n");
2473 ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2477 mutex_lock(&dev->struct_mutex);
2479 if (!bm->initialized) {
2480 DRM_ERROR("DRM memory manager was not initialized\n");
2483 if (arg->mem_type == 0) {
2484 DRM_ERROR("No takedown for System memory buffers.\n");
2488 if (drm_bo_clean_mm(dev, arg->mem_type)) {
2489 DRM_ERROR("Memory manager type %d not clean. "
2490 "Delaying takedown\n", arg->mem_type);
2493 mutex_unlock(&dev->struct_mutex);
2494 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2502 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2504 struct drm_mm_type_arg *arg = data;
2505 struct drm_bo_driver *driver = dev->driver->bo_driver;
2509 DRM_ERROR("Buffer objects are not supported by this driver\n");
2513 if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2514 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2518 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2519 ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
2524 mutex_lock(&dev->struct_mutex);
2525 ret = drm_bo_lock_mm(dev, arg->mem_type);
2526 mutex_unlock(&dev->struct_mutex);
2528 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2535 int drm_mm_unlock_ioctl(struct drm_device *dev,
2537 struct drm_file *file_priv)
2539 struct drm_mm_type_arg *arg = data;
2540 struct drm_bo_driver *driver = dev->driver->bo_driver;
2544 DRM_ERROR("Buffer objects are not supported by this driver\n");
2548 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2549 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2557 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2559 struct drm_mm_info_arg *arg = data;
2560 struct drm_buffer_manager *bm = &dev->bm;
2561 struct drm_bo_driver *driver = dev->driver->bo_driver;
2562 struct drm_mem_type_manager *man;
2564 int mem_type = arg->mem_type;
2567 DRM_ERROR("Buffer objects are not supported by this driver\n");
2571 if (mem_type >= DRM_BO_MEM_TYPES) {
2572 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2576 mutex_lock(&dev->struct_mutex);
2577 if (!bm->initialized) {
2578 DRM_ERROR("DRM memory manager was not initialized\n");
2584 man = &bm->man[arg->mem_type];
2586 arg->p_size = man->size;
2589 mutex_unlock(&dev->struct_mutex);
2594 * buffer object vm functions.
2597 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2599 struct drm_buffer_manager *bm = &dev->bm;
2600 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2602 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2603 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2606 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2609 if (mem->flags & DRM_BO_FLAG_CACHED)
2614 EXPORT_SYMBOL(drm_mem_reg_is_pci);
 * Get the PCI offset for the buffer object memory.
2619 * \param bo The buffer object.
2620 * \param bus_base On return the base of the PCI region
2621 * \param bus_offset On return the byte offset into the PCI region
2622 * \param bus_size On return the byte size of the buffer object or zero if
2623 * the buffer object memory is not accessible through a PCI region.
2624 * \return Failure indication.
2626 * Returns -EINVAL if the buffer object is currently not mappable.
2627 * Otherwise returns zero.
2630 int drm_bo_pci_offset(struct drm_device *dev,
2631 struct drm_bo_mem_reg *mem,
2632 unsigned long *bus_base,
2633 unsigned long *bus_offset, unsigned long *bus_size)
2635 struct drm_buffer_manager *bm = &dev->bm;
2636 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2639 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2642 if (drm_mem_reg_is_pci(dev, mem)) {
2643 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2644 *bus_size = mem->num_pages << PAGE_SHIFT;
2645 *bus_base = man->io_offset;
 * Kill all user-space virtual mappings of this buffer object.
2654 * \param bo The buffer object.
2656 * Call bo->mutex locked.
2659 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2661 struct drm_device *dev = bo->dev;
2662 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2663 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2665 if (!dev->dev_mapping)
2668 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2672 * drm_bo_takedown_vm_locked:
2674 * @bo: the buffer object to remove any drm device mapping
2676 * Remove any associated vm mapping on the drm device node that
2677 * would have been created for a drm_bo_type_device buffer
2679 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2681 struct drm_map_list *list;
2682 drm_local_map_t *map;
2683 struct drm_device *dev = bo->dev;
2685 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2686 if (bo->type != drm_bo_type_device)
2689 list = &bo->map_list;
2690 if (list->user_token) {
2691 drm_ht_remove_item(&dev->map_hash, &list->hash);
2692 list->user_token = 0;
2694 if (list->file_offset_node) {
2695 drm_mm_put_block(list->file_offset_node);
2696 list->file_offset_node = NULL;
2703 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2705 list->user_token = 0ULL;
2706 drm_bo_usage_deref_locked(&bo);
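
/*
 * Illustrative note (user-space side, not part of this file): the
 * user_token created by drm_bo_setup_vm_locked() below is what
 * drm_bo_fill_rep_arg() hands back as rep->arg_handle, and applications
 * use it as the mmap offset on the DRM device fd:
 *
 *	ptr = mmap(NULL, rep.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, drm_fd, rep.arg_handle);
 */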
2710 * drm_bo_setup_vm_locked:
2712 * @bo: the buffer to allocate address space for
2714 * Allocate address space in the drm device so that applications
2715 * can mmap the buffer and access the contents. This only
2716 * applies to drm_bo_type_device objects as others are not
2717 * placed in the drm device address space.
2719 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2721 struct drm_map_list *list = &bo->map_list;
2722 drm_local_map_t *map;
2723 struct drm_device *dev = bo->dev;
2725 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2726 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2732 map->type = _DRM_TTM;
2733 map->flags = _DRM_REMOVABLE;
2734 map->size = bo->mem.num_pages * PAGE_SIZE;
2735 atomic_inc(&bo->usage);
2736 map->handle = (void *)bo;
2738 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2739 bo->mem.num_pages, 0, 0);
2741 if (!list->file_offset_node) {
2742 drm_bo_takedown_vm_locked(bo);
2746 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2747 bo->mem.num_pages, 0);
2749 list->hash.key = list->file_offset_node->start;
2750 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2751 drm_bo_takedown_vm_locked(bo);
2755 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2760 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2761 struct drm_file *file_priv)
2763 struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2765 arg->major = DRM_BO_INIT_MAJOR;
2766 arg->minor = DRM_BO_INIT_MINOR;
2767 arg->patchlevel = DRM_BO_INIT_PATCH;