1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads,
40 * hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex does also protect the buffer list heads, so to manipulate those,
44 * we need both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48 * the list traversal will, in general, need to be restarted.
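 *
 * Illustrative sketch of that ordering when manipulating the list heads
 * (error handling omitted):
 *
 *	mutex_lock(&bo->mutex);
 *	mutex_lock(&dev->struct_mutex);
 *	list_del_init(&bo->lru);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_unlock(&bo->mutex);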
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
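/*
 * drm_bo_type_flags() maps a memory type index to its corresponding
 * DRM_BO_FLAG_MEM_* placement bit; the memory-placement flags start at
 * bit 24, so type 0 (DRM_BO_MEM_LOCAL) presumably yields
 * DRM_BO_FLAG_MEM_LOCAL.
 */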
57 static inline uint64_t drm_bo_type_flags(unsigned type)
59 return (1ULL << (24 + type));
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
83 if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84 || bo->mem.mem_type != bo->pinned_mem_type) {
85 man = &bo->dev->bm.man[bo->mem.mem_type];
86 list_add_tail(&bo->lru, &man->lru);
88 INIT_LIST_HEAD(&bo->lru);
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
94 #ifdef DRM_ODD_MM_COMPAT
97 if (!bo->map_list.map)
100 ret = drm_bo_lock_kmm(bo);
103 drm_bo_unmap_virtual(bo);
105 drm_bo_finish_unmap(bo);
107 if (!bo->map_list.map)
110 drm_bo_unmap_virtual(bo);
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
117 #ifdef DRM_ODD_MM_COMPAT
120 if (!bo->map_list.map)
123 ret = drm_bo_remap_bound(bo);
125 DRM_ERROR("Failed to remap a bound buffer object.\n"
126 "\tThis might cause a sigbus later.\n");
128 drm_bo_unlock_kmm(bo);
133 * Call bo->mutex locked.
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
138 struct drm_device *dev = bo->dev;
140 uint32_t page_flags = 0;
142 DRM_ASSERT_LOCKED(&bo->mutex);
145 if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
146 page_flags |= DRM_TTM_PAGE_WRITE;
149 case drm_bo_type_device:
150 case drm_bo_type_kernel:
151 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
152 page_flags, dev->bm.dummy_read_page);
156 case drm_bo_type_user:
157 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
158 page_flags | DRM_TTM_PAGE_USER,
159 dev->bm.dummy_read_page);
163 ret = drm_ttm_set_user(bo->ttm, current,
171 DRM_ERROR("Illegal buffer object type\n");
179 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
180 struct drm_bo_mem_reg *mem,
181 int evict, int no_wait)
183 struct drm_device *dev = bo->dev;
184 struct drm_buffer_manager *bm = &dev->bm;
185 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
186 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
187 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
188 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
191 if (old_is_pci || new_is_pci ||
192 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
193 ret = drm_bo_vm_pre_move(bo, old_is_pci);
198 * Create and bind a ttm if required.
201 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
202 ret = drm_bo_add_ttm(bo);
206 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
207 ret = drm_ttm_bind(bo->ttm, mem);
213 if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
215 struct drm_bo_mem_reg *old_mem = &bo->mem;
216 uint64_t save_flags = old_mem->flags;
217 uint64_t save_proposed_flags = old_mem->proposed_flags;
221 old_mem->proposed_flags = save_proposed_flags;
222 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
224 } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
225 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
227 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
229 } else if (dev->driver->bo_driver->move) {
230 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
234 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
241 if (old_is_pci || new_is_pci)
242 drm_bo_vm_post_move(bo);
244 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
246 dev->driver->bo_driver->invalidate_caches(dev,
249 DRM_ERROR("Cannot flush read caches\n");
252 DRM_FLAG_MASKED(bo->priv_flags,
253 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
254 _DRM_BO_FLAG_EVICTED);
257 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
258 bm->man[bo->mem.mem_type].gpu_offset;
264 if (old_is_pci || new_is_pci)
265 drm_bo_vm_post_move(bo);
267 new_man = &bm->man[bo->mem.mem_type];
268 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
269 drm_ttm_unbind(bo->ttm);
270 drm_ttm_destroy(bo->ttm);
278 * Call bo->mutex locked.
279 * Wait until the buffer is idle.
282 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
287 DRM_ASSERT_LOCKED(&bo->mutex);
290 if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
291 drm_fence_usage_deref_unlocked(&bo->fence);
297 ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
302 drm_fence_usage_deref_unlocked(&bo->fence);
306 EXPORT_SYMBOL(drm_bo_wait);
308 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
310 struct drm_device *dev = bo->dev;
311 struct drm_buffer_manager *bm = &dev->bm;
315 unsigned long _end = jiffies + 3 * DRM_HZ;
318 ret = drm_bo_wait(bo, 0, 1, 0);
319 if (ret && allow_errors)
322 } while (ret && !time_after_eq(jiffies, _end));
326 DRM_ERROR("Detected GPU lockup or "
327 "fence driver was taken down. "
328 "Evicting buffer.\n");
332 drm_fence_usage_deref_unlocked(&bo->fence);
338 * Call dev->struct_mutex locked.
339 * Attempts to remove all private references to a buffer by expiring its
340 * fence object and removing it from lru lists and memory managers.
343 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
345 struct drm_device *dev = bo->dev;
346 struct drm_buffer_manager *bm = &dev->bm;
348 DRM_ASSERT_LOCKED(&dev->struct_mutex);
350 atomic_inc(&bo->usage);
351 mutex_unlock(&dev->struct_mutex);
352 mutex_lock(&bo->mutex);
354 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
356 if (bo->fence && drm_fence_object_signaled(bo->fence,
358 drm_fence_usage_deref_unlocked(&bo->fence);
360 if (bo->fence && remove_all)
361 (void)drm_bo_expire_fence(bo, 0);
363 mutex_lock(&dev->struct_mutex);
365 if (!atomic_dec_and_test(&bo->usage))
369 list_del_init(&bo->lru);
370 if (bo->mem.mm_node) {
371 drm_mm_put_block(bo->mem.mm_node);
372 if (bo->pinned_node == bo->mem.mm_node)
373 bo->pinned_node = NULL;
374 bo->mem.mm_node = NULL;
376 list_del_init(&bo->pinned_lru);
377 if (bo->pinned_node) {
378 drm_mm_put_block(bo->pinned_node);
379 bo->pinned_node = NULL;
381 list_del_init(&bo->ddestroy);
382 mutex_unlock(&bo->mutex);
383 drm_bo_destroy_locked(bo);
387 if (list_empty(&bo->ddestroy)) {
388 drm_fence_object_flush(bo->fence, bo->fence_type);
389 list_add_tail(&bo->ddestroy, &bm->ddestroy);
390 schedule_delayed_work(&bm->wq,
391 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
395 mutex_unlock(&bo->mutex);
400 * Verify that refcount is 0 and that there are no internal references
401 * to the buffer object. Then destroy it.
404 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
406 struct drm_device *dev = bo->dev;
407 struct drm_buffer_manager *bm = &dev->bm;
409 DRM_ASSERT_LOCKED(&dev->struct_mutex);
411 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
412 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
413 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
414 if (bo->fence != NULL) {
415 DRM_ERROR("Fence was non-zero.\n");
416 drm_bo_cleanup_refs(bo, 0);
420 #ifdef DRM_ODD_MM_COMPAT
421 BUG_ON(!list_empty(&bo->vma_list));
422 BUG_ON(!list_empty(&bo->p_mm_list));
426 drm_ttm_unbind(bo->ttm);
427 drm_ttm_destroy(bo->ttm);
431 atomic_dec(&bm->count);
433 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
439 * Some stuff is still trying to reference the buffer object.
440 * Get rid of those references.
443 drm_bo_cleanup_refs(bo, 0);
449 * Call dev->struct_mutex locked.
452 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
454 struct drm_buffer_manager *bm = &dev->bm;
456 struct drm_buffer_object *entry, *nentry;
457 struct list_head *list, *next;
459 list_for_each_safe(list, next, &bm->ddestroy) {
460 entry = list_entry(list, struct drm_buffer_object, ddestroy);
463 if (next != &bm->ddestroy) {
464 nentry = list_entry(next, struct drm_buffer_object,
466 atomic_inc(&nentry->usage);
469 drm_bo_cleanup_refs(entry, remove_all);
472 atomic_dec(&nentry->usage);
476 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
477 static void drm_bo_delayed_workqueue(void *data)
479 static void drm_bo_delayed_workqueue(struct work_struct *work)
482 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
483 struct drm_device *dev = (struct drm_device *) data;
484 struct drm_buffer_manager *bm = &dev->bm;
486 struct drm_buffer_manager *bm =
487 container_of(work, struct drm_buffer_manager, wq.work);
488 struct drm_device *dev = container_of(bm, struct drm_device, bm);
491 DRM_DEBUG("Delayed delete Worker\n");
493 mutex_lock(&dev->struct_mutex);
494 if (!bm->initialized) {
495 mutex_unlock(&dev->struct_mutex);
498 drm_bo_delayed_delete(dev, 0);
499 if (bm->initialized && !list_empty(&bm->ddestroy)) {
500 schedule_delayed_work(&bm->wq,
501 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
503 mutex_unlock(&dev->struct_mutex);
506 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
508 struct drm_buffer_object *tmp_bo = *bo;
511 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
513 if (atomic_dec_and_test(&tmp_bo->usage))
514 drm_bo_destroy_locked(tmp_bo);
516 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
518 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
519 struct drm_user_object *uo)
521 struct drm_buffer_object *bo =
522 drm_user_object_entry(uo, struct drm_buffer_object, base);
524 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
526 drm_bo_takedown_vm_locked(bo);
527 drm_bo_usage_deref_locked(&bo);
530 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
532 struct drm_buffer_object *tmp_bo = *bo;
533 struct drm_device *dev = tmp_bo->dev;
536 if (atomic_dec_and_test(&tmp_bo->usage)) {
537 mutex_lock(&dev->struct_mutex);
538 if (atomic_read(&tmp_bo->usage) == 0)
539 drm_bo_destroy_locked(tmp_bo);
540 mutex_unlock(&dev->struct_mutex);
543 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
545 void drm_putback_buffer_objects(struct drm_device *dev)
547 struct drm_buffer_manager *bm = &dev->bm;
548 struct list_head *list = &bm->unfenced;
549 struct drm_buffer_object *entry, *next;
551 mutex_lock(&dev->struct_mutex);
552 list_for_each_entry_safe(entry, next, list, lru) {
553 atomic_inc(&entry->usage);
554 mutex_unlock(&dev->struct_mutex);
556 mutex_lock(&entry->mutex);
557 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
558 mutex_lock(&dev->struct_mutex);
560 list_del_init(&entry->lru);
561 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
562 wake_up_all(&entry->event_queue);
565 * FIXME: Might want to put back on head of list
566 * instead of tail here.
569 drm_bo_add_to_lru(entry);
570 mutex_unlock(&entry->mutex);
571 drm_bo_usage_deref_locked(&entry);
573 mutex_unlock(&dev->struct_mutex);
575 EXPORT_SYMBOL(drm_putback_buffer_objects);
578 * Note. The caller has to register (if applicable)
579 * and deregister fence object usage.
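 *
 * Illustrative (hypothetical) use from a driver submission path: fence
 * whatever is currently on the unfenced list with a newly created fence,
 * then drop this caller's usage of it:
 *
 *	struct drm_fence_object *fence = NULL;
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (!ret && fence)
 *		drm_fence_usage_deref_unlocked(&fence);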
582 int drm_fence_buffer_objects(struct drm_device *dev,
583 struct list_head *list,
584 uint32_t fence_flags,
585 struct drm_fence_object *fence,
586 struct drm_fence_object **used_fence)
588 struct drm_buffer_manager *bm = &dev->bm;
589 struct drm_buffer_object *entry;
590 uint32_t fence_type = 0;
591 uint32_t fence_class = ~0;
596 mutex_lock(&dev->struct_mutex);
599 list = &bm->unfenced;
602 fence_class = fence->fence_class;
604 list_for_each_entry(entry, list, lru) {
605 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
606 fence_type |= entry->new_fence_type;
607 if (fence_class == ~0)
608 fence_class = entry->new_fence_class;
609 else if (entry->new_fence_class != fence_class) {
610 DRM_ERROR("Mismatched fence classes on unfenced list: "
613 entry->new_fence_class);
626 if ((fence_type & fence->type) != fence_type ||
627 (fence->fence_class != fence_class)) {
628 DRM_ERROR("Given fence doesn't match buffers "
629 "on unfenced list.\n");
634 mutex_unlock(&dev->struct_mutex);
635 ret = drm_fence_object_create(dev, fence_class, fence_type,
636 fence_flags | DRM_FENCE_FLAG_EMIT,
638 mutex_lock(&dev->struct_mutex);
647 entry = list_entry(l, struct drm_buffer_object, lru);
648 atomic_inc(&entry->usage);
649 mutex_unlock(&dev->struct_mutex);
650 mutex_lock(&entry->mutex);
651 mutex_lock(&dev->struct_mutex);
653 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
656 drm_fence_usage_deref_locked(&entry->fence);
657 entry->fence = drm_fence_reference_locked(fence);
658 entry->fence_class = entry->new_fence_class;
659 entry->fence_type = entry->new_fence_type;
660 DRM_FLAG_MASKED(entry->priv_flags, 0,
661 _DRM_BO_FLAG_UNFENCED);
662 wake_up_all(&entry->event_queue);
663 drm_bo_add_to_lru(entry);
665 mutex_unlock(&entry->mutex);
666 drm_bo_usage_deref_locked(&entry);
669 DRM_DEBUG("Fenced %d buffers\n", count);
671 mutex_unlock(&dev->struct_mutex);
675 EXPORT_SYMBOL(drm_fence_buffer_objects);
681 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
685 struct drm_device *dev = bo->dev;
686 struct drm_bo_mem_reg evict_mem;
689 * Someone might have modified the buffer before we took the buffer mutex.
693 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
695 if (bo->mem.mem_type != mem_type)
698 ret = drm_bo_wait(bo, 0, 0, no_wait);
700 if (ret && ret != -EAGAIN) {
701 DRM_ERROR("Failed to expire fence before "
702 "buffer eviction.\n");
707 evict_mem.mm_node = NULL;
710 evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
711 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
715 DRM_ERROR("Failed to find memory space for "
716 "buffer 0x%p eviction.\n", bo);
720 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
724 DRM_ERROR("Buffer eviction failed\n");
728 mutex_lock(&dev->struct_mutex);
729 if (evict_mem.mm_node) {
730 if (evict_mem.mm_node != bo->pinned_node)
731 drm_mm_put_block(evict_mem.mm_node);
732 evict_mem.mm_node = NULL;
735 drm_bo_add_to_lru(bo);
736 mutex_unlock(&dev->struct_mutex);
738 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
739 _DRM_BO_FLAG_EVICTED);
746 * Repeatedly evict memory from the LRU for @mem_type until we create enough
747 * space, or we've evicted everything and there isn't enough space.
749 static int drm_bo_mem_force_space(struct drm_device *dev,
750 struct drm_bo_mem_reg *mem,
751 uint32_t mem_type, int no_wait)
753 struct drm_mm_node *node;
754 struct drm_buffer_manager *bm = &dev->bm;
755 struct drm_buffer_object *entry;
756 struct drm_mem_type_manager *man = &bm->man[mem_type];
757 struct list_head *lru;
758 unsigned long num_pages = mem->num_pages;
761 mutex_lock(&dev->struct_mutex);
763 node = drm_mm_search_free(&man->manager, num_pages,
764 mem->page_alignment, 1);
769 if (lru->next == lru)
772 entry = list_entry(lru->next, struct drm_buffer_object, lru);
773 atomic_inc(&entry->usage);
774 mutex_unlock(&dev->struct_mutex);
775 mutex_lock(&entry->mutex);
776 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
778 ret = drm_bo_evict(entry, mem_type, no_wait);
779 mutex_unlock(&entry->mutex);
780 drm_bo_usage_deref_unlocked(&entry);
783 mutex_lock(&dev->struct_mutex);
787 mutex_unlock(&dev->struct_mutex);
791 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
792 mutex_unlock(&dev->struct_mutex);
794 mem->mem_type = mem_type;
798 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
801 uint64_t mask, uint32_t *res_mask)
803 uint64_t cur_flags = drm_bo_type_flags(mem_type);
806 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
808 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
809 cur_flags |= DRM_BO_FLAG_CACHED;
810 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
811 cur_flags |= DRM_BO_FLAG_MAPPABLE;
812 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
813 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
815 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
818 if (mem_type == DRM_BO_MEM_LOCAL) {
819 *res_mask = cur_flags;
823 flag_diff = (mask ^ cur_flags);
824 if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
825 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
827 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
828 (!(mask & DRM_BO_FLAG_CACHED) ||
829 (mask & DRM_BO_FLAG_FORCE_CACHING)))
832 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
833 ((mask & DRM_BO_FLAG_MAPPABLE) ||
834 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
837 *res_mask = cur_flags;
842 * Creates space for memory region @mem according to its type.
844 * This function first searches for free space in compatible memory types in
845 * the priority order defined by the driver. If free space isn't found, then
846 * drm_bo_mem_force_space is attempted in priority order to evict and find space.
849 int drm_bo_mem_space(struct drm_buffer_object *bo,
850 struct drm_bo_mem_reg *mem, int no_wait)
852 struct drm_device *dev = bo->dev;
853 struct drm_buffer_manager *bm = &dev->bm;
854 struct drm_mem_type_manager *man;
856 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
857 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
859 uint32_t mem_type = DRM_BO_MEM_LOCAL;
864 struct drm_mm_node *node = NULL;
868 for (i = 0; i < num_prios; ++i) {
870 man = &bm->man[mem_type];
872 type_ok = drm_bo_mt_compatible(man,
873 bo->type == drm_bo_type_user,
874 mem_type, mem->proposed_flags,
880 if (mem_type == DRM_BO_MEM_LOCAL)
883 if ((mem_type == bo->pinned_mem_type) &&
884 (bo->pinned_node != NULL)) {
885 node = bo->pinned_node;
889 mutex_lock(&dev->struct_mutex);
890 if (man->has_type && man->use_type) {
892 node = drm_mm_search_free(&man->manager, mem->num_pages,
893 mem->page_alignment, 1);
895 node = drm_mm_get_block(node, mem->num_pages,
896 mem->page_alignment);
898 mutex_unlock(&dev->struct_mutex);
903 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
905 mem->mem_type = mem_type;
906 mem->flags = cur_flags;
913 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
914 prios = dev->driver->bo_driver->mem_busy_prio;
916 for (i = 0; i < num_prios; ++i) {
918 man = &bm->man[mem_type];
923 if (!drm_bo_mt_compatible(man,
924 bo->type == drm_bo_type_user,
930 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
932 if (ret == 0 && mem->mm_node) {
933 mem->flags = cur_flags;
941 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
944 EXPORT_SYMBOL(drm_bo_mem_space);
947 * drm_bo_modify_proposed_flags:
949 * @bo: the buffer object getting new flags
951 * @new_flags: the new set of proposed flag bits
953 * @new_mask: the mask of bits changed in new_flags
955 * Modify the proposed_flags bits in @bo
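 *
 * For example, calling this with @new_mask set to DRM_BO_MASK_MEM
 * presumably replaces only the memory-placement bits of proposed_flags
 * with the corresponding bits of @new_flags, leaving all bits outside
 * @new_mask at their previously proposed values.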
957 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
958 uint64_t new_flags, uint64_t new_mask)
962 /* Copy unchanging bits from existing proposed_flags */
963 DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
965 if (bo->type == drm_bo_type_user &&
966 ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
967 (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
968 DRM_ERROR("User buffers require cache-coherent memory.\n");
972 if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
973 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
977 if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
978 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
982 new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
985 if (new_access == 0) {
986 DRM_ERROR("Invalid buffer object rwx properties\n");
990 bo->mem.proposed_flags = new_flags;
995 * Call dev->struct_mutex locked.
998 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
999 uint32_t handle, int check_owner)
1001 struct drm_user_object *uo;
1002 struct drm_buffer_object *bo;
1004 uo = drm_lookup_user_object(file_priv, handle);
1006 if (!uo || (uo->type != drm_buffer_type)) {
1007 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1011 if (check_owner && file_priv != uo->owner) {
1012 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1016 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1017 atomic_inc(&bo->usage);
1020 EXPORT_SYMBOL(drm_lookup_buffer_object);
1023 * Call bo->mutex locked.
1024 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1025 * Unlike drm_bo_busy(), this function doesn't do any fence flushing.
1028 static int drm_bo_quick_busy(struct drm_buffer_object *bo)
1030 struct drm_fence_object *fence = bo->fence;
1032 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1034 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1035 drm_fence_usage_deref_unlocked(&bo->fence);
1044 * Call bo->mutex locked.
1045 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1048 static int drm_bo_busy(struct drm_buffer_object *bo)
1050 struct drm_fence_object *fence = bo->fence;
1052 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1054 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1055 drm_fence_usage_deref_unlocked(&bo->fence);
1058 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1059 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1060 drm_fence_usage_deref_unlocked(&bo->fence);
1068 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1072 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1073 if (bo->mem.mm_node)
1074 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1078 EXPORT_SYMBOL(drm_bo_evict_cached);
1080 * Wait until a buffer is unmapped.
1083 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1087 if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1090 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1091 atomic_read(&bo->mapped) == -1);
1099 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
1103 mutex_lock(&bo->mutex);
1104 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1105 mutex_unlock(&bo->mutex);
1110 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1111 * Until then, we cannot really do anything with it except delete it.
1114 static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
1117 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1125 mutex_unlock(&bo->mutex);
1126 DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
1127 !drm_bo_check_unfenced(bo));
1128 mutex_lock(&bo->mutex);
1131 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1133 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1143 * Fill in the ioctl reply argument with buffer info.
1147 static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1148 struct drm_bo_info_rep *rep)
1153 rep->handle = bo->base.hash.key;
1154 rep->flags = bo->mem.flags;
1155 rep->size = bo->num_pages * PAGE_SIZE;
1156 rep->offset = bo->offset;
1159 * drm_bo_type_device buffers have user-visible
1160 * handles which can be used to share the buffer across
1161 * processes. Hand that back to the application
1163 if (bo->type == drm_bo_type_device)
1164 rep->arg_handle = bo->map_list.user_token;
1166 rep->arg_handle = 0;
1168 rep->proposed_flags = bo->mem.proposed_flags;
1169 rep->buffer_start = bo->buffer_start;
1170 rep->fence_flags = bo->fence_type;
1172 rep->page_alignment = bo->mem.page_alignment;
1174 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1175 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1181 * Wait for buffer idle and register that we've mapped the buffer.
1182 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1183 * so that if the client dies, the mapping is automatically removed.
1187 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1188 uint32_t map_flags, unsigned hint,
1189 struct drm_bo_info_rep *rep)
1191 struct drm_buffer_object *bo;
1192 struct drm_device *dev = file_priv->minor->dev;
1194 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1196 mutex_lock(&dev->struct_mutex);
1197 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1198 mutex_unlock(&dev->struct_mutex);
1203 mutex_lock(&bo->mutex);
1204 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1209 * If this returns true, we are currently unmapped.
1210 * We need to do this test, because unmapping can
1211 * be done without the bo->mutex held.
1215 if (atomic_inc_and_test(&bo->mapped)) {
1216 if (no_wait && drm_bo_busy(bo)) {
1217 atomic_dec(&bo->mapped);
1221 ret = drm_bo_wait(bo, 0, 0, no_wait);
1223 atomic_dec(&bo->mapped);
1227 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1228 drm_bo_evict_cached(bo);
1231 } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
1234 * We are already mapped with different flags.
1235 * We need to wait for unmap.
1238 ret = drm_bo_wait_unmapped(bo, no_wait);
1247 mutex_lock(&dev->struct_mutex);
1248 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1249 mutex_unlock(&dev->struct_mutex);
1251 if (atomic_add_negative(-1, &bo->mapped))
1252 wake_up_all(&bo->event_queue);
1255 drm_bo_fill_rep_arg(bo, rep);
1257 mutex_unlock(&bo->mutex);
1258 drm_bo_usage_deref_unlocked(&bo);
1262 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1264 struct drm_device *dev = file_priv->minor->dev;
1265 struct drm_buffer_object *bo;
1266 struct drm_ref_object *ro;
1269 mutex_lock(&dev->struct_mutex);
1271 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1277 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1283 drm_remove_ref_object(file_priv, ro);
1284 drm_bo_usage_deref_locked(&bo);
1286 mutex_unlock(&dev->struct_mutex);
1291 * Call dev->struct_mutex locked.
1294 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1295 struct drm_user_object *uo,
1296 enum drm_ref_type action)
1298 struct drm_buffer_object *bo =
1299 drm_user_object_entry(uo, struct drm_buffer_object, base);
1302 * We DON'T want to take bo->mutex here, because the code that waits
1303 * for the buffer to become unmapped already holds it.
1306 BUG_ON(action != _DRM_REF_TYPE1);
1308 if (atomic_add_negative(-1, &bo->mapped))
1309 wake_up_all(&bo->event_queue);
1314 * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1317 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1318 int no_wait, int move_unfenced)
1320 struct drm_device *dev = bo->dev;
1321 struct drm_buffer_manager *bm = &dev->bm;
1323 struct drm_bo_mem_reg mem;
1325 * Flush outstanding fences.
1331 * Wait for outstanding fences.
1334 ret = drm_bo_wait(bo, 0, 0, no_wait);
1338 mem.num_pages = bo->num_pages;
1339 mem.size = mem.num_pages << PAGE_SHIFT;
1340 mem.proposed_flags = new_mem_flags;
1341 mem.page_alignment = bo->mem.page_alignment;
1343 mutex_lock(&bm->evict_mutex);
1344 mutex_lock(&dev->struct_mutex);
1345 list_del_init(&bo->lru);
1346 mutex_unlock(&dev->struct_mutex);
1349 * Determine where to move the buffer.
1351 ret = drm_bo_mem_space(bo, &mem, no_wait);
1355 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1358 mutex_lock(&dev->struct_mutex);
1359 if (ret || !move_unfenced) {
1361 if (mem.mm_node != bo->pinned_node)
1362 drm_mm_put_block(mem.mm_node);
1365 drm_bo_add_to_lru(bo);
1366 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1367 wake_up_all(&bo->event_queue);
1368 DRM_FLAG_MASKED(bo->priv_flags, 0,
1369 _DRM_BO_FLAG_UNFENCED);
1372 list_add_tail(&bo->lru, &bm->unfenced);
1373 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1374 _DRM_BO_FLAG_UNFENCED);
1376 mutex_unlock(&dev->struct_mutex);
1377 mutex_unlock(&bm->evict_mutex);
1381 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1383 uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1385 if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1387 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1388 (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1389 (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1392 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1393 ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1394 (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1400 * drm_buffer_object_validate:
1402 * @bo: the buffer object to modify
1404 * @fence_class: the new fence class covering this buffer
1406 * @move_unfenced: a boolean indicating whether switching the
1407 * memory space of this buffer should cause the buffer to
1408 * be placed on the unfenced list.
1410 * @no_wait: whether this function should return -EBUSY instead
1413 * Change buffer access parameters. This can involve moving
1414 * the buffer to the correct memory type, pinning the buffer
1415 * or changing the class/type of fence covering this buffer
1417 * Must be called with bo locked.
1420 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1421 uint32_t fence_class,
1422 int move_unfenced, int no_wait)
1424 struct drm_device *dev = bo->dev;
1425 struct drm_buffer_manager *bm = &dev->bm;
1426 struct drm_bo_driver *driver = dev->driver->bo_driver;
1430 DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1431 (unsigned long long) bo->mem.proposed_flags,
1432 (unsigned long long) bo->mem.flags);
1434 ret = driver->fence_type(bo, &fence_class, &ftype);
1437 DRM_ERROR("Driver did not support given buffer permissions\n");
1442 * We're switching command submission mechanism,
1443 * or cannot simply rely on the hardware serializing for us.
1445 * Insert a driver-dependent barrier or wait for buffer idle.
1448 if ((fence_class != bo->fence_class) ||
1449 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1452 if (driver->command_stream_barrier) {
1453 ret = driver->command_stream_barrier(bo,
1459 ret = drm_bo_wait(bo, 0, 0, no_wait);
1466 bo->new_fence_class = fence_class;
1467 bo->new_fence_type = ftype;
1469 ret = drm_bo_wait_unmapped(bo, no_wait);
1471 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1476 * Check whether we need to move buffer.
1479 if (!drm_bo_mem_compat(&bo->mem)) {
1480 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1484 DRM_ERROR("Failed moving buffer.\n");
1493 if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1494 bo->pinned_mem_type = bo->mem.mem_type;
1495 mutex_lock(&dev->struct_mutex);
1496 list_del_init(&bo->pinned_lru);
1497 drm_bo_add_to_pinned_lru(bo);
1499 if (bo->pinned_node != bo->mem.mm_node) {
1500 if (bo->pinned_node != NULL)
1501 drm_mm_put_block(bo->pinned_node);
1502 bo->pinned_node = bo->mem.mm_node;
1505 mutex_unlock(&dev->struct_mutex);
1507 } else if (bo->pinned_node != NULL) {
1509 mutex_lock(&dev->struct_mutex);
1511 if (bo->pinned_node != bo->mem.mm_node)
1512 drm_mm_put_block(bo->pinned_node);
1514 list_del_init(&bo->pinned_lru);
1515 bo->pinned_node = NULL;
1516 mutex_unlock(&dev->struct_mutex);
1521 * We might need to add a TTM.
1524 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1525 ret = drm_bo_add_ttm(bo);
1530 * Validation has succeeded, move the access and other
1531 * non-mapping-related flag bits from the proposed flags into bo->mem.flags.
1535 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1538 * Finally, adjust lru to be sure.
1541 mutex_lock(&dev->struct_mutex);
1543 if (move_unfenced) {
1544 list_add_tail(&bo->lru, &bm->unfenced);
1545 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1546 _DRM_BO_FLAG_UNFENCED);
1548 drm_bo_add_to_lru(bo);
1549 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1550 wake_up_all(&bo->event_queue);
1551 DRM_FLAG_MASKED(bo->priv_flags, 0,
1552 _DRM_BO_FLAG_UNFENCED);
1555 mutex_unlock(&dev->struct_mutex);
1561 * drm_bo_do_validate:
1563 * @bo: the buffer object
1565 * @flags: access rights, mapping parameters and cacheability. See
1566 * the DRM_BO_FLAG_* values in drm.h
1568 * @mask: Which flag values to change; this allows callers to modify
1569 * things without knowing the current state of other flags.
1571 * @hint: changes the procedure for this operation; see the DRM_BO_HINT_*
1574 * @fence_class: a driver-specific way of doing fences. Presumably,
1575 * this would be used if the driver had more than one submission and
1576 * fencing mechanism. At this point, there isn't any use of this
1577 * from the user mode code.
1579 * @rep: To be stuffed with the reply from validation
1581 * 'validate' a buffer object. This changes where the buffer is
1582 * located, along with changing access modes.
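 *
 * A minimal illustrative call (hypothetical values), requesting placement
 * in TT memory and blocking until space is available:
 *
 *	ret = drm_bo_do_validate(bo, DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
 *				 0, 0, &rep);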
1585 int drm_bo_do_validate(struct drm_buffer_object *bo,
1586 uint64_t flags, uint64_t mask, uint32_t hint,
1587 uint32_t fence_class,
1588 struct drm_bo_info_rep *rep)
1591 int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1593 mutex_lock(&bo->mutex);
1594 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1599 ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1603 ret = drm_buffer_object_validate(bo,
1605 !(hint & DRM_BO_HINT_DONT_FENCE),
1609 drm_bo_fill_rep_arg(bo, rep);
1611 mutex_unlock(&bo->mutex);
1614 EXPORT_SYMBOL(drm_bo_do_validate);
1617 * drm_bo_handle_validate
1619 * @file_priv: the drm file private, used to get a handle to the user context
1621 * @handle: the buffer object handle
1623 * @flags: access rights, mapping parameters and cacheability. See
1624 * the DRM_BO_FLAG_* values in drm.h
1626 * @mask: Which flag values to change; this allows callers to modify
1627 * things without knowing the current state of other flags.
1629 * @hint: changes the procedure for this operation; see the DRM_BO_HINT_*
1632 * @fence_class: a driver-specific way of doing fences. Presumably,
1633 * this would be used if the driver had more than one submission and
1634 * fencing mechanism. At this point, there isn't any use of this
1635 * from the user mode code.
1637 * @use_old_fence_class: don't change fence class, pull it from the buffer object
1639 * @rep: To be stuffed with the reply from validation
1641 * @bp_rep: To be stuffed with the buffer object pointer
1643 * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
1644 * Some permissions checking is done on the parameters; otherwise this
1645 * is a thin wrapper.
1648 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1649 uint64_t flags, uint64_t mask,
1651 uint32_t fence_class,
1652 int use_old_fence_class,
1653 struct drm_bo_info_rep *rep,
1654 struct drm_buffer_object **bo_rep)
1656 struct drm_device *dev = file_priv->minor->dev;
1657 struct drm_buffer_object *bo;
1660 mutex_lock(&dev->struct_mutex);
1661 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1662 mutex_unlock(&dev->struct_mutex);
1667 if (use_old_fence_class)
1668 fence_class = bo->fence_class;
1671 * Only allow creator to change shared buffer mask.
1674 if (bo->base.owner != file_priv)
1675 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1678 ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1683 drm_bo_usage_deref_unlocked(&bo);
1687 EXPORT_SYMBOL(drm_bo_handle_validate);
1689 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1690 struct drm_bo_info_rep *rep)
1692 struct drm_device *dev = file_priv->minor->dev;
1693 struct drm_buffer_object *bo;
1695 mutex_lock(&dev->struct_mutex);
1696 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1697 mutex_unlock(&dev->struct_mutex);
1702 mutex_lock(&bo->mutex);
1703 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1704 (void)drm_bo_busy(bo);
1705 drm_bo_fill_rep_arg(bo, rep);
1706 mutex_unlock(&bo->mutex);
1707 drm_bo_usage_deref_unlocked(&bo);
1711 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1713 struct drm_bo_info_rep *rep)
1715 struct drm_device *dev = file_priv->minor->dev;
1716 struct drm_buffer_object *bo;
1717 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1720 mutex_lock(&dev->struct_mutex);
1721 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1722 mutex_unlock(&dev->struct_mutex);
1727 mutex_lock(&bo->mutex);
1728 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1731 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1735 drm_bo_fill_rep_arg(bo, rep);
1738 mutex_unlock(&bo->mutex);
1739 drm_bo_usage_deref_unlocked(&bo);
1743 int drm_buffer_object_create(struct drm_device *dev,
1745 enum drm_bo_type type,
1748 uint32_t page_alignment,
1749 unsigned long buffer_start,
1750 struct drm_buffer_object **buf_obj)
1752 struct drm_buffer_manager *bm = &dev->bm;
1753 struct drm_buffer_object *bo;
1755 unsigned long num_pages;
1757 size += buffer_start & ~PAGE_MASK;
1758 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1759 if (num_pages == 0) {
1760 DRM_ERROR("Illegal buffer object size %ld.\n", size);
1764 bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1769 mutex_init(&bo->mutex);
1770 mutex_lock(&bo->mutex);
1772 atomic_set(&bo->usage, 1);
1773 atomic_set(&bo->mapped, -1);
1774 DRM_INIT_WAITQUEUE(&bo->event_queue);
1775 INIT_LIST_HEAD(&bo->lru);
1776 INIT_LIST_HEAD(&bo->pinned_lru);
1777 INIT_LIST_HEAD(&bo->ddestroy);
1778 #ifdef DRM_ODD_MM_COMPAT
1779 INIT_LIST_HEAD(&bo->p_mm_list);
1780 INIT_LIST_HEAD(&bo->vma_list);
1784 bo->num_pages = num_pages;
1785 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1786 bo->mem.num_pages = bo->num_pages;
1787 bo->mem.mm_node = NULL;
1788 bo->mem.page_alignment = page_alignment;
1789 bo->buffer_start = buffer_start & PAGE_MASK;
1791 bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1792 DRM_BO_FLAG_MAPPABLE);
1793 bo->mem.proposed_flags = 0;
1794 atomic_inc(&bm->count);
1796 * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1798 ret = drm_bo_modify_proposed_flags (bo, flags, flags);
1803 * For drm_bo_type_device buffers, allocate
1804 * address space from the device so that applications
1805 * can mmap the buffer from there
1807 if (bo->type == drm_bo_type_device) {
1808 mutex_lock(&dev->struct_mutex);
1809 ret = drm_bo_setup_vm_locked(bo);
1810 mutex_unlock(&dev->struct_mutex);
1815 ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1819 mutex_unlock(&bo->mutex);
1824 mutex_unlock(&bo->mutex);
1826 drm_bo_usage_deref_unlocked(&bo);
1829 EXPORT_SYMBOL(drm_buffer_object_create);
1832 static int drm_bo_add_user_object(struct drm_file *file_priv,
1833 struct drm_buffer_object *bo, int shareable)
1835 struct drm_device *dev = file_priv->minor->dev;
1838 mutex_lock(&dev->struct_mutex);
1839 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1843 bo->base.remove = drm_bo_base_deref_locked;
1844 bo->base.type = drm_buffer_type;
1845 bo->base.ref_struct_locked = NULL;
1846 bo->base.unref = drm_buffer_user_object_unmap;
1849 mutex_unlock(&dev->struct_mutex);
1853 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1855 struct drm_bo_create_arg *arg = data;
1856 struct drm_bo_create_req *req = &arg->d.req;
1857 struct drm_bo_info_rep *rep = &arg->d.rep;
1858 struct drm_buffer_object *entry;
1859 enum drm_bo_type bo_type;
1862 DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1863 (int)(req->size / 1024), req->page_alignment * 4);
1865 if (!dev->bm.initialized) {
1866 DRM_ERROR("Buffer object manager is not initialized.\n");
1871 * If the buffer creation request comes in with a starting address,
1872 * that address points at the desired user pages to map. Otherwise, create
1873 * a drm_bo_type_device buffer, which uses pages allocated from the kernel
1875 bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1878 * User buffers cannot be shared
1880 if (bo_type == drm_bo_type_user)
1881 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1883 ret = drm_buffer_object_create(file_priv->minor->dev,
1884 req->size, bo_type, req->flags,
1885 req->hint, req->page_alignment,
1886 req->buffer_start, &entry);
1890 ret = drm_bo_add_user_object(file_priv, entry,
1891 req->flags & DRM_BO_FLAG_SHAREABLE);
1893 drm_bo_usage_deref_unlocked(&entry);
1897 mutex_lock(&entry->mutex);
1898 drm_bo_fill_rep_arg(entry, rep);
1899 mutex_unlock(&entry->mutex);
1905 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1906 void *data, struct drm_file *file_priv)
1908 struct drm_bo_map_wait_idle_arg *arg = data;
1909 struct drm_bo_info_req *req = &arg->d.req;
1910 struct drm_bo_info_rep *rep = &arg->d.rep;
1913 if (!dev->bm.initialized) {
1914 DRM_ERROR("Buffer object manager is not initialized.\n");
1918 ret = drm_bo_read_lock(&dev->bm.bm_lock);
1923 * Validate the buffer. Note that 'fence_class' will be unused
1924 * as we pass use_old_fence_class=1 here. Note also that
1925 * the libdrm API doesn't pass fence_class to the kernel,
1926 * so it's a good thing it isn't used here.
1928 ret = drm_bo_handle_validate(file_priv, req->handle,
1931 req->hint | DRM_BO_HINT_DONT_FENCE,
1932 req->fence_class, 1,
1935 (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1942 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1944 struct drm_bo_map_wait_idle_arg *arg = data;
1945 struct drm_bo_info_req *req = &arg->d.req;
1946 struct drm_bo_info_rep *rep = &arg->d.rep;
1948 if (!dev->bm.initialized) {
1949 DRM_ERROR("Buffer object manager is not initialized.\n");
1953 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1961 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1963 struct drm_bo_handle_arg *arg = data;
1965 if (!dev->bm.initialized) {
1966 DRM_ERROR("Buffer object manager is not initialized.\n");
1970 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1975 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1977 struct drm_bo_reference_info_arg *arg = data;
1978 struct drm_bo_handle_arg *req = &arg->d.req;
1979 struct drm_bo_info_rep *rep = &arg->d.rep;
1980 struct drm_user_object *uo;
1983 if (!dev->bm.initialized) {
1984 DRM_ERROR("Buffer object manager is not initialized.\n");
1988 ret = drm_user_object_ref(file_priv, req->handle,
1989 drm_buffer_type, &uo);
1993 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2000 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2002 struct drm_bo_handle_arg *arg = data;
2005 if (!dev->bm.initialized) {
2006 DRM_ERROR("Buffer object manager is not initialized.\n");
2010 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
2014 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2016 struct drm_bo_reference_info_arg *arg = data;
2017 struct drm_bo_handle_arg *req = &arg->d.req;
2018 struct drm_bo_info_rep *rep = &arg->d.rep;
2021 if (!dev->bm.initialized) {
2022 DRM_ERROR("Buffer object manager is not initialized.\n");
2026 ret = drm_bo_handle_info(file_priv, req->handle, rep);
2033 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2035 struct drm_bo_map_wait_idle_arg *arg = data;
2036 struct drm_bo_info_req *req = &arg->d.req;
2037 struct drm_bo_info_rep *rep = &arg->d.rep;
2039 if (!dev->bm.initialized) {
2040 DRM_ERROR("Buffer object manager is not initialized.\n");
2044 ret = drm_bo_handle_wait(file_priv, req->handle,
2052 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2057 struct drm_device *dev = bo->dev;
2060 mutex_lock(&bo->mutex);
2062 ret = drm_bo_expire_fence(bo, allow_errors);
2067 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2068 mutex_lock(&dev->struct_mutex);
2069 list_del_init(&bo->pinned_lru);
2070 if (bo->pinned_node == bo->mem.mm_node)
2071 bo->pinned_node = NULL;
2072 if (bo->pinned_node != NULL) {
2073 drm_mm_put_block(bo->pinned_node);
2074 bo->pinned_node = NULL;
2076 mutex_unlock(&dev->struct_mutex);
2079 if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2080 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer present at "
2081 "cleanup. Removing flag and evicting.\n");
2082 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2083 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2086 if (bo->mem.mem_type == mem_type)
2087 ret = drm_bo_evict(bo, mem_type, 0);
2094 DRM_ERROR("Cleanup eviction failed\n");
2099 mutex_unlock(&bo->mutex);
2104 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2108 return list_entry(list, struct drm_buffer_object, pinned_lru);
2110 return list_entry(list, struct drm_buffer_object, lru);
2114 * dev->struct_mutex locked.
2117 static int drm_bo_force_list_clean(struct drm_device *dev,
2118 struct list_head *head,
2124 struct list_head *list, *next, *prev;
2125 struct drm_buffer_object *entry, *nentry;
2130 * The list traversal is a bit odd here, because an item may
2131 * disappear from the list when we release the struct_mutex or
2132 * when we decrease the usage count. Also we're not guaranteed
2133 * to drain pinned lists, so we can't always restart.
2138 list_for_each_safe(list, next, head) {
2141 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2142 atomic_inc(&entry->usage);
2144 atomic_dec(&nentry->usage);
2149 * Protect the next item from destruction, so we can check
2150 * its list pointers later on.
2154 nentry = drm_bo_entry(next, pinned_list);
2155 atomic_inc(&nentry->usage);
2157 mutex_unlock(&dev->struct_mutex);
2159 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2161 mutex_lock(&dev->struct_mutex);
2163 drm_bo_usage_deref_locked(&entry);
2168 * Has the next item disappeared from the list?
2171 do_restart = ((next->prev != list) && (next->prev != prev));
2173 if (nentry != NULL && do_restart)
2174 drm_bo_usage_deref_locked(&nentry);
2182 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
2184 struct drm_buffer_manager *bm = &dev->bm;
2185 struct drm_mem_type_manager *man = &bm->man[mem_type];
2188 if (mem_type >= DRM_BO_MEM_TYPES) {
2189 DRM_ERROR("Illegal memory type %d\n", mem_type);
2193 if (!man->has_type) {
2194 DRM_ERROR("Trying to take down uninitialized "
2195 "memory manager type %u\n", mem_type);
2199 if ((man->kern_init_type) && (kern_clean == 0)) {
2200 DRM_ERROR("Trying to take down kernel initialized "
2201 "memory manager type %u\n", mem_type);
2210 BUG_ON(!list_empty(&bm->unfenced));
2211 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2212 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2214 if (drm_mm_clean(&man->manager)) {
2215 drm_mm_takedown(&man->manager);
2223 EXPORT_SYMBOL(drm_bo_clean_mm);
2226 * Evict all buffers of a particular mem_type, but leave memory manager
2227 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2228 * point since we have the hardware lock.
2231 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2234 struct drm_buffer_manager *bm = &dev->bm;
2235 struct drm_mem_type_manager *man = &bm->man[mem_type];
2237 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2238 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2242 if (!man->has_type) {
2243 DRM_ERROR("Memory type %u has not been initialized.\n",
2248 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2251 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2256 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
2257 unsigned long p_offset, unsigned long p_size,
2260 struct drm_buffer_manager *bm = &dev->bm;
2262 struct drm_mem_type_manager *man;
2264 if (type >= DRM_BO_MEM_TYPES) {
2265 DRM_ERROR("Illegal memory type %d\n", type);
2269 man = &bm->man[type];
2270 if (man->has_type) {
2271 DRM_ERROR("Memory manager already initialized for type %d\n",
2276 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2281 if (type != DRM_BO_MEM_LOCAL) {
2283 DRM_ERROR("Zero size memory manager type %d\n", type);
2286 ret = drm_mm_init(&man->manager, p_offset, p_size);
2292 man->kern_init_type = kern_init;
2295 INIT_LIST_HEAD(&man->lru);
2296 INIT_LIST_HEAD(&man->pinned);
2300 EXPORT_SYMBOL(drm_bo_init_mm);
2303 * This function is intended to be called on drm driver unload.
2304 * If you decide to call it from lastclose, you must protect the call
2305 * from a potentially racing drm_bo_driver_init in firstopen.
2306 * (This may happen on X server restart).
2309 int drm_bo_driver_finish(struct drm_device *dev)
2311 struct drm_buffer_manager *bm = &dev->bm;
2313 unsigned i = DRM_BO_MEM_TYPES;
2314 struct drm_mem_type_manager *man;
2316 mutex_lock(&dev->struct_mutex);
2318 if (!bm->initialized)
2320 bm->initialized = 0;
2324 if (man->has_type) {
2326 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
2328 DRM_ERROR("DRM memory manager type %d "
2329 "is not clean.\n", i);
2334 mutex_unlock(&dev->struct_mutex);
2336 if (!cancel_delayed_work(&bm->wq))
2337 flush_scheduled_work();
2339 mutex_lock(&dev->struct_mutex);
2340 drm_bo_delayed_delete(dev, 1);
2341 if (list_empty(&bm->ddestroy))
2342 DRM_DEBUG("Delayed destroy list was clean\n");
2344 if (list_empty(&bm->man[0].lru))
2345 DRM_DEBUG("Swap list was clean\n");
2347 if (list_empty(&bm->man[0].pinned))
2348 DRM_DEBUG("NO_MOVE list was clean\n");
2350 if (list_empty(&bm->unfenced))
2351 DRM_DEBUG("Unfenced list was clean\n");
2353 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2354 ClearPageReserved(bm->dummy_read_page);
2356 __free_page(bm->dummy_read_page);
2359 mutex_unlock(&dev->struct_mutex);
2362 EXPORT_SYMBOL(drm_bo_driver_finish);
2365 * This function is intended to be called on drm driver load.
2366 * If you decide to call it from firstopen, you must protect the call
2367 * from a potentially racing drm_bo_driver_finish in lastclose.
2368 * (This may happen on X server restart).
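 *
 * Illustrative placement in a hypothetical driver "foo":
 *	foo_driver_load():	drm_bo_driver_init(dev);
 *	foo_driver_unload():	drm_bo_driver_finish(dev);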
2371 int drm_bo_driver_init(struct drm_device *dev)
2373 struct drm_bo_driver *driver = dev->driver->bo_driver;
2374 struct drm_buffer_manager *bm = &dev->bm;
2377 bm->dummy_read_page = NULL;
2378 drm_bo_init_lock(&bm->bm_lock);
2379 mutex_lock(&dev->struct_mutex);
2383 bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2384 if (!bm->dummy_read_page) {
2389 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2390 SetPageReserved(bm->dummy_read_page);
2394 * Initialize the system memory buffer type.
2395 * Other types need to be driver / IOCTL initialized.
2397 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
2401 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2402 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2404 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2406 bm->initialized = 1;
2408 atomic_set(&bm->count, 0);
2410 INIT_LIST_HEAD(&bm->unfenced);
2411 INIT_LIST_HEAD(&bm->ddestroy);
2413 mutex_unlock(&dev->struct_mutex);
2416 EXPORT_SYMBOL(drm_bo_driver_init);
2418 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2420 struct drm_mm_init_arg *arg = data;
2421 struct drm_buffer_manager *bm = &dev->bm;
2422 struct drm_bo_driver *driver = dev->driver->bo_driver;
2426 DRM_ERROR("Buffer objects are not supported by this driver\n");
2430 ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2435 if (arg->magic != DRM_BO_INIT_MAGIC) {
2436 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2437 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2440 if (arg->major != DRM_BO_INIT_MAJOR) {
2441 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2442 "\tversions don't match. Got %d, expected %d.\n",
2443 arg->major, DRM_BO_INIT_MAJOR);
2447 mutex_lock(&dev->struct_mutex);
2448 if (!bm->initialized) {
2449 DRM_ERROR("DRM memory manager was not initialized.\n");
2452 if (arg->mem_type == 0) {
2453 DRM_ERROR("System memory buffers already initialized.\n");
2456 ret = drm_bo_init_mm(dev, arg->mem_type,
2457 arg->p_offset, arg->p_size, 0);
2460 mutex_unlock(&dev->struct_mutex);
2461 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2469 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2471 struct drm_mm_type_arg *arg = data;
2472 struct drm_buffer_manager *bm = &dev->bm;
2473 struct drm_bo_driver *driver = dev->driver->bo_driver;
2477 DRM_ERROR("Buffer objects are not supported by this driver\n");
2481 ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
2485 mutex_lock(&dev->struct_mutex);
2487 if (!bm->initialized) {
2488 DRM_ERROR("DRM memory manager was not initialized\n");
2491 if (arg->mem_type == 0) {
2492 DRM_ERROR("No takedown for System memory buffers.\n");
2496 if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
2498 DRM_ERROR("Memory manager type %d not clean. "
2499 "Delaying takedown\n", arg->mem_type);
2503 mutex_unlock(&dev->struct_mutex);
2504 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2512 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2514 struct drm_mm_type_arg *arg = data;
2515 struct drm_bo_driver *driver = dev->driver->bo_driver;
2519 DRM_ERROR("Buffer objects are not supported by this driver\n");
2523 if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2524 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2528 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2529 ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
2534 mutex_lock(&dev->struct_mutex);
2535 ret = drm_bo_lock_mm(dev, arg->mem_type);
2536 mutex_unlock(&dev->struct_mutex);
2538 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2545 int drm_mm_unlock_ioctl(struct drm_device *dev,
2547 struct drm_file *file_priv)
2549 struct drm_mm_type_arg *arg = data;
2550 struct drm_bo_driver *driver = dev->driver->bo_driver;
2554 DRM_ERROR("Buffer objects are not supported by this driver\n");
2558 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2559 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2567 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2569 struct drm_mm_info_arg *arg = data;
2570 struct drm_buffer_manager *bm = &dev->bm;
2571 struct drm_bo_driver *driver = dev->driver->bo_driver;
2572 struct drm_mem_type_manager *man;
2574 int mem_type = arg->mem_type;
2577 DRM_ERROR("Buffer objects are not supported by this driver\n");
2581 if (mem_type >= DRM_BO_MEM_TYPES) {
2582 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2586 mutex_lock(&dev->struct_mutex);
2587 if (!bm->initialized) {
2588 DRM_ERROR("DRM memory manager was not initialized\n");
2594 man = &bm->man[arg->mem_type];
2596 arg->p_size = man->size;
2599 mutex_unlock(&dev->struct_mutex);
2604 * buffer object vm functions.
2607 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2609 struct drm_buffer_manager *bm = &dev->bm;
2610 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2612 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2613 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2616 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2619 if (mem->flags & DRM_BO_FLAG_CACHED)
2624 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2627 * \c Get the PCI offset for the buffer object memory.
2629 * \param bo The buffer object.
2630 * \param bus_base On return the base of the PCI region
2631 * \param bus_offset On return the byte offset into the PCI region
2632 * \param bus_size On return the byte size of the buffer object or zero if
2633 * the buffer object memory is not accessible through a PCI region.
2634 * \return Failure indication.
2636 * Returns -EINVAL if the buffer object is currently not mappable.
2637 * Otherwise returns zero.
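 *
 * Illustrative use (error handling omitted; the ioremap step is an
 * assumption about how a caller would typically map the region):
 *
 *	unsigned long bus_base, bus_offset, bus_size;
 *	if (!drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
 *			       &bus_size) && bus_size != 0)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);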
2640 int drm_bo_pci_offset(struct drm_device *dev,
2641 struct drm_bo_mem_reg *mem,
2642 unsigned long *bus_base,
2643 unsigned long *bus_offset, unsigned long *bus_size)
2645 struct drm_buffer_manager *bm = &dev->bm;
2646 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2649 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2652 if (drm_mem_reg_is_pci(dev, mem)) {
2653 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2654 *bus_size = mem->num_pages << PAGE_SHIFT;
2655 *bus_base = man->io_offset;
2662 * \c Kill all user-space virtual mappings of this buffer object.
2664 * \param bo The buffer object.
2666 * Call bo->mutex locked.
2669 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2671 struct drm_device *dev = bo->dev;
2672 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2673 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2675 if (!dev->dev_mapping)
2678 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2682 * drm_bo_takedown_vm_locked:
2684 * @bo: the buffer object to remove any drm device mapping
2686 * Remove any associated vm mapping on the drm device node that
2687 * would have been created for a drm_bo_type_device buffer
2689 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2691 struct drm_map_list *list;
2692 drm_local_map_t *map;
2693 struct drm_device *dev = bo->dev;
2695 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2696 if (bo->type != drm_bo_type_device)
2699 list = &bo->map_list;
2700 if (list->user_token) {
2701 drm_ht_remove_item(&dev->map_hash, &list->hash);
2702 list->user_token = 0;
2704 if (list->file_offset_node) {
2705 drm_mm_put_block(list->file_offset_node);
2706 list->file_offset_node = NULL;
2713 drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2715 list->user_token = 0ULL;
2716 drm_bo_usage_deref_locked(&bo);
2720 * drm_bo_setup_vm_locked:
2722 * @bo: the buffer to allocate address space for
2724 * Allocate address space in the drm device so that applications
2725 * can mmap the buffer and access the contents. This only
2726 * applies to drm_bo_type_device objects as others are not
2727 * placed in the drm device address space.
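 *
 * The offset allocated here ends up in bo->map_list.user_token, which
 * drm_bo_fill_rep_arg() hands back to user space in rep->arg_handle so
 * that the buffer can presumably be mmap()ed through the drm device node
 * at that offset.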
2729 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2731 struct drm_map_list *list = &bo->map_list;
2732 drm_local_map_t *map;
2733 struct drm_device *dev = bo->dev;
2735 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2736 list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2742 map->type = _DRM_TTM;
2743 map->flags = _DRM_REMOVABLE;
2744 map->size = bo->mem.num_pages * PAGE_SIZE;
2745 atomic_inc(&bo->usage);
2746 map->handle = (void *)bo;
2748 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2749 bo->mem.num_pages, 0, 0);
2751 if (!list->file_offset_node) {
2752 drm_bo_takedown_vm_locked(bo);
2756 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2757 bo->mem.num_pages, 0);
2759 list->hash.key = list->file_offset_node->start;
2760 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2761 drm_bo_takedown_vm_locked(bo);
2765 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2770 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2771 struct drm_file *file_priv)
2773 struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2775 arg->major = DRM_BO_INIT_MAJOR;
2776 arg->minor = DRM_BO_INIT_MINOR;
2777 arg->patchlevel = DRM_BO_INIT_PATCH;