/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
/*
 * Buffer object locking policy:
 *
 * Processes attempting to access a buffer object:
 * Lock dev->struct_mutex;
 * Increase usage;
 * Unlock dev->struct_mutex;
 * Lock buffer->mutex;
 * Do whatever you want;
 * Unlock buffer->mutex;
 * Decrease usage. Call destruction if zero.
 *
 * User object visibility ups usage just once, since it has its own
 * refcounting.
 *
 * Destruction:
 * lock dev->struct_mutex;
 * Verify that usage is zero. Otherwise unlock and continue.
 * Destroy the object;
 * unlock dev->struct_mutex;
 *
 * Mutex and spinlock locking orders:
 * 1.) Buffer mutex (bo->mutex) is taken before dev->struct_mutex when
 *     both are held.
 * 2.) Refer to ttm locking orders.
 */
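/*
 * A minimal sketch of the access pattern described above, using the
 * helpers defined further down in this file (illustrative only):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&bo->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *
 *	mutex_lock(&bo->mutex);
 *	(read or modify the buffer object)
 *	mutex_unlock(&bo->mutex);
 *
 *	drm_bo_usage_deref_unlocked(bo);
 */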
static void drm_bo_destroy_locked(drm_buffer_object_t *bo);

#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
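/*
 * The XOR form above is a branchless masked assignment: bits of _old
 * outside _mask are left unchanged, bits inside _mask take the value of
 * _new. Example (illustrative, both usages appear later in this file):
 * DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED) clears only
 * the unfenced bit, while DRM_FLAG_MASKED(bo->priv_flags,
 * _DRM_BO_FLAG_EVICTED, _DRM_BO_FLAG_EVICTED) sets only the evicted bit.
 */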
static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}
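/*
 * Example (illustrative): assuming the memory types are numbered
 * DRM_BO_MEM_LOCAL = 0, DRM_BO_MEM_TT = 1, DRM_BO_MEM_VRAM = 2 and the
 * corresponding flags occupy bits 24 and up in drm.h,
 * drm_bo_type_flags(DRM_BO_MEM_TT) yields bit 25, i.e. DRM_BO_FLAG_MEM_TT,
 * so a type index converts directly to its mask bit in DRM_BO_MASK_MEM.
 */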
/*
 * bo locked. dev->struct_mutex locked.
 */

static void drm_bo_add_to_lru(drm_buffer_object_t * bo,
			      drm_buffer_manager_t * bm)
{
	struct list_head *list;

	switch (bo->flags & DRM_BO_MASK_MEM) {
	case DRM_BO_FLAG_MEM_TT:
		bo->mem_type = DRM_BO_MEM_TT;
		break;
	case DRM_BO_FLAG_MEM_VRAM:
		bo->mem_type = DRM_BO_MEM_VRAM;
		break;
	case DRM_BO_FLAG_MEM_LOCAL:
		bo->mem_type = DRM_BO_MEM_LOCAL;
		break;
	default:
		BUG_ON(1);
	}

	list = (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
		&bm->pinned[bo->mem_type] : &bm->lru[bo->mem_type];
	list_add_tail(&bo->lru, list);
}
static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
				int force_no_move)
{
	drm_device_t *dev = bo->dev;
	int ret;

	if (bo->mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (evict)
			ret = drm_evict_ttm(bo->ttm);
		else
			ret = drm_unbind_ttm(bo->ttm);

		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			return ret;
		}

		if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
			drm_mm_put_block(bo->mm_node);
			bo->mm_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	bo->flags &= ~DRM_BO_FLAG_MEM_TT;
	bo->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

	return 0;
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		       int no_wait)
{
	drm_fence_object_t *fence = bo->fence;
	int ret;

	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret = drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					    bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(dev, fence);
		bo->fence = NULL;
	}
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing from lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(drm_buffer_object_t *bo, int remove_all)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence,
						   bo->fence_type)) {
		drm_fence_usage_deref_locked(dev, bo->fence);
		bo->fence = NULL;
	}

	if (bo->fence && remove_all) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting waiting buffers.\n");
			}
		}
		if (bo->fence) {
			drm_fence_usage_deref_unlocked(dev, bo->fence);
			bo->fence = NULL;
		}
	}
	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage))
		goto out;

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mm_node) {
			drm_mm_put_block(bo->mm_node);
			bo->mm_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(dev, bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) <
				       1) ? 1 : DRM_HZ / 100);
	}

      out:
	mutex_unlock(&bo->mutex);
}
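/*
 * Lifecycle sketch (illustrative, built from the functions in this file):
 * when the last reference is dropped while a fence is still outstanding,
 * the object lands on bm->ddestroy and is reaped by the delayed worker:
 *
 *	drm_bo_usage_deref_locked(bo)
 *	  -> drm_bo_destroy_locked(bo)          (still fenced / on lists)
 *	       -> drm_bo_cleanup_refs(bo, 0)
 *	            list_add_tail(&bo->ddestroy, &bm->ddestroy);
 *	            schedule_delayed_work(&bm->wq, ...);
 *	(fence signals)
 *	drm_bo_delayed_workqueue -> drm_bo_delayed_delete(dev, 0)
 *	  -> drm_bo_cleanup_refs -> drm_bo_destroy_locked -> drm_ctl_free
 */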
/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	if (list_empty(&bo->lru) && bo->mm_node == NULL &&
	    atomic_read(&bo->usage) == 0) {
		BUG_ON(bo->fence != NULL);

		if (bo->ttm) {
			unsigned long _end = jiffies + DRM_HZ;
			int ret;

			do {
				ret = drm_unbind_ttm(bo->ttm);
				if (ret == -EAGAIN) {
					mutex_unlock(&dev->struct_mutex);
					schedule();
					mutex_lock(&dev->struct_mutex);
				}
			} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));

			if (ret)
				DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. "
					  "Bad. Continuing anyway\n");
		}

		if (bo->ttm_object) {
			drm_ttm_object_deref_locked(dev, bo->ttm_object);
		}

		atomic_dec(&bm->count);
		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
		return;
	}

	/*
	 * Some stuff is still trying to reference the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry)
			atomic_dec(&nentry->usage);
	}
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
#else
	drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}
static void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage)) {
		drm_bo_destroy_locked(bo);
	}
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
	drm_bo_usage_deref_locked(drm_user_object_entry(uo, drm_buffer_object_t,
							base));
}

static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&bo->dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(bo);
		mutex_unlock(&bo->dev->struct_mutex);
	}
}
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head f_list, *l;

	INIT_LIST_HEAD(&f_list);

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_splice_init(list, &f_list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry, bm);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		l = f_list.next;
	}
	atomic_add(count, &fence->usage);
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}
EXPORT_SYMBOL(drm_fence_buffer_objects);
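/*
 * Typical driver-side usage (a sketch; the driver function name is
 * hypothetical): after validating buffers and emitting commands, a driver
 * fences the whole unfenced list in one call, letting this function create
 * and emit the fence object itself by passing NULL for both list and fence:
 *
 *	static int foo_fence_unfenced(drm_file_t *priv)
 *	{
 *		drm_fence_object_t *fence = NULL;
 *		int ret;
 *
 *		ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *		if (ret)
 *			return ret;
 *		(per the note above, the caller deregisters its own fence
 *		 usage when done with the returned fence)
 *		drm_fence_usage_deref_unlocked(priv->head->dev, fence);
 *		return 0;
 *	}
 */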
/*
 * bo->mutex locked.
 */

static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
			int no_wait, int force_no_move)
{
	int ret = 0;
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (!(bo->flags & drm_bo_type_flags(mem_type)))
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to expire fence before "
				  "buffer eviction.\n");
		goto out;
	}

	if (mem_type == DRM_BO_MEM_TT) {
		ret = drm_move_tt_to_local(bo, 1, force_no_move);
	}

	mutex_lock(&dev->struct_mutex);
	list_del_init(&bo->lru);
	drm_bo_add_to_lru(bo, bm);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto out;

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);
      out:
	return ret;
}
int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
		       int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_mm_node_t *node;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *entry;
	drm_mm_t *mm = &bm->manager[mem_type];
	struct list_head *lru;
	unsigned long size = bo->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(mm, size, bo->page_alignment, 1);
		if (node)
			break;

		lru = &bm->lru[mem_type];
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, drm_buffer_object_t, lru);

		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
		ret = drm_bo_evict(entry, mem_type, no_wait, 0);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		DRM_ERROR("Out of videoram / aperture space\n");
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, size, bo->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	node->private = (void *)bo;

	bo->mm_node = node;
	bo->offset = node->start * PAGE_SIZE;
	return 0;
}
static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_ttm_backend_t *be;
	int ret;

	if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
		BUG_ON(bo->mm_node);
		ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
		if (ret)
			return ret;
	}

	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);

	mutex_lock(&dev->struct_mutex);
	ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
			   bo->mm_node->start);
	if (ret) {
		drm_mm_put_block(bo->mm_node);
		bo->mm_node = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	be = bo->ttm->be;
	if (be->needs_ub_cache_adjust(be))
		bo->flags &= ~DRM_BO_FLAG_CACHED;
	bo->flags &= ~DRM_BO_MASK_MEM;
	bo->flags |= DRM_BO_FLAG_MEM_TT;

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
		if (ret)
			DRM_ERROR("Could not flush read caches\n");
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
	}

	return 0;
}
static int drm_bo_new_flags(drm_device_t * dev,
			    uint32_t flags, uint32_t new_mask, uint32_t hint,
			    int init, uint32_t * n_flags, uint32_t * n_mask)
{
	uint32_t new_flags = 0;
	uint32_t new_props;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	unsigned i;

	/*
	 * First adjust the mask to take away nonexistent memory types.
	 */

	for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
		if (!bm->use_type[i])
			new_mask &= ~drm_bo_type_flags(i);
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
		     "processes.\n");
		return -EPERM;
	}
	if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
		if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
		     !driver->cached[DRM_BO_MEM_TT]) &&
		    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
		     && !driver->cached[DRM_BO_MEM_VRAM])) {
			new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
		} else {
			if (!driver->cached[DRM_BO_MEM_TT])
				new_flags &= DRM_BO_FLAG_MEM_TT;
			if (!driver->cached[DRM_BO_MEM_VRAM])
				new_flags &= DRM_BO_FLAG_MEM_VRAM;
		}
	}

	if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
	    !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
		if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
		    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
			DRM_ERROR
			    ("Cannot read cached from a pinned VRAM / TT buffer\n");
			return -EINVAL;
		}
	}

	/*
	 * Determine new memory location:
	 */

	if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {

		new_flags = new_mask & DRM_BO_MASK_MEM;

		if (!new_flags) {
			DRM_ERROR("Invalid buffer object memory flags\n");
			return -EINVAL;
		}

		if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
			if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
			    new_flags & (DRM_BO_FLAG_MEM_VRAM |
					 DRM_BO_FLAG_MEM_TT)) {
				new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
			} else {
				new_flags = DRM_BO_FLAG_MEM_LOCAL;
			}
		}
		if (new_flags & DRM_BO_FLAG_MEM_TT) {
			if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
			    new_flags & DRM_BO_FLAG_MEM_VRAM) {
				new_flags = DRM_BO_FLAG_MEM_VRAM;
			} else {
				new_flags = DRM_BO_FLAG_MEM_TT;
			}
		}
	} else {
		new_flags = flags & DRM_BO_MASK_MEM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	new_flags |= new_mask & ~DRM_BO_MASK_MEM;

	if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
	    (new_flags & DRM_BO_FLAG_NO_EVICT) &&
	    (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
		if (!(flags & DRM_BO_FLAG_CACHED)) {
			DRM_ERROR
			    ("Cannot change caching policy of pinned buffer\n");
			return -EINVAL;
		} else {
			new_flags &= ~DRM_BO_FLAG_CACHED;
		}
	}

	*n_flags = new_flags;
	*n_mask = new_mask;
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
{
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;

	uo = drm_lookup_user_object(priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
	return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
	return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be an atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */

static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	unsigned long _end = jiffies + 3 * DRM_HZ;

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	do {
		mutex_unlock(&bo->mutex);
		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
			    !drm_bo_check_unfenced(bo));
		mutex_lock(&bo->mutex);
		if (ret == -EINTR)
			return -EAGAIN;
		if (ret) {
			DRM_ERROR
			    ("Error waiting for buffer to become fenced\n");
			return ret;
		}
		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	} while (ret && !time_after_eq(jiffies, _end));
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return ret;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
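/*
 * The window this guards against (sketch built from functions in this
 * file): between validation and fencing, the buffer sits on bm->unfenced
 * with _DRM_BO_FLAG_UNFENCED set:
 *
 *	drm_buffer_object_validate(bo, ..., move_unfenced = 1, ...);
 *	    (bo on bm->unfenced, priv_flags has _DRM_BO_FLAG_UNFENCED)
 *	... commands referencing bo are submitted ...
 *	drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *	    (bo fenced, flag cleared, waiters on bo->event_queue woken)
 *
 * Anyone else touching the buffer in between ends up in
 * drm_bo_wait_unfenced(), waiting for the flag to clear.
 */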
/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				drm_bo_arg_reply_t * rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->flags;
	rep->size = bo->num_pages * PAGE_SIZE;
	rep->offset = bo->offset;

	if (bo->ttm_object) {
		rep->arg_handle = bo->ttm_object->map_list.user_token;
	} else {
		rep->arg_handle = 0;
	}

	rep->mask = bo->mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags.
			 * Need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);
	} else
		drm_bo_fill_rep_arg(bo, rep);
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
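/*
 * Note on the mapped counter convention used above (sketch): bo->mapped
 * is initialized to -1 in drm_buffer_object_create(), so "unmapped" is -1.
 * The first mapper takes it to 0 (atomic_inc_and_test() returns true when
 * the result is zero) and each further mapping increments it. Unmapping
 * uses atomic_add_negative(-1, &bo->mapped), which returns true only when
 * the counter drops back to -1, and that is when waiters are woken.
 */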
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
					 drm_ref_t action)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	/*
	 * We DON'T want to take the bo->mutex here, because we want to
	 * hold it when we wait for an unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}
static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
			      int no_wait, int force_no_move)
{
	int ret = 0;

	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Make sure we're not mapped.
	 */

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	if (new_flags & DRM_BO_FLAG_MEM_TT) {
		ret = drm_move_local_to_tt(bo, no_wait);
		if (ret)
			return ret;
	} else {
		drm_move_tt_to_local(bo, 0, force_no_move);
	}

	return 0;
}
static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      uint32_t new_flags,
				      int move_unfenced, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	uint32_t flag_diff = (new_flags ^ bo->flags);
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
		DRM_ERROR("Vram support not implemented yet\n");
		return -EINVAL;
	}

	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
	ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	/*
	 * Move out if we need to change caching policy.
	 */

	if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
	    !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
		if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
			DRM_ERROR("Cannot change caching policy of "
				  "pinned buffer.\n");
			return -EINVAL;
		}
		ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
	flag_diff = (new_flags ^ bo->flags);

	/*
	 * Check whether we dropped no_move policy, and in that case,
	 * release reserved manager regions.
	 */

	if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
	    !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
		mutex_lock(&dev->struct_mutex);
		if (bo->mm_node) {
			drm_mm_put_block(bo->mm_node);
			bo->mm_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
		ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	if (move_unfenced) {

		/*
		 * Place on unfenced list.
		 */

		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->lru);
		list_add_tail(&bo->lru, &bm->unfenced);
		mutex_unlock(&dev->struct_mutex);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->lru);
		drm_bo_add_to_lru(bo, bm);
		mutex_unlock(&dev->struct_mutex);
	}

	bo->flags = new_flags;
	return 0;
}
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
				  uint32_t flags, uint32_t mask, uint32_t hint,
				  drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	uint32_t new_flags;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;

	ret = drm_bo_new_flags(dev, bo->flags,
			       (flags & mask) | (bo->mask & ~mask), hint,
			       0, &new_flags, &bo->mask);
	if (ret)
		goto out;

	ret =
	    drm_buffer_object_validate(bo, new_flags,
				       !(hint & DRM_BO_HINT_DONT_FENCE),
				       no_wait);
	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
			      drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return 0;
}
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
			      uint32_t hint, drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	drm_ttm_object_t *to = NULL;
	int ret = 0;
	uint32_t ttm_flags = 0;

	bo->ttm_object = NULL;
	bo->ttm = NULL;

	switch (bo->type) {
	case drm_bo_type_dc:
		mutex_lock(&dev->struct_mutex);
		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
					    ttm_flags, &to);
		mutex_unlock(&dev->struct_mutex);
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	if (to) {
		bo->ttm_object = to;
		bo->ttm = drm_ttm_from_object(to);
	}
	return ret;
}
/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call bo->mutex locked.
 */

int drm_buffer_object_transfer(drm_buffer_object_t *bo,
			       drm_buffer_object_t **new_obj)
{
	drm_buffer_object_t *fbo;
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	fbo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;
	mutex_init(&fbo->mutex);
	mutex_lock(&fbo->mutex);
	mutex_lock(&dev->struct_mutex);

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	list_splice_init(&bo->lru, &fbo->lru);

	bo->mm_node = NULL;
	bo->ttm = NULL;
	bo->ttm_object = NULL;
	bo->fence = NULL;
	bo->flags = 0;

	fbo->mm_node->private = (void *)fbo;
	atomic_set(&fbo->usage, 1);
	atomic_inc(&bm->count);
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&fbo->mutex);

	*new_obj = fbo;
	return 0;
}
int drm_buffer_object_create(drm_file_t * priv,
			     unsigned long size,
			     drm_bo_type_t type,
			     uint32_t mask,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	int ret = 0;
	uint32_t new_flags;
	unsigned long num_pages;

	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
		return -EINVAL;
	}
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);

	bo->dev = dev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mm_node = NULL;
	bo->page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
	} else {
		bo->buffer_start = buffer_start;
	}
	bo->priv_flags = 0;
	bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
	atomic_inc(&bm->count);

	ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
			       1, &new_flags, &bo->mask);
	if (ret)
		goto out_err;
	ret = drm_bo_add_ttm(priv, bo);
	if (ret)
		goto out_err;

	ret = drm_buffer_object_validate(bo, new_flags, 0,
					 hint & DRM_BO_HINT_DONT_BLOCK);
	if (ret)
		goto out_err;

	mutex_unlock(&bo->mutex);
	*buf_obj = bo;
	return 0;

      out_err:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo);
	return ret;
}
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
				  int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
	LOCK_TEST_WITH_RETURN(dev, filp);
	return 0;
}
int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_bo_arg_t arg;
	drm_bo_arg_request_t *req = &arg.d.req;
	drm_bo_arg_reply_t rep;
	unsigned long next;
	drm_user_object_t *uo;
	drm_buffer_object_t *entry;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	do {
		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

		if (arg.handled) {
			data = arg.next;
			continue;
		}

		rep.ret = 0;
		switch (req->op) {
		case drm_bo_create:
			rep.ret =
			    drm_buffer_object_create(priv, req->size,
						     req->type,
						     req->mask,
						     req->hint,
						     req->page_alignment,
						     req->buffer_start, &entry);
			if (rep.ret)
				break;
			rep.ret =
			    drm_bo_add_user_object(priv, entry,
						   req->mask &
						   DRM_BO_FLAG_SHAREABLE);
			if (rep.ret) {
				drm_bo_usage_deref_unlocked(entry);
				break;
			}
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unmap:
			rep.ret = drm_buffer_object_unmap(priv, req->handle);
			break;
		case drm_bo_map:
			rep.ret = drm_buffer_object_map(priv, req->handle,
							req->mask,
							req->hint, &rep);
			break;
		case drm_bo_destroy:
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			if (!uo || (uo->type != drm_buffer_type)
			    || uo->owner != priv) {
				mutex_unlock(&dev->struct_mutex);
				rep.ret = -EINVAL;
				break;
			}
			rep.ret = drm_remove_user_object(priv, uo);
			mutex_unlock(&dev->struct_mutex);
			break;
		case drm_bo_reference:
			rep.ret = drm_user_object_ref(priv, req->handle,
						      drm_buffer_type, &uo);
			if (rep.ret)
				break;
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			entry =
			    drm_user_object_entry(uo, drm_buffer_object_t,
						  base);
			atomic_dec(&entry->usage);
			mutex_unlock(&dev->struct_mutex);
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unreference:
			rep.ret = drm_user_object_unref(priv, req->handle,
							drm_buffer_type);
			break;
		case drm_bo_validate:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			rep.ret =
			    drm_bo_handle_validate(priv, req->handle, req->mask,
						   req->arg_handle, req->hint,
						   &rep);
			break;
		case drm_bo_fence:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			break;
		case drm_bo_info:
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
			break;
		case drm_bo_wait_idle:
			rep.ret = drm_bo_handle_wait(priv, req->handle,
						     req->hint, &rep);
			break;
		case drm_bo_ref_fence:
			rep.ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			rep.ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */

		if (rep.ret == -EAGAIN)
			return -EAGAIN;

		arg.handled = 1;
		arg.d.rep = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int force_no_move, int allow_errors)
{
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry;
	int ret;

      retry:
	list_for_each_safe(list, next, head) {
		prev = list->prev;
		entry = list_entry(list, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		if (prev != list->prev || next != list->next) {
			mutex_unlock(&entry->mutex);
			drm_bo_usage_deref_locked(entry);
			goto retry;
		}
		if (entry->mm_node) {

			/*
			 * Expire the fence.
			 */

			mutex_unlock(&dev->struct_mutex);
			if (entry->fence && bm->nice_mode) {
				unsigned long _end = jiffies + 3 * DRM_HZ;
				do {
					ret = drm_bo_wait(entry, 0, 1, 0);
					if (ret && allow_errors) {
						if (ret == -EINTR)
							ret = -EAGAIN;
						goto out_err;
					}
				} while (ret && !time_after_eq(jiffies, _end));

				if (entry->fence) {
					bm->nice_mode = 0;
					DRM_ERROR("Detected GPU hang or "
						  "fence manager was taken down. "
						  "Evicting waiting buffers\n");
				}
			}
			if (entry->fence) {
				drm_fence_usage_deref_unlocked(dev,
							       entry->fence);
				entry->fence = NULL;
			}

			DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
				     0);

			if (force_no_move) {
				DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
					     0);
			}
			if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
				DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
					  "cleanup. Removing flag and evicting.\n");
				entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
				entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
			}

			ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
			if (ret) {
				if (allow_errors) {
					goto out_err;
				} else {
					DRM_ERROR("Aargh. Eviction failed.\n");
				}
			}
			mutex_lock(&dev->struct_mutex);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(entry);
		if (prev != list->prev || next != list->next) {
			goto retry;
		}
	}
	return 0;
      out_err:
	mutex_unlock(&entry->mutex);
	drm_bo_usage_deref_unlocked(entry);
	mutex_lock(&dev->struct_mutex);
	return ret;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!bm->has_type[mem_type]) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	bm->use_type[mem_type] = 0;
	bm->has_type[mem_type] = 0;

	ret = 0;
	if (mem_type > 0) {

		/*
		 * Throw out unfenced buffers.
		 */

		drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);

		/*
		 * Throw out evicted no-move buffers.
		 */

		drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
					mem_type, 1, 0);
		drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
					0);
		drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
					0);

		if (drm_mm_clean(&bm->manager[mem_type])) {
			drm_mm_takedown(&bm->manager[mem_type]);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
		return -EINVAL;
	}

	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
	if (ret)
		return ret;
	ret =
	    drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
	return ret;
}
static int drm_bo_init_mm(drm_device_t * dev,
			  unsigned type,
			  unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}
	if (bm->has_type[type]) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return ret;
		}
		ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
		if (ret)
			return ret;
	}
	bm->has_type[type] = 1;
	bm->use_type[type] = 1;

	INIT_LIST_HEAD(&bm->lru[type]);
	INIT_LIST_HEAD(&bm->pinned[type]);

	return 0;
}
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

	while (i--) {
		if (bm->has_type[i]) {
			bm->use_type[i] = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			bm->has_type[i] = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);
	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->lru[0])) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->pinned[0])) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, 0, 0, 0);
	if (ret)
		goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);
	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
      out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_bo_driver_init);
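/*
 * Initialization flow (sketch; the driver entry point name is
 * hypothetical): a driver enables the system memory type at load time via
 * drm_bo_driver_init(), and the remaining memory types are then brought up
 * through the mm_init op of the ioctl below, with mem_type, p_offset and
 * p_size filled in by the caller:
 *
 *	static int foo_bo_setup(drm_device_t *dev)
 *	{
 *		int ret = drm_bo_driver_init(dev);
 *		if (ret)
 *			return ret;
 *		(further memory types come up via drm_mm_init_ioctl)
 *		return 0;
 *	}
 */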
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	int ret = 0;
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	switch (arg.req.op) {
	case mm_init:
		ret = -EINVAL;
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR
			    ("System memory buffers already initialized.\n");
			break;
		}
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		break;
	case mm_takedown:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = -EINVAL;
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for System memory buffers.\n");
			break;
		}
		ret = 0;
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		}
		break;
	case mm_lock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		break;
	case mm_unlock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = 0;
		break;
	default:
		DRM_ERROR("Function not implemented yet\n");
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}