/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#include "drmP.h"

/*
 * Buffer object locking policy:
 * Lock dev->struct_mutex;
 * Increase usage;
 * Unlock dev->struct_mutex;
 * Lock buffer->mutex;
 * Do whatever you want;
 * Unlock buffer->mutex;
 * Decrease usage. Call destruction if zero.
 *
 * User object visibility ups usage just once, since it has its own
 * refcounting.
 *
 * Destruction:
 * lock dev->struct_mutex;
 * Verify that usage is zero. Otherwise unlock and continue.
 * Destroy object.
 * unlock dev->struct_mutex;
 *
 * Mutex and spinlock locking orders:
 * 1.) Buffer object mutex (bo->mutex) before dev->struct_mutex.
 * 2.) Refer to ttm locking orders.
 */
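/*
 * Editor's sketch of the policy above (example only, assuming a caller
 * that already holds a pointer to a buffer object "bo";
 * drm_bo_usage_deref_unlocked() below performs the final two steps):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&bo->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *	mutex_lock(&bo->mutex);
 *	...operate on the buffer...
 *	mutex_unlock(&bo->mutex);
 *	drm_bo_usage_deref_unlocked(dev, bo);
 */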
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
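/*
 * Editor's worked example: DRM_FLAG_MASKED copies only the bits selected
 * by _mask from _new into _old. With _old = 0x0A, _new = 0x06 and
 * _mask = 0x0C: _old ^ _new = 0x0C, masked with 0x0C stays 0x0C, and
 * _old ^= 0x0C yields 0x06, so the masked bits now equal _new's values
 * while the unmasked bits keep _old's.
 */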
static inline uint32_t drm_bo_type_flags(unsigned type)
{
	return (1 << (24 + type));
}
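/*
 * Editor's note: the shift above relies on the DRM_BO_FLAG_MEM_* flags
 * occupying consecutive bits starting at bit 24, so for example
 * drm_bo_type_flags(DRM_BO_MEM_TT) yields the DRM_BO_FLAG_MEM_TT bit.
 */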
/*
 * bo locked. dev->struct_mutex locked.
 */

static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
			      drm_buffer_manager_t * bm)
{
	struct list_head *list;

	switch (buf->flags & DRM_BO_MASK_MEM) {
	case DRM_BO_FLAG_MEM_TT:
		buf->mem_type = DRM_BO_MEM_TT;
		break;
	case DRM_BO_FLAG_MEM_VRAM:
		buf->mem_type = DRM_BO_MEM_VRAM;
		break;
	case DRM_BO_FLAG_MEM_LOCAL:
		buf->mem_type = DRM_BO_MEM_LOCAL;
		break;
	default:
		BUG_ON(1);
	}

	list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
		&bm->pinned[buf->mem_type] : &bm->lru[buf->mem_type];
	list_add_tail(&buf->lru, list);
}
static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
				int force_no_move)
{
	drm_device_t *dev = buf->dev;
	int ret;

	if (buf->mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (evict)
			ret = drm_evict_ttm(buf->ttm);
		else
			ret = drm_unbind_ttm(buf->ttm);

		if (ret) {
			mutex_unlock(&dev->struct_mutex);
			if (ret == -EAGAIN)
				schedule();
			return ret;
		}

		if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
			drm_mm_put_block(buf->mm_node);
			buf->mm_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	buf->flags &= ~DRM_BO_FLAG_MEM_TT;
	buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;

	return 0;
}
/*
 * Lock dev->struct_mutex
 */

static void drm_bo_destroy_locked(drm_device_t * dev, drm_buffer_object_t * bo)
{
	drm_buffer_manager_t *bm = &dev->bm;

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	/*
	 * Someone might try to access us through the still active BM lists.
	 */

	if (atomic_read(&bo->usage) != 0)
		return;
	if (!list_empty(&bo->ddestroy))
		return;

	if (bo->fence) {
		if (!drm_fence_object_signaled(bo->fence, bo->fence_type)) {

			drm_fence_object_flush(dev, bo->fence, bo->fence_type);
			list_add_tail(&bo->ddestroy, &bm->ddestroy);
			schedule_delayed_work(&bm->wq,
					      ((DRM_HZ / 100) <
					       1) ? 1 : DRM_HZ / 100);
			return;
		} else {
			drm_fence_usage_deref_locked(dev, bo->fence);
			bo->fence = NULL;
		}
	}

	/*
	 * Take away from lru lists.
	 */

	list_del_init(&bo->lru);

	if (bo->ttm) {
		unsigned long _end = jiffies + DRM_HZ;
		int ret;

		/*
		 * This temporarily unlocks struct_mutex.
		 */

		do {
			ret = drm_unbind_ttm(bo->ttm);
			if (ret == -EAGAIN) {
				mutex_unlock(&dev->struct_mutex);
				schedule();
				mutex_lock(&dev->struct_mutex);
			}
		} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));

		if (ret) {
			DRM_ERROR("Couldn't unbind buffer. "
				  "Bad. Continuing anyway\n");
		}
	}

	if (bo->mm_node) {
		drm_mm_put_block(bo->mm_node);
		bo->mm_node = NULL;
	}
	if (bo->ttm_object) {
		drm_ttm_object_deref_locked(dev, bo->ttm_object);
	}

	atomic_dec(&bm->count);

	drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
}
/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

static int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
		       int no_wait)
{
	drm_fence_object_t *fence = bo->fence;
	int ret;

	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret =
		    drm_fence_object_wait(dev, fence, lazy, ignore_signals,
					  bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(dev, fence);
		bo->fence = NULL;
	}
	return 0;
}
/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
{
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry, *nentry;
	struct list_head *list, *next;
	drm_fence_object_t *fence;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, drm_buffer_object_t, ddestroy);
		atomic_inc(&entry->usage);
		if (atomic_read(&entry->usage) != 1) {
			atomic_dec(&entry->usage);
			continue;
		}

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, drm_buffer_object_t,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		fence = entry->fence;
		if (fence && drm_fence_object_signaled(fence,
						       entry->fence_type)) {
			drm_fence_usage_deref_locked(dev, fence);
			entry->fence = NULL;
		}

		if (entry->fence && remove_all) {
			if (bm->nice_mode) {
				unsigned long _end = jiffies + 3 * DRM_HZ;
				int ret;
				do {
					ret = drm_bo_wait(entry, 0, 1, 0);
				} while (ret && !time_after_eq(jiffies, _end));

				if (entry->fence) {
					bm->nice_mode = 0;
					DRM_ERROR("Detected GPU lockup or "
						  "fence driver was taken down. "
						  "Evicting waiting buffers.\n");
				}
			}
			if (entry->fence) {
				drm_fence_usage_deref_unlocked(dev,
							       entry->fence);
				entry->fence = NULL;
			}
		}
		mutex_lock(&dev->struct_mutex);
		mutex_unlock(&entry->mutex);
		if (atomic_dec_and_test(&entry->usage) && (!entry->fence)) {
			list_del_init(&entry->ddestroy);
			drm_bo_destroy_locked(dev, entry);
		}
		if (nentry) {
			atomic_dec(&nentry->usage);
		}
	}
}
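/*
 * Editor's sketch of the two-stage destruction implied above:
 * drm_bo_destroy_locked() on a still-fenced buffer flushes the fence,
 * queues the buffer on bm->ddestroy and schedules bm->wq; the worker
 * below then calls drm_bo_delayed_delete(dev, 0), which drops the fence
 * once signaled and re-enters drm_bo_destroy_locked() to free the object.
 */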
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_device_t *dev = (drm_device_t *) data;
	drm_buffer_manager_t *bm = &dev->bm;
#else
	drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work);
	drm_device_t *dev = container_of(bm, drm_device_t, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}
void drm_bo_usage_deref_locked(drm_device_t * dev, drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage)) {
		drm_bo_destroy_locked(dev, bo);
	}
}

static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
{
	drm_bo_usage_deref_locked(priv->head->dev,
				  drm_user_object_entry(uo, drm_buffer_object_t,
							base));
}

void drm_bo_usage_deref_unlocked(drm_device_t * dev, drm_buffer_object_t * bo)
{
	if (atomic_dec_and_test(&bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&bo->usage) == 0)
			drm_bo_destroy_locked(dev, bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
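/*
 * Editor's example of the lookup/deref pairing used throughout this file
 * (see e.g. drm_buffer_object_map() below):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	bo = drm_lookup_buffer_object(priv, handle, 1);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!bo)
 *		return -EINVAL;
 *	...
 *	drm_bo_usage_deref_unlocked(dev, bo);
 */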
/*
 * Note. The caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(drm_file_t * priv,
			     struct list_head *list,
			     uint32_t fence_flags,
			     drm_fence_object_t * fence,
			     drm_fence_object_t ** used_fence)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	drm_buffer_object_t *entry;
	uint32_t fence_type = 0;
	int count = 0;
	int ret = 0;
	struct list_head f_list, *l;

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->fence_type;
		if (entry->fence_class != 0) {
			DRM_ERROR("Fence class %d is not implemented yet.\n",
				  entry->fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Transfer to a local list before we release the dev->struct_mutex;
	 * This is so we don't get any new unfenced objects while fencing
	 * the ones we already have.
	 */

	list_add_tail(&f_list, list);
	list_del_init(list);

	if (fence) {
		if ((fence_type & fence->type) != fence_type) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = f_list.next;
	while (l != &f_list) {
		entry = list_entry(l, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(dev, entry->fence);
			entry->fence = fence;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			DRM_WAKEUP(&entry->event_queue);
			drm_bo_add_to_lru(entry, bm);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(dev, entry);
		l = f_list.next;
	}
	atomic_add(count, &fence->usage);
	DRM_DEBUG("Fenced %d buffers\n", count);
      out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}

EXPORT_SYMBOL(drm_fence_buffer_objects);
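/*
 * Editor's usage sketch: a driver that has just submitted commands for
 * everything on bm->unfenced can fence the whole list in one call and
 * let the function create and emit the fence object itself:
 *
 *	drm_fence_object_t *fence;
 *	ret = drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *
 * On success every buffer moved off the unfenced list references the new
 * fence, and the caller deregisters its own usage of *fence per the note
 * above.
 */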
static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
			int no_wait, int force_no_move)
{
	int ret = 0;
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;

	/*
	 * Someone might have modified the buffer before we took the buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (!(bo->flags & drm_bo_type_flags(mem_type)))
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to expire fence before "
				  "buffer eviction.\n");
		goto out;
	}

	if (mem_type == DRM_BO_MEM_TT) {
		ret = drm_move_tt_to_local(bo, 1, force_no_move);
	}

	mutex_lock(&dev->struct_mutex);
	list_del_init(&bo->lru);
	drm_bo_add_to_lru(bo, bm);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		goto out;

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);
      out:
	return ret;
}
int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
		       int no_wait)
{
	drm_device_t *dev = buf->dev;
	drm_mm_node_t *node;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	drm_mm_t *mm = &bm->manager[mem_type];
	struct list_head *lru;
	unsigned long size = buf->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(mm, size, buf->page_alignment, 1);
		if (node)
			break;

		lru = &bm->lru[mem_type];
		if (lru->next == lru)
			break;

		bo = list_entry(lru->next, drm_buffer_object_t, lru);

		atomic_inc(&bo->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&bo->mutex);
		BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
		ret = drm_bo_evict(bo, mem_type, no_wait, 0);
		mutex_unlock(&bo->mutex);
		drm_bo_usage_deref_unlocked(dev, bo);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		DRM_ERROR("Out of videoram / aperture space\n");
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, size, buf->page_alignment);
	mutex_unlock(&dev->struct_mutex);
	BUG_ON(!node);
	node->private = (void *)buf;

	buf->mm_node = node;
	buf->offset = node->start * PAGE_SIZE;
	return 0;
}
static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_ttm_backend_t *be;
	int ret;

	if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
		BUG_ON(bo->mm_node);
		ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
		if (ret)
			return ret;
	}

	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);

	mutex_lock(&dev->struct_mutex);
	ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
			   bo->mm_node->start);
	if (ret) {
		drm_mm_put_block(bo->mm_node);
		bo->mm_node = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	be = bo->ttm->be;
	if (be->needs_ub_cache_adjust(be))
		bo->flags &= ~DRM_BO_FLAG_CACHED;
	bo->flags &= ~DRM_BO_MASK_MEM;
	bo->flags |= DRM_BO_FLAG_MEM_TT;

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
		if (ret)
			DRM_ERROR("Could not flush read caches\n");
		DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
	}

	return 0;
}
static int drm_bo_new_flags(drm_device_t * dev,
			    uint32_t flags, uint32_t new_mask, uint32_t hint,
			    int init, uint32_t * n_flags, uint32_t * n_mask)
{
	uint32_t new_flags = 0;
	uint32_t new_props;
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	unsigned i;

	/*
	 * First adjust the mask to take away nonexistent memory types.
	 */

	for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
		if (!bm->use_type[i])
			new_mask &= ~drm_bo_type_flags(i);
	}

	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR
		    ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
		     "processes\n");
		return -EPERM;
	}
	if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
		if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
		     !driver->cached[DRM_BO_MEM_TT]) &&
		    ((new_mask & DRM_BO_FLAG_MEM_VRAM)
		     && !driver->cached[DRM_BO_MEM_VRAM])) {
			new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
		} else {
			if (!driver->cached[DRM_BO_MEM_TT])
				new_flags &= DRM_BO_FLAG_MEM_TT;
			if (!driver->cached[DRM_BO_MEM_VRAM])
				new_flags &= DRM_BO_FLAG_MEM_VRAM;
		}
	}

	if ((new_mask & DRM_BO_FLAG_READ_CACHED) &&
	    !(new_mask & DRM_BO_FLAG_BIND_CACHED)) {
		if ((new_mask & DRM_BO_FLAG_NO_EVICT) &&
		    !(new_mask & DRM_BO_FLAG_MEM_LOCAL)) {
			DRM_ERROR
			    ("Cannot read cached from a pinned VRAM / TT buffer\n");
			return -EINVAL;
		}
	}

	/*
	 * Determine new memory location:
	 */

	if (!(flags & new_mask & DRM_BO_MASK_MEM) || init) {

		new_flags = new_mask & DRM_BO_MASK_MEM;

		if (!new_flags) {
			DRM_ERROR("Invalid buffer object memory flags\n");
			return -EINVAL;
		}

		if (new_flags & DRM_BO_FLAG_MEM_LOCAL) {
			if ((hint & DRM_BO_HINT_AVOID_LOCAL) &&
			    new_flags & (DRM_BO_FLAG_MEM_VRAM |
					 DRM_BO_FLAG_MEM_TT)) {
				new_flags &= ~DRM_BO_FLAG_MEM_LOCAL;
			} else {
				new_flags = DRM_BO_FLAG_MEM_LOCAL;
			}
		}
		if (new_flags & DRM_BO_FLAG_MEM_TT) {
			if ((new_mask & DRM_BO_FLAG_PREFER_VRAM) &&
			    new_flags & DRM_BO_FLAG_MEM_VRAM) {
				new_flags = DRM_BO_FLAG_MEM_VRAM;
			} else {
				new_flags = DRM_BO_FLAG_MEM_TT;
			}
		}
	} else {
		new_flags = flags & DRM_BO_MASK_MEM;
	}

	new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	new_flags |= new_mask & ~DRM_BO_MASK_MEM;

	if (((flags ^ new_flags) & DRM_BO_FLAG_BIND_CACHED) &&
	    (new_flags & DRM_BO_FLAG_NO_EVICT) &&
	    (flags & (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM))) {
		if (!(flags & DRM_BO_FLAG_CACHED)) {
			DRM_ERROR
			    ("Cannot change caching policy of pinned buffer\n");
			return -EINVAL;
		} else {
			new_flags &= ~DRM_BO_FLAG_CACHED;
		}
	}

	*n_flags = new_flags;
	*n_mask = new_mask;
	return 0;
}
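/*
 * Editor's worked example: at creation time (init == 1) with
 * new_mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
 * the location branch first selects both memory bits, then the
 * DRM_BO_FLAG_MEM_LOCAL test collapses the choice to local memory unless
 * DRM_BO_HINT_AVOID_LOCAL is set, in which case TT is chosen instead.
 * The rwx check passes because DRM_BO_FLAG_READ is present in the mask.
 */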
/*
 * Call dev->struct_mutex locked.
 */

drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
					      uint32_t handle, int check_owner)
{
	drm_user_object_t *uo;
	drm_buffer_object_t *bo;

	uo = drm_lookup_user_object(priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && priv != uo->owner) {
		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
	atomic_inc(&bo->usage);
	return bo;
}
/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 * Unlike drm_bo_busy(), doesn't do any fence flushing.
 */

static int drm_bo_quick_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 */

static int drm_bo_busy(drm_buffer_object_t * bo)
{
	drm_fence_object_t *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		drm_device_t *dev = bo->dev;
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(dev, fence);
			bo->fence = NULL;
			return 0;
		}
		return 1;
	}
	return 0;
}
static int drm_bo_read_cached(drm_buffer_object_t * bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
	return ret;
}
/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}
/*
 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 * The unfenced list is a PITA, and the operations
 * 1) validating
 * 2) submitting commands
 * 3) fencing
 * should really be one atomic operation.
 * We now "solve" this problem by keeping
 * the buffer "unfenced" after validating, but before fencing.
 */
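/*
 * Editor's sketch of the validate/submit/fence sequence described above
 * (hypothetical driver execbuf path):
 *
 *	drm_bo_handle_validate(...);	buffer lands on bm->unfenced
 *	...emit hardware commands referencing the buffer...
 *	drm_fence_buffer_objects(priv, NULL, 0, NULL, &fence);
 *
 * Between the first and last step the buffer stays "unfenced" and other
 * callers must go through drm_bo_wait_unfenced() below.
 */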
static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	unsigned long _end = jiffies + 3 * DRM_HZ;

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	do {
		mutex_unlock(&bo->mutex);
		DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
			    !drm_bo_check_unfenced(bo));
		mutex_lock(&bo->mutex);
		if (ret == -EINTR)
			return -EAGAIN;
		if (ret) {
			DRM_ERROR
			    ("Error waiting for buffer to become fenced\n");
			return ret;
		}
		ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	} while (ret && !time_after_eq(jiffies, _end));
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return ret;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
				drm_bo_arg_reply_t * rep)
{
	rep->handle = bo->base.hash.key;
	rep->flags = bo->flags;
	rep->size = bo->num_pages * PAGE_SIZE;
	rep->offset = bo->offset;

	if (bo->ttm_object) {
		rep->arg_handle = bo->ttm_object->map_list.user_token;
	} else {
		rep->arg_handle = 0;
	}

	rep->mask = bo->mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
		ret = drm_bo_wait_unfenced(bo, no_wait, 0);
		if (ret)
			goto out;
	}

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}
			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}
			if ((map_flags & DRM_BO_FLAG_READ) &&
			    (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
			    (!(bo->flags & DRM_BO_FLAG_CACHED))) {
				drm_bo_read_cached(bo);
			}
			break;
		} else if ((map_flags & DRM_BO_FLAG_READ) &&
			   (bo->flags & DRM_BO_FLAG_READ_CACHED) &&
			   (!(bo->flags & DRM_BO_FLAG_CACHED))) {

			/*
			 * We are already mapped with different flags.
			 * need to wait for unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;
			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			DRM_WAKEUP(&bo->event_queue);
	} else
		drm_bo_fill_rep_arg(bo, rep);
      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(dev, bo);
	return ret;
}
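/*
 * Editor's note: each successful drm_buffer_object_map() adds a
 * _DRM_REF_TYPE1 ref object, so every map must be balanced by a
 * drm_buffer_object_unmap() below; if the client dies first, ref-object
 * cleanup invokes drm_buffer_user_object_unmap() to drop the map count.
 */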
static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_object_t *bo;
	drm_ref_object_t *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(priv, ro);
	drm_bo_usage_deref_locked(dev, bo);
      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/*
 * Call struct_mutex locked.
 */

static void drm_buffer_user_object_unmap(drm_file_t * priv,
					 drm_user_object_t * uo,
					 drm_ref_t action)
{
	drm_buffer_object_t *bo =
	    drm_user_object_entry(uo, drm_buffer_object_t, base);

	/*
	 * We DON'T want to take the bo->lock here, because we want to
	 * hold it when we wait for unmapped buffer.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		DRM_WAKEUP(&bo->event_queue);
}
static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
			      int no_wait, int force_no_move)
{
	int ret = 0;

	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Make sure we're not mapped.
	 */

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret)
		return ret;

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret == -EINTR)
		return -EAGAIN;
	if (ret)
		return ret;

	if (new_flags & DRM_BO_FLAG_MEM_TT) {
		ret = drm_move_local_to_tt(bo, no_wait);
		if (ret)
			return ret;
	} else {
		drm_move_tt_to_local(bo, 0, force_no_move);
	}

	return 0;
}
/*
 * bo locked.
 */

static int drm_buffer_object_validate(drm_buffer_object_t * bo,
				      uint32_t new_flags,
				      int move_unfenced, int no_wait)
{
	drm_device_t *dev = bo->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	uint32_t flag_diff = (new_flags ^ bo->flags);
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	int ret;

	if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
		DRM_ERROR("Vram support not implemented yet\n");
		return -EINVAL;
	}

	DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", new_flags, bo->flags);
	ret = driver->fence_type(new_flags, &bo->fence_class, &bo->fence_type);
	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	/*
	 * Move out if we need to change caching policy.
	 */

	if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
	    !(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
		if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
			DRM_ERROR("Cannot change caching policy of "
				  "pinned buffer.\n");
			return -EINVAL;
		}
		ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}
	DRM_MASK_VAL(bo->flags, DRM_BO_FLAG_BIND_CACHED, new_flags);
	flag_diff = (new_flags ^ bo->flags);

	/*
	 * Check whether we dropped no_move policy, and in that case,
	 * release reserved manager regions.
	 */

	if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
	    !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
		mutex_lock(&dev->struct_mutex);
		if (bo->mm_node) {
			drm_mm_put_block(bo->mm_node);
			bo->mm_node = NULL;
		}
		mutex_unlock(&dev->struct_mutex);
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
		ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			return ret;
		}
	}

	if (move_unfenced) {

		/*
		 * Place on unfenced list.
		 */

		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
		mutex_lock(&dev->struct_mutex);
		list_del(&bo->lru);
		list_add_tail(&bo->lru, &bm->unfenced);
		mutex_unlock(&dev->struct_mutex);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->lru);
		drm_bo_add_to_lru(bo, bm);
		mutex_unlock(&dev->struct_mutex);
	}

	bo->flags = new_flags;
	return 0;
}
static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
				  uint32_t flags, uint32_t mask, uint32_t hint,
				  drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	drm_device_t *dev = priv->head->dev;
	int ret;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	uint32_t new_flags;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		return -EINVAL;
	}

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;

	ret = drm_bo_new_flags(dev, bo->flags,
			       (flags & mask) | (bo->mask & ~mask), hint,
			       0, &new_flags, &bo->mask);
	if (ret)
		goto out;

	ret =
	    drm_buffer_object_validate(bo, new_flags,
				       !(hint & DRM_BO_HINT_DONT_FENCE),
				       no_wait);
	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(dev, bo);
	return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
			      drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		return -EINVAL;
	}
	mutex_lock(&bo->mutex);
	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
		(void)drm_bo_busy(bo);
	drm_bo_fill_rep_arg(bo, rep);
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo->dev, bo);
	return 0;
}

static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
			      uint32_t hint, drm_bo_arg_reply_t * rep)
{
	drm_buffer_object_t *bo;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
	int ret;

	bo = drm_lookup_buffer_object(priv, handle, 1);
	if (!bo) {
		return -EINVAL;
	}

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;
	ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
	if (ret)
		goto out;

	drm_bo_fill_rep_arg(bo, rep);

      out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(bo->dev, bo);
	return ret;
}
/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
{
	drm_device_t *dev = bo->dev;
	drm_ttm_object_t *to = NULL;
	int ret = 0;
	uint32_t ttm_flags = 0;

	bo->ttm_object = NULL;
	bo->ttm = NULL;

	switch (bo->type) {
	case drm_bo_type_dc:
		mutex_lock(&dev->struct_mutex);
		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
					    ttm_flags, &to);
		mutex_unlock(&dev->struct_mutex);
		break;
	case drm_bo_type_user:
	case drm_bo_type_fake:
		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	if (ret) {
		return ret;
	}

	if (to) {
		bo->ttm_object = to;
		bo->ttm = drm_ttm_from_object(to);
	}
	return ret;
}
int drm_buffer_object_create(drm_file_t * priv,
			     unsigned long size,
			     drm_bo_type_t type,
			     uint32_t mask,
			     uint32_t hint,
			     uint32_t page_alignment,
			     unsigned long buffer_start,
			     drm_buffer_object_t ** buf_obj)
{
	drm_device_t *dev = priv->head->dev;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_buffer_object_t *bo;
	int ret = 0;
	uint32_t new_flags;
	unsigned long num_pages;

	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
		DRM_ERROR("Invalid buffer object start.\n");
		return -EINVAL;
	}
	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		DRM_ERROR("Illegal buffer object size.\n");
		return -EINVAL;
	}

	bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
	if (!bo)
		return -ENOMEM;

	mutex_init(&bo->mutex);
	mutex_lock(&bo->mutex);

	atomic_set(&bo->usage, 1);
	atomic_set(&bo->mapped, -1);
	DRM_INIT_WAITQUEUE(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	bo->dev = dev;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mm_node = NULL;
	bo->page_alignment = page_alignment;
	if (bo->type == drm_bo_type_fake) {
		bo->offset = buffer_start;
		bo->buffer_start = 0;
	} else {
		bo->buffer_start = buffer_start;
	}
	bo->priv_flags = 0;
	bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
	atomic_inc(&bm->count);
	ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
			       1, &new_flags, &bo->mask);
	if (ret)
		goto out_err;
	ret = drm_bo_add_ttm(priv, bo);
	if (ret)
		goto out_err;

	ret = drm_buffer_object_validate(bo, new_flags, 0,
					 hint & DRM_BO_HINT_DONT_BLOCK);
	if (ret)
		goto out_err;

	mutex_unlock(&bo->mutex);
	*buf_obj = bo;
	return 0;

      out_err:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(dev, bo);
	return ret;
}
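/*
 * Editor's sketch of the creation path as driven by drm_bo_ioctl() below
 * (drm_bo_create case): drm_buffer_object_create() builds and validates
 * the object, drm_bo_add_user_object() publishes the handle, and
 * drm_bo_fill_rep_arg() returns the details to user space:
 *
 *	ret = drm_buffer_object_create(priv, req->size, req->type,
 *				       req->mask, req->hint,
 *				       req->page_alignment,
 *				       req->buffer_start, &entry);
 */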
static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
				  int shareable)
{
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, &bo->base, shareable);
	if (ret)
		goto out;

	bo->base.remove = drm_bo_base_deref_locked;
	bo->base.type = drm_buffer_type;
	bo->base.ref_struct_locked = NULL;
	bo->base.unref = drm_buffer_user_object_unmap;

      out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
{
	LOCK_TEST_WITH_RETURN(dev, filp);
	return 0;
}
int drm_bo_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_bo_arg_t arg;
	drm_bo_arg_request_t *req = &arg.d.req;
	drm_bo_arg_reply_t rep;
	unsigned long next;
	drm_user_object_t *uo;
	drm_buffer_object_t *entry;

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	do {
		DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

		if (arg.handled) {
			data = arg.next;
			continue;
		}

		rep.ret = 0;
		switch (req->op) {
		case drm_bo_create:
			rep.ret =
			    drm_buffer_object_create(priv, req->size,
						     req->type,
						     req->mask,
						     req->hint,
						     req->page_alignment,
						     req->buffer_start, &entry);
			if (rep.ret)
				break;

			rep.ret =
			    drm_bo_add_user_object(priv, entry,
						   req->mask &
						   DRM_BO_FLAG_SHAREABLE);
			if (rep.ret)
				drm_bo_usage_deref_unlocked(dev, entry);

			if (rep.ret)
				break;

			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unmap:
			rep.ret = drm_buffer_object_unmap(priv, req->handle);
			break;
		case drm_bo_map:
			rep.ret = drm_buffer_object_map(priv, req->handle,
							req->mask,
							req->hint, &rep);
			break;
		case drm_bo_destroy:
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			if (!uo || (uo->type != drm_buffer_type)
			    || uo->owner != priv) {
				mutex_unlock(&dev->struct_mutex);
				rep.ret = -EINVAL;
				break;
			}
			rep.ret = drm_remove_user_object(priv, uo);
			mutex_unlock(&dev->struct_mutex);
			break;
		case drm_bo_reference:
			rep.ret = drm_user_object_ref(priv, req->handle,
						      drm_buffer_type, &uo);
			if (rep.ret)
				break;
			mutex_lock(&dev->struct_mutex);
			uo = drm_lookup_user_object(priv, req->handle);
			entry =
			    drm_user_object_entry(uo, drm_buffer_object_t,
						  base);
			atomic_dec(&entry->usage);
			mutex_unlock(&dev->struct_mutex);
			mutex_lock(&entry->mutex);
			drm_bo_fill_rep_arg(entry, &rep);
			mutex_unlock(&entry->mutex);
			break;
		case drm_bo_unreference:
			rep.ret = drm_user_object_unref(priv, req->handle,
							drm_buffer_type);
			break;
		case drm_bo_validate:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			rep.ret =
			    drm_bo_handle_validate(priv, req->handle, req->mask,
						   req->arg_handle, req->hint,
						   &rep);
			break;
		case drm_bo_fence:
			rep.ret = drm_bo_lock_test(dev, filp);
			if (rep.ret)
				break;
			/* Fence handling is not implemented here yet. */
			break;
		case drm_bo_info:
			rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
			break;
		case drm_bo_wait_idle:
			rep.ret = drm_bo_handle_wait(priv, req->handle,
						     req->hint, &rep);
			break;
		case drm_bo_ref_fence:
			rep.ret = -EINVAL;
			DRM_ERROR("Function is not implemented yet.\n");
			break;
		default:
			rep.ret = -EINVAL;
		}
		next = arg.next;

		/*
		 * A signal interrupted us. Make sure the ioctl is restartable.
		 */

		if (rep.ret == -EAGAIN)
			return -EAGAIN;

		arg.handled = 1;
		arg.d.rep = rep;
		DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
		data = next;
	} while (data);
	return 0;
}
/*
 * dev->struct_mutex locked.
 */

static int drm_bo_force_list_clean(drm_device_t * dev,
				   struct list_head *head,
				   unsigned mem_type,
				   int force_no_move, int allow_errors)
{
	drm_buffer_manager_t *bm = &dev->bm;
	struct list_head *list, *next, *prev;
	drm_buffer_object_t *entry;
	int ret;
	int clean;

      retry:
	clean = 1;
	list_for_each_safe(list, next, head) {
		prev = list->prev;
		entry = list_entry(list, drm_buffer_object_t, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		if (prev != list->prev || next != list->next) {
			mutex_unlock(&entry->mutex);
			drm_bo_usage_deref_locked(dev, entry);
			goto retry;
		}
		if (entry->mm_node) {
			clean = 0;

			mutex_unlock(&dev->struct_mutex);
			if (entry->fence && bm->nice_mode) {
				unsigned long _end = jiffies + 3 * DRM_HZ;
				do {
					ret = drm_bo_wait(entry, 0, 1, 0);
					if (ret && allow_errors) {
						if (ret == -EINTR)
							ret = -EAGAIN;
						goto out_err;
					}
				} while (ret && !time_after_eq(jiffies, _end));

				if (entry->fence) {
					bm->nice_mode = 0;
					DRM_ERROR("Detected GPU hang or "
						  "fence manager was taken down. "
						  "Evicting waiting buffers\n");
				}
			}
			if (entry->fence) {
				drm_fence_usage_deref_unlocked(dev,
							       entry->fence);
				entry->fence = NULL;
			}

			DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
				     0);

			if (force_no_move) {
				DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
					     0);
			}
			if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
				DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
					  "cleanup. Removing flag and evicting.\n");
				entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
				entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
			}

			ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
			if (ret) {
				if (allow_errors) {
					goto out_err;
				} else {
					DRM_ERROR("Aargh. Eviction failed.\n");
				}
			}
			mutex_lock(&dev->struct_mutex);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(dev, entry);
		if (prev != list->prev || next != list->next) {
			goto retry;
		}
	}
	if (!clean)
		goto retry;
	return 0;

      out_err:
	mutex_unlock(&entry->mutex);
	drm_bo_usage_deref_unlocked(dev, entry);
	mutex_lock(&dev->struct_mutex);
	return ret;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	if (mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", mem_type);
		return ret;
	}

	if (!bm->has_type[mem_type]) {
		DRM_ERROR("Trying to take down uninitialized "
			  "memory manager type\n");
		return ret;
	}
	bm->use_type[mem_type] = 0;
	bm->has_type[mem_type] = 0;

	ret = 0;
	if (mem_type > 0) {

		/*
		 * Throw out unfenced buffers.
		 */

		drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);

		/*
		 * Throw out evicted no-move buffers.
		 */

		drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
					mem_type, 1, 0);
		drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
					0);
		drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
					0);

		if (drm_mm_clean(&bm->manager[mem_type])) {
			drm_mm_takedown(&bm->manager[mem_type]);
		} else {
			ret = -EBUSY;
		}
	}

	return ret;
}
static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
	int ret;
	drm_buffer_manager_t *bm = &dev->bm;

	if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
		return -EINVAL;
	}

	ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
	if (ret)
		return ret;
	ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
	if (ret)
		return ret;
	ret =
	    drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
	return ret;
}
static int drm_bo_init_mm(drm_device_t * dev,
			  unsigned type,
			  unsigned long p_offset, unsigned long p_size)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	if (type >= DRM_BO_MEM_TYPES) {
		DRM_ERROR("Illegal memory type %d\n", type);
		return ret;
	}
	if (bm->has_type[type]) {
		DRM_ERROR("Memory manager already initialized for type %d\n",
			  type);
		return ret;
	}

	ret = 0;
	if (type != DRM_BO_MEM_LOCAL) {
		if (!p_size) {
			DRM_ERROR("Zero size memory manager type %d\n", type);
			return ret;
		}
		ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
		if (ret)
			return ret;
	}
	bm->has_type[type] = 1;
	bm->use_type[type] = 1;

	INIT_LIST_HEAD(&bm->lru[type]);
	INIT_LIST_HEAD(&bm->pinned[type]);

	return 0;
}
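/*
 * Editor's example: DRM_BO_MEM_LOCAL is brought up with size 0 by
 * drm_bo_driver_init() below, while a driver would initialize an aperture
 * through the mm_init ioctl, ending in something like (values hypothetical):
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
 */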
/*
 * This is called from lastclose, so we don't need to bother about
 * any clients still running when we set the initialized flag to zero.
 */

int drm_bo_driver_finish(drm_device_t * dev)
{
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = 0;
	unsigned i = DRM_BO_MEM_TYPES;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);

	if (!bm->initialized)
		goto out;
	bm->initialized = 0;

	while (i--) {
		if (bm->has_type[i]) {
			bm->use_type[i] = 0;
			if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
				ret = -EBUSY;
				DRM_ERROR("DRM memory manager type %d "
					  "is not clean.\n", i);
			}
			bm->has_type[i] = 0;
		}
	}
	mutex_unlock(&dev->struct_mutex);
	if (!cancel_delayed_work(&bm->wq)) {
		flush_scheduled_work();
	}
	mutex_lock(&dev->struct_mutex);
	drm_bo_delayed_delete(dev, 1);
	if (list_empty(&bm->ddestroy)) {
		DRM_DEBUG("Delayed destroy list was clean\n");
	}
	if (list_empty(&bm->lru[0])) {
		DRM_DEBUG("Swap list was clean\n");
	}
	if (list_empty(&bm->pinned[0])) {
		DRM_DEBUG("NO_MOVE list was clean\n");
	}
	if (list_empty(&bm->unfenced)) {
		DRM_DEBUG("Unfenced list was clean\n");
	}
      out:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}
int drm_bo_driver_init(drm_device_t * dev)
{
	drm_bo_driver_t *driver = dev->driver->bo_driver;
	drm_buffer_manager_t *bm = &dev->bm;
	int ret = -EINVAL;

	mutex_lock(&dev->bm.init_mutex);
	mutex_lock(&dev->struct_mutex);
	if (!driver)
		goto out_unlock;

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */

	ret = drm_bo_init_mm(dev, 0, 0, 0);
	if (ret)
		goto out_unlock;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
#else
	INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
#endif
	bm->initialized = 1;
	bm->nice_mode = 1;
	atomic_set(&bm->count, 0);

	INIT_LIST_HEAD(&bm->unfenced);
	INIT_LIST_HEAD(&bm->ddestroy);
      out_unlock:
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	return ret;
}

EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;

	int ret = 0;
	drm_mm_init_arg_t arg;
	drm_buffer_manager_t *bm = &dev->bm;
	drm_bo_driver_t *driver = dev->driver->bo_driver;

	if (!driver) {
		DRM_ERROR("Buffer objects are not supported by this driver\n");
		return -EINVAL;
	}

	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));

	switch (arg.req.op) {
	case mm_init:
		ret = -EINVAL;
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized.\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR
			    ("System memory buffers already initialized.\n");
			break;
		}
		ret = drm_bo_init_mm(dev, arg.req.mem_type,
				     arg.req.p_offset, arg.req.p_size);
		break;
	case mm_takedown:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = -EINVAL;
		if (!bm->initialized) {
			DRM_ERROR("DRM memory manager was not initialized\n");
			break;
		}
		if (arg.req.mem_type == 0) {
			DRM_ERROR("No takedown for System memory buffers.\n");
			break;
		}
		ret = 0;
		if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
			DRM_ERROR("Memory manager type %d not clean. "
				  "Delaying takedown\n", arg.req.mem_type);
		}
		break;
	case mm_lock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = drm_bo_lock_mm(dev, arg.req.mem_type);
		break;
	case mm_unlock:
		LOCK_TEST_WITH_RETURN(dev, filp);
		mutex_lock(&dev->bm.init_mutex);
		mutex_lock(&dev->struct_mutex);
		ret = 0;
		break;
	default:
		DRM_ERROR("Function not implemented yet\n");
		return -EINVAL;
	}

	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&dev->bm.init_mutex);
	if (ret)
		return ret;

	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
	return 0;
}