Add fence error member.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads, as well as hash
40  * tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself, excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those we need
44  * both bo->mutex and dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
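/*
 * A minimal sketch of the traversal pattern that follows from these rules.
 * This is illustrative only; example_first_on_lru() is a hypothetical helper,
 * but the locking calls are the ones used throughout this file:
 *
 *      static struct drm_buffer_object *
 *      example_first_on_lru(struct drm_device *dev,
 *                           struct drm_mem_type_manager *man)
 *      {
 *              struct drm_buffer_object *entry;
 *
 *              mutex_lock(&dev->struct_mutex);
 *              if (list_empty(&man->lru)) {
 *                      mutex_unlock(&dev->struct_mutex);
 *                      return NULL;
 *              }
 *              entry = list_entry(man->lru.next, struct drm_buffer_object, lru);
 *              atomic_inc(&entry->usage);         // keep the object alive
 *              mutex_unlock(&dev->struct_mutex);  // drop before taking bo->mutex
 *              mutex_lock(&entry->mutex);         // lock order: bo, then struct
 *              return entry;                      // caller re-takes struct_mutex
 *      }                                          // and restarts the traversal
 */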
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
56
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
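/*
 * Assuming the DRM_BO_FLAG_MEM_* placement flags in drm.h start at bit 24
 * (DRM_BO_FLAG_MEM_LOCAL == 1 << 24, DRM_BO_FLAG_MEM_TT == 1 << 25, ...),
 * this maps a memory type index to its placement flag, e.g.
 * drm_bo_type_flags(DRM_BO_MEM_TT) == DRM_BO_FLAG_MEM_TT.
 */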
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
84                 man = &bo->dev->bm.man[bo->mem.mem_type];
85                 list_add_tail(&bo->lru, &man->lru);
86         } else {
87                 INIT_LIST_HEAD(&bo->lru);
88         }
89 }
90
91 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
92 {
93 #ifdef DRM_ODD_MM_COMPAT
94         int ret;
95
96         if (!bo->map_list.map)
97                 return 0;
98
99         ret = drm_bo_lock_kmm(bo);
100         if (ret)
101                 return ret;
102         drm_bo_unmap_virtual(bo);
103         if (old_is_pci)
104                 drm_bo_finish_unmap(bo);
105 #else
106         if (!bo->map_list.map)
107                 return 0;
108
109         drm_bo_unmap_virtual(bo);
110 #endif
111         return 0;
112 }
113
114 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
115 {
116 #ifdef DRM_ODD_MM_COMPAT
117         int ret;
118
119         if (!bo->map_list.map)
120                 return;
121
122         ret = drm_bo_remap_bound(bo);
123         if (ret) {
124                 DRM_ERROR("Failed to remap a bound buffer object.\n"
125                           "\tThis might cause a sigbus later.\n");
126         }
127         drm_bo_unlock_kmm(bo);
128 #endif
129 }
130
131 /*
132  * Call bo->mutex locked.
133  */
134
135 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
136 {
137         struct drm_device *dev = bo->dev;
138         int ret = 0;
139         bo->ttm = NULL;
140
141         DRM_ASSERT_LOCKED(&bo->mutex);
142
143         switch (bo->type) {
144         case drm_bo_type_dc:
145         case drm_bo_type_kernel:
146                 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
147                 if (!bo->ttm)
148                         ret = -ENOMEM;
149                 break;
150         case drm_bo_type_user:
151         case drm_bo_type_fake:
152                 break;
153         default:
154                 DRM_ERROR("Illegal buffer object type\n");
155                 ret = -EINVAL;
156                 break;
157         }
158
159         return ret;
160 }
161
162 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
163                                   struct drm_bo_mem_reg * mem,
164                                   int evict, int no_wait)
165 {
166         struct drm_device *dev = bo->dev;
167         struct drm_buffer_manager *bm = &dev->bm;
168         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
169         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
170         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
171         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
172         int ret = 0;
173
174         if (old_is_pci || new_is_pci ||
175             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
176                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
177         if (ret)
178                 return ret;
179
180         /*
181          * Create and bind a ttm if required.
182          */
183
184         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
185                 ret = drm_bo_add_ttm(bo);
186                 if (ret)
187                         goto out_err;
188
189                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
190                         ret = drm_bind_ttm(bo->ttm, mem);
191                         if (ret)
192                                 goto out_err;
193                 }
194         }
195
196         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
197
198                 struct drm_bo_mem_reg *old_mem = &bo->mem;
199                 uint64_t save_flags = old_mem->flags;
200                 uint64_t save_mask = old_mem->mask;
201
202                 *old_mem = *mem;
203                 mem->mm_node = NULL;
204                 old_mem->mask = save_mask;
205                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
206
207         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
208                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
209
210                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
211
212         } else if (dev->driver->bo_driver->move) {
213                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
214
215         } else {
216
217                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
218
219         }
220
221         if (ret)
222                 goto out_err;
223
224         if (old_is_pci || new_is_pci)
225                 drm_bo_vm_post_move(bo);
226
227         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
228                 ret =
229                     dev->driver->bo_driver->invalidate_caches(dev,
230                                                               bo->mem.flags);
231                 if (ret)
232                         DRM_ERROR("Cannot flush read caches\n");
233         }
234
235         DRM_FLAG_MASKED(bo->priv_flags,
236                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
237                         _DRM_BO_FLAG_EVICTED);
238
239         if (bo->mem.mm_node)
240                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
241                         bm->man[bo->mem.mem_type].gpu_offset;
242
243
244         return 0;
245
246       out_err:
247         if (old_is_pci || new_is_pci)
248                 drm_bo_vm_post_move(bo);
249
250         new_man = &bm->man[bo->mem.mem_type];
251         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
252                 drm_ttm_unbind(bo->ttm);
253                 drm_destroy_ttm(bo->ttm);
254                 bo->ttm = NULL;
255         }
256
257         return ret;
258 }
259
260 /*
261  * Call bo->mutex locked.
262  * Wait until the buffer is idle.
263  */
264
265 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
266                 int no_wait)
267 {
268         int ret;
269
270         DRM_ASSERT_LOCKED(&bo->mutex);
271
272         if (bo->fence) {
273                 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
274                         drm_fence_usage_deref_unlocked(&bo->fence);
275                         return 0;
276                 }
277                 if (no_wait) {
278                         return -EBUSY;
279                 }
280                 ret =
281                     drm_fence_object_wait(bo->fence, lazy, ignore_signals,
282                                           bo->fence_type);
283                 if (ret)
284                         return ret;
285
286                 drm_fence_usage_deref_unlocked(&bo->fence);
287         }
288         return 0;
289 }
290 EXPORT_SYMBOL(drm_bo_wait);
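/*
 * A minimal caller sketch (example_sync_to_gpu() is a hypothetical helper;
 * bo->mutex must be held around the call, as in the mapping and validation
 * paths further down):
 *
 *      static int example_sync_to_gpu(struct drm_buffer_object *bo, int no_wait)
 *      {
 *              int ret;
 *
 *              mutex_lock(&bo->mutex);
 *              ret = drm_bo_wait(bo, 0, 0, no_wait);  // not lazy, honour signals
 *              mutex_unlock(&bo->mutex);
 *              return ret;  // 0, -EBUSY (no_wait), or the fence wait error
 *      }
 */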
291
292 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
293 {
294         struct drm_device *dev = bo->dev;
295         struct drm_buffer_manager *bm = &dev->bm;
296
297         if (bo->fence) {
298                 if (bm->nice_mode) {
299                         unsigned long _end = jiffies + 3 * DRM_HZ;
300                         int ret;
301                         do {
302                                 ret = drm_bo_wait(bo, 0, 1, 0);
303                                 if (ret && allow_errors)
304                                         return ret;
305
306                         } while (ret && !time_after_eq(jiffies, _end));
307
308                         if (bo->fence) {
309                                 bm->nice_mode = 0;
310                                 DRM_ERROR("Detected GPU lockup or "
311                                           "fence driver was taken down. "
312                                           "Evicting buffer.\n");
313                         }
314                 }
315                 if (bo->fence)
316                         drm_fence_usage_deref_unlocked(&bo->fence);
317         }
318         return 0;
319 }
320
321 /*
322  * Call dev->struct_mutex locked.
323  * Attempts to remove all private references to a buffer by expiring its
324  * fence object and removing it from lru lists and memory managers.
325  */
326
327 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
328 {
329         struct drm_device *dev = bo->dev;
330         struct drm_buffer_manager *bm = &dev->bm;
331
332         DRM_ASSERT_LOCKED(&dev->struct_mutex);
333
334         atomic_inc(&bo->usage);
335         mutex_unlock(&dev->struct_mutex);
336         mutex_lock(&bo->mutex);
337
338         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
339
340         if (bo->fence && drm_fence_object_signaled(bo->fence,
341                                                    bo->fence_type, 0))
342                 drm_fence_usage_deref_unlocked(&bo->fence);
343
344         if (bo->fence && remove_all)
345                 (void)drm_bo_expire_fence(bo, 0);
346
347         mutex_lock(&dev->struct_mutex);
348
349         if (!atomic_dec_and_test(&bo->usage)) {
350                 goto out;
351         }
352
353         if (!bo->fence) {
354                 list_del_init(&bo->lru);
355                 if (bo->mem.mm_node) {
356                         drm_mm_put_block(bo->mem.mm_node);
357                         if (bo->pinned_node == bo->mem.mm_node)
358                                 bo->pinned_node = NULL;
359                         bo->mem.mm_node = NULL;
360                 }
361                 list_del_init(&bo->pinned_lru);
362                 if (bo->pinned_node) {
363                         drm_mm_put_block(bo->pinned_node);
364                         bo->pinned_node = NULL;
365                 }
366                 list_del_init(&bo->ddestroy);
367                 mutex_unlock(&bo->mutex);
368                 drm_bo_destroy_locked(bo);
369                 return;
370         }
371
372         if (list_empty(&bo->ddestroy)) {
373                 drm_fence_object_flush(bo->fence, bo->fence_type);
374                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
375                 schedule_delayed_work(&bm->wq,
376                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
377         }
378
379       out:
380         mutex_unlock(&bo->mutex);
381         return;
382 }
383
384 /*
385  * Verify that refcount is 0 and that there are no internal references
386  * to the buffer object. Then destroy it.
387  */
388
389 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
390 {
391         struct drm_device *dev = bo->dev;
392         struct drm_buffer_manager *bm = &dev->bm;
393
394         DRM_ASSERT_LOCKED(&dev->struct_mutex);
395
396         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
397             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
398             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
399                 if (bo->fence != NULL) {
400                         DRM_ERROR("Fence was non-zero.\n");
401                         drm_bo_cleanup_refs(bo, 0);
402                         return;
403                 }
404
405 #ifdef DRM_ODD_MM_COMPAT
406                 BUG_ON(!list_empty(&bo->vma_list));
407                 BUG_ON(!list_empty(&bo->p_mm_list));
408 #endif
409
410                 if (bo->ttm) {
411                         drm_ttm_unbind(bo->ttm);
412                         drm_destroy_ttm(bo->ttm);
413                         bo->ttm = NULL;
414                 }
415
416                 atomic_dec(&bm->count);
417
418                 //              BUG_ON(!list_empty(&bo->base.list));
419                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
420
421                 return;
422         }
423
424         /*
425          * Some stuff is still trying to reference the buffer object.
426          * Get rid of those references.
427          */
428
429         drm_bo_cleanup_refs(bo, 0);
430
431         return;
432 }
433
434 /*
435  * Call dev->struct_mutex locked.
436  */
437
438 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
439 {
440         struct drm_buffer_manager *bm = &dev->bm;
441
442         struct drm_buffer_object *entry, *nentry;
443         struct list_head *list, *next;
444
445         list_for_each_safe(list, next, &bm->ddestroy) {
446                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
447
448                 nentry = NULL;
449                 if (next != &bm->ddestroy) {
450                         nentry = list_entry(next, struct drm_buffer_object,
451                                             ddestroy);
452                         atomic_inc(&nentry->usage);
453                 }
454
455                 drm_bo_cleanup_refs(entry, remove_all);
456
457                 if (nentry) {
458                         atomic_dec(&nentry->usage);
459                 }
460         }
461 }
462
463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
464 static void drm_bo_delayed_workqueue(void *data)
465 #else
466 static void drm_bo_delayed_workqueue(struct work_struct *work)
467 #endif
468 {
469 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
470         struct drm_device *dev = (struct drm_device *) data;
471         struct drm_buffer_manager *bm = &dev->bm;
472 #else
473         struct drm_buffer_manager *bm =
474             container_of(work, struct drm_buffer_manager, wq.work);
475         struct drm_device *dev = container_of(bm, struct drm_device, bm);
476 #endif
477
478         DRM_DEBUG("Delayed delete Worker\n");
479
480         mutex_lock(&dev->struct_mutex);
481         if (!bm->initialized) {
482                 mutex_unlock(&dev->struct_mutex);
483                 return;
484         }
485         drm_bo_delayed_delete(dev, 0);
486         if (bm->initialized && !list_empty(&bm->ddestroy)) {
487                 schedule_delayed_work(&bm->wq,
488                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
489         }
490         mutex_unlock(&dev->struct_mutex);
491 }
492
493 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
494 {
495         struct drm_buffer_object *tmp_bo = *bo;
496         *bo = NULL;
497
498         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
499
500         if (atomic_dec_and_test(&tmp_bo->usage)) {
501                 drm_bo_destroy_locked(tmp_bo);
502         }
503 }
504 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
505
506 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
507                                      struct drm_user_object * uo)
508 {
509         struct drm_buffer_object *bo =
510             drm_user_object_entry(uo, struct drm_buffer_object, base);
511
512         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
513
514         drm_bo_takedown_vm_locked(bo);
515         drm_bo_usage_deref_locked(&bo);
516 }
517
518 void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
519 {
520         struct drm_buffer_object *tmp_bo = *bo;
521         struct drm_device *dev = tmp_bo->dev;
522
523         *bo = NULL;
524         if (atomic_dec_and_test(&tmp_bo->usage)) {
525                 mutex_lock(&dev->struct_mutex);
526                 if (atomic_read(&tmp_bo->usage) == 0)
527                         drm_bo_destroy_locked(tmp_bo);
528                 mutex_unlock(&dev->struct_mutex);
529         }
530 }
531 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
532
533 void drm_putback_buffer_objects(struct drm_device *dev)
534 {
535         struct drm_buffer_manager *bm = &dev->bm;
536         struct list_head *list = &bm->unfenced;
537         struct drm_buffer_object *entry, *next;
538
539         mutex_lock(&dev->struct_mutex);
540         list_for_each_entry_safe(entry, next, list, lru) {
541                 atomic_inc(&entry->usage);
542                 mutex_unlock(&dev->struct_mutex);
543
544                 mutex_lock(&entry->mutex);
545                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
546                 mutex_lock(&dev->struct_mutex);
547
548                 list_del_init(&entry->lru);
549                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
550                 DRM_WAKEUP(&entry->event_queue);
551
552                 /*
553                  * FIXME: Might want to put back on head of list
554                  * instead of tail here.
555                  */
556
557                 drm_bo_add_to_lru(entry);
558                 mutex_unlock(&entry->mutex);
559                 drm_bo_usage_deref_locked(&entry);
560         }
561         mutex_unlock(&dev->struct_mutex);
562 }
563 EXPORT_SYMBOL(drm_putback_buffer_objects);
564
565
566 /*
567  * Note. The caller has to register (if applicable)
568  * and deregister fence object usage.
569  */
570
571 int drm_fence_buffer_objects(struct drm_device *dev,
572                              struct list_head *list,
573                              uint32_t fence_flags,
574                              struct drm_fence_object * fence,
575                              struct drm_fence_object ** used_fence)
576 {
577         struct drm_buffer_manager *bm = &dev->bm;
578         struct drm_buffer_object *entry;
579         uint32_t fence_type = 0;
580         uint32_t fence_class = ~0;
581         int count = 0;
582         int ret = 0;
583         struct list_head *l;
584
585         mutex_lock(&dev->struct_mutex);
586
587         if (!list)
588                 list = &bm->unfenced;
589
590         if (fence)
591                 fence_class = fence->class;
592
593         list_for_each_entry(entry, list, lru) {
594                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
595                 fence_type |= entry->new_fence_type;
596                 if (fence_class == ~0)
597                         fence_class = entry->new_fence_class;
598                 else if (entry->new_fence_class != fence_class) {
599                         DRM_ERROR("Mismatched fence classes on unfenced list: "
600                                   "%d and %d.\n",
601                                   fence_class,
602                                   entry->new_fence_class);
603                         ret = -EINVAL;
604                         goto out;
605                 }
606                 count++;
607         }
608
609         if (!count) {
610                 ret = -EINVAL;
611                 goto out;
612         }
613
614         if (fence) {
615                 if ((fence_type & fence->type) != fence_type) {
616                         DRM_ERROR("Given fence doesn't match buffers "
617                                   "on unfenced list.\n");
618                         ret = -EINVAL;
619                         goto out;
620                 }
621         } else {
622                 mutex_unlock(&dev->struct_mutex);
623                 ret = drm_fence_object_create(dev, fence_class, fence_type,
624                                               fence_flags | DRM_FENCE_FLAG_EMIT,
625                                               &fence);
626                 mutex_lock(&dev->struct_mutex);
627                 if (ret)
628                         goto out;
629         }
630
631         count = 0;
632         l = list->next;
633         while (l != list) {
634                 prefetch(l->next);
635                 entry = list_entry(l, struct drm_buffer_object, lru);
636                 atomic_inc(&entry->usage);
637                 mutex_unlock(&dev->struct_mutex);
638                 mutex_lock(&entry->mutex);
639                 mutex_lock(&dev->struct_mutex);
640                 list_del_init(l);
641                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
642                         count++;
643                         if (entry->fence)
644                                 drm_fence_usage_deref_locked(&entry->fence);
645                         entry->fence = drm_fence_reference_locked(fence);
646                         entry->fence_class = entry->new_fence_class;
647                         entry->fence_type = entry->new_fence_type;
648                         DRM_FLAG_MASKED(entry->priv_flags, 0,
649                                         _DRM_BO_FLAG_UNFENCED);
650                         DRM_WAKEUP(&entry->event_queue);
651                         drm_bo_add_to_lru(entry);
652                 }
653                 mutex_unlock(&entry->mutex);
654                 drm_bo_usage_deref_locked(&entry);
655                 l = list->next;
656         }
657         DRM_DEBUG("Fenced %d buffers\n", count);
658       out:
659         mutex_unlock(&dev->struct_mutex);
660         *used_fence = fence;
661         return ret;
662 }
663 EXPORT_SYMBOL(drm_fence_buffer_objects);
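/*
 * A sketch of the caller side after command submission. Passing list == NULL
 * fences everything on bm->unfenced, and passing fence == NULL asks this
 * function to create and emit one; the fence_flags value 0 is only an
 * illustrative choice:
 *
 *      struct drm_fence_object *fence = NULL;
 *      int ret;
 *
 *      ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *      if (ret)
 *              return ret;
 *      // ... hand the fence to user space or wait on it ...
 *      drm_fence_usage_deref_unlocked(&fence);  // caller drops its reference
 */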
664
665 /*
666  * bo->mutex locked
667  */
668
669 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
670                         int no_wait)
671 {
672         int ret = 0;
673         struct drm_device *dev = bo->dev;
674         struct drm_bo_mem_reg evict_mem;
675
676         /*
677          * Someone might have modified the buffer before we took the buffer mutex.
678          */
679
680         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
681                 goto out;
682         if (bo->mem.mem_type != mem_type)
683                 goto out;
684
685         ret = drm_bo_wait(bo, 0, 0, no_wait);
686
687         if (ret && ret != -EAGAIN) {
688                 DRM_ERROR("Failed to expire fence before "
689                           "buffer eviction.\n");
690                 goto out;
691         }
692
693         evict_mem = bo->mem;
694         evict_mem.mm_node = NULL;
695
696         if (bo->type == drm_bo_type_fake) {
697                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
698                 bo->mem.mm_node = NULL;
699                 goto out1;
700         }
701
702         evict_mem = bo->mem;
703         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
704         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
705
706         if (ret) {
707                 if (ret != -EAGAIN)
708                         DRM_ERROR("Failed to find memory space for "
709                                   "buffer 0x%p eviction.\n", bo);
710                 goto out;
711         }
712
713         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
714
715         if (ret) {
716                 if (ret != -EAGAIN)
717                         DRM_ERROR("Buffer eviction failed\n");
718                 goto out;
719         }
720
721       out1:
722         mutex_lock(&dev->struct_mutex);
723         if (evict_mem.mm_node) {
724                 if (evict_mem.mm_node != bo->pinned_node)
725                         drm_mm_put_block(evict_mem.mm_node);
726                 evict_mem.mm_node = NULL;
727         }
728         list_del(&bo->lru);
729         drm_bo_add_to_lru(bo);
730         mutex_unlock(&dev->struct_mutex);
731
732         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
733                         _DRM_BO_FLAG_EVICTED);
734
735       out:
736         return ret;
737 }
738
739 /**
740  * Repeatedly evict memory from the LRU for @mem_type until we create enough
741  * space, or we've evicted everything and there isn't enough space.
742  */
743 static int drm_bo_mem_force_space(struct drm_device * dev,
744                                   struct drm_bo_mem_reg * mem,
745                                   uint32_t mem_type, int no_wait)
746 {
747         struct drm_mm_node *node;
748         struct drm_buffer_manager *bm = &dev->bm;
749         struct drm_buffer_object *entry;
750         struct drm_mem_type_manager *man = &bm->man[mem_type];
751         struct list_head *lru;
752         unsigned long num_pages = mem->num_pages;
753         int ret;
754
755         mutex_lock(&dev->struct_mutex);
756         do {
757                 node = drm_mm_search_free(&man->manager, num_pages,
758                                           mem->page_alignment, 1);
759                 if (node)
760                         break;
761
762                 lru = &man->lru;
763                 if (lru->next == lru)
764                         break;
765
766                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
767                 atomic_inc(&entry->usage);
768                 mutex_unlock(&dev->struct_mutex);
769                 mutex_lock(&entry->mutex);
770                 BUG_ON(entry->pinned);
771
772                 ret = drm_bo_evict(entry, mem_type, no_wait);
773                 mutex_unlock(&entry->mutex);
774                 drm_bo_usage_deref_unlocked(&entry);
775                 if (ret)
776                         return ret;
777                 mutex_lock(&dev->struct_mutex);
778         } while (1);
779
780         if (!node) {
781                 mutex_unlock(&dev->struct_mutex);
782                 return -ENOMEM;
783         }
784
785         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
786         mutex_unlock(&dev->struct_mutex);
787         mem->mm_node = node;
788         mem->mem_type = mem_type;
789         return 0;
790 }
791
792 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
793                                 uint32_t mem_type,
794                                 uint32_t mask, uint32_t * res_mask)
795 {
796         uint32_t cur_flags = drm_bo_type_flags(mem_type);
797         uint32_t flag_diff;
798
799         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
800                 cur_flags |= DRM_BO_FLAG_CACHED;
801         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
802                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
803         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
804                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
805
806         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
807                 return 0;
808
809         if (mem_type == DRM_BO_MEM_LOCAL) {
810                 *res_mask = cur_flags;
811                 return 1;
812         }
813
814         flag_diff = (mask ^ cur_flags);
815         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
816             (!(mask & DRM_BO_FLAG_CACHED) ||
817              (mask & DRM_BO_FLAG_FORCE_CACHING)))
818                 return 0;
819
820         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
821             ((mask & DRM_BO_FLAG_MAPPABLE) ||
822              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
823                 return 0;
824
825         *res_mask = cur_flags;
826         return 1;
827 }
828
829 /**
830  * Creates space for memory region @mem according to its type.
831  *
832  * This function first searches for free space in compatible memory types in
833  * the priority order defined by the driver.  If free space isn't found, then
834  * drm_bo_mem_force_space is attempted in priority order to evict and find
835  * space.
836  */
837 int drm_bo_mem_space(struct drm_buffer_object * bo,
838                      struct drm_bo_mem_reg * mem, int no_wait)
839 {
840         struct drm_device *dev = bo->dev;
841         struct drm_buffer_manager *bm = &dev->bm;
842         struct drm_mem_type_manager *man;
843
844         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
845         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
846         uint32_t i;
847         uint32_t mem_type = DRM_BO_MEM_LOCAL;
848         uint32_t cur_flags;
849         int type_found = 0;
850         int type_ok = 0;
851         int has_eagain = 0;
852         struct drm_mm_node *node = NULL;
853         int ret;
854
855         mem->mm_node = NULL;
856         for (i = 0; i < num_prios; ++i) {
857                 mem_type = prios[i];
858                 man = &bm->man[mem_type];
859
860                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
861                                                &cur_flags);
862
863                 if (!type_ok)
864                         continue;
865
866                 if (mem_type == DRM_BO_MEM_LOCAL)
867                         break;
868
869                 if ((mem_type == bo->pinned_mem_type) &&
870                     (bo->pinned_node != NULL)) {
871                         node = bo->pinned_node;
872                         break;
873                 }
874
875                 mutex_lock(&dev->struct_mutex);
876                 if (man->has_type && man->use_type) {
877                         type_found = 1;
878                         node = drm_mm_search_free(&man->manager, mem->num_pages,
879                                                   mem->page_alignment, 1);
880                         if (node)
881                                 node = drm_mm_get_block(node, mem->num_pages,
882                                                         mem->page_alignment);
883                 }
884                 mutex_unlock(&dev->struct_mutex);
885                 if (node)
886                         break;
887         }
888
889         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
890                 mem->mm_node = node;
891                 mem->mem_type = mem_type;
892                 mem->flags = cur_flags;
893                 return 0;
894         }
895
896         if (!type_found)
897                 return -EINVAL;
898
899         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
900         prios = dev->driver->bo_driver->mem_busy_prio;
901
902         for (i = 0; i < num_prios; ++i) {
903                 mem_type = prios[i];
904                 man = &bm->man[mem_type];
905
906                 if (!man->has_type)
907                         continue;
908
909                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
910                         continue;
911
912                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
913
914                 if (ret == 0) {
915                         mem->flags = cur_flags;
916                         return 0;
917                 }
918
919                 if (ret == -EAGAIN)
920                         has_eagain = 1;
921         }
922
923         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
924         return ret;
925 }
926
927 EXPORT_SYMBOL(drm_bo_mem_space);
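/*
 * The priority arrays come from the driver's struct drm_bo_driver. A sketch
 * of what a driver might set up; the memory types and their ordering below
 * are purely illustrative:
 *
 *      static uint32_t example_type_prios[] = {
 *              DRM_BO_MEM_VRAM,        // preferred placement first
 *              DRM_BO_MEM_TT,
 *              DRM_BO_MEM_LOCAL,
 *      };
 *      static uint32_t example_busy_prios[] = {
 *              DRM_BO_MEM_TT,          // where to force space by evicting
 *              DRM_BO_MEM_LOCAL,
 *      };
 *
 *      static struct drm_bo_driver example_bo_driver = {
 *              .mem_type_prio = example_type_prios,
 *              .mem_busy_prio = example_busy_prios,
 *              .num_mem_type_prio = ARRAY_SIZE(example_type_prios),
 *              .num_mem_busy_prio = ARRAY_SIZE(example_busy_prios),
 *              // ... fence_type, evict_mask, move, invalidate_caches, ...
 *      };
 */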
928
929 static int drm_bo_new_mask(struct drm_buffer_object * bo,
930                            uint64_t new_mask, uint32_t hint)
931 {
932         uint32_t new_props;
933
934         if (bo->type == drm_bo_type_user) {
935                 DRM_ERROR("User buffers are not supported yet\n");
936                 return -EINVAL;
937         }
938
939         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
940                                 DRM_BO_FLAG_READ);
941
942         if (!new_props) {
943                 DRM_ERROR("Invalid buffer object rwx properties\n");
944                 return -EINVAL;
945         }
946
947         bo->mem.mask = new_mask;
948         return 0;
949 }
950
951 /*
952  * Call dev->struct_mutex locked.
953  */
954
955 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
956                                               uint32_t handle, int check_owner)
957 {
958         struct drm_user_object *uo;
959         struct drm_buffer_object *bo;
960
961         uo = drm_lookup_user_object(file_priv, handle);
962
963         if (!uo || (uo->type != drm_buffer_type)) {
964                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
965                 return NULL;
966         }
967
968         if (check_owner && file_priv != uo->owner) {
969                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
970                         return NULL;
971         }
972
973         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
974         atomic_inc(&bo->usage);
975         return bo;
976 }
977 EXPORT_SYMBOL(drm_lookup_buffer_object);
978
979 /*
980  * Call bo->mutex locked.
981  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
982  * Unlike drm_bo_busy(), this doesn't do any fence flushing.
983  */
984
985 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
986 {
987         struct drm_fence_object *fence = bo->fence;
988
989         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
990         if (fence) {
991                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
992                         drm_fence_usage_deref_unlocked(&bo->fence);
993                         return 0;
994                 }
995                 return 1;
996         }
997         return 0;
998 }
999
1000 /*
1001  * Call bo->mutex locked.
1002  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1003  */
1004
1005 static int drm_bo_busy(struct drm_buffer_object * bo)
1006 {
1007         struct drm_fence_object *fence = bo->fence;
1008
1009         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1010         if (fence) {
1011                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1012                         drm_fence_usage_deref_unlocked(&bo->fence);
1013                         return 0;
1014                 }
1015                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1016                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1017                         drm_fence_usage_deref_unlocked(&bo->fence);
1018                         return 0;
1019                 }
1020                 return 1;
1021         }
1022         return 0;
1023 }
1024
1025 static int drm_bo_read_cached(struct drm_buffer_object * bo)
1026 {
1027         int ret = 0;
1028
1029         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1030         if (bo->mem.mm_node)
1031                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1032         return ret;
1033 }
1034
1035 /*
1036  * Wait until a buffer is unmapped.
1037  */
1038
1039 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1040 {
1041         int ret = 0;
1042
1043         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1044                 return -EBUSY;
1045
1046         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1047                     atomic_read(&bo->mapped) == -1);
1048
1049         if (ret == -EINTR)
1050                 ret = -EAGAIN;
1051
1052         return ret;
1053 }
1054
1055 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1056 {
1057         int ret;
1058
1059         mutex_lock(&bo->mutex);
1060         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1061         mutex_unlock(&bo->mutex);
1062         return ret;
1063 }
1064
1065 /*
1066  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1067  * Until then, we cannot really do anything with it except delete it.
1068  * The unfenced list is a PITA, and the operations
1069  * 1) validating
1070  * 2) submitting commands
1071  * 3) fencing
1072  * should really be one atomic operation.
1073  * We now "solve" this problem by keeping the buffer "unfenced" after
1074  * validating, but before fencing.
1075  */
1076
1077 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1078                                 int eagain_if_wait)
1079 {
1080         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1081
1082         if (ret && no_wait)
1083                 return -EBUSY;
1084         else if (!ret)
1085                 return 0;
1086
1087         ret = 0;
1088         mutex_unlock(&bo->mutex);
1089         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1090                     !drm_bo_check_unfenced(bo));
1091         mutex_lock(&bo->mutex);
1092         if (ret == -EINTR)
1093                 return -EAGAIN;
1094         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1095         if (ret) {
1096                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1097                 return -EBUSY;
1098         }
1099         if (eagain_if_wait)
1100                 return -EAGAIN;
1101
1102         return 0;
1103 }
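/*
 * A sketch of the intended flow around the unfenced list, using the entry
 * points defined in this file (ioctl plumbing and error handling omitted;
 * hint must not contain DRM_BO_HINT_DONT_FENCE for the buffer to stay on
 * the unfenced list):
 *
 *      // 1) validate: the buffer lands on bm->unfenced with
 *      //    _DRM_BO_FLAG_UNFENCED set
 *      ret = drm_bo_handle_validate(file_priv, handle, fence_class,
 *                                   flags, mask, hint, &rep, NULL);
 *
 *      // 2) submit the commands that reference the validated buffers
 *
 *      // 3) fence: moves the buffers back onto their LRU lists
 *      ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 */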
1104
1105 /*
1106  * Fill in the ioctl reply argument with buffer info.
1107  * Bo locked.
1108  */
1109
1110 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1111                                 struct drm_bo_info_rep *rep)
1112 {
1113         if (!rep)
1114                 return;
1115
1116         rep->handle = bo->base.hash.key;
1117         rep->flags = bo->mem.flags;
1118         rep->size = bo->num_pages * PAGE_SIZE;
1119         rep->offset = bo->offset;
1120         rep->arg_handle = bo->map_list.user_token;
1121         rep->mask = bo->mem.mask;
1122         rep->buffer_start = bo->buffer_start;
1123         rep->fence_flags = bo->fence_type;
1124         rep->rep_flags = 0;
1125         rep->page_alignment = bo->mem.page_alignment;
1126
1127         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1128                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1129                                 DRM_BO_REP_BUSY);
1130         }
1131 }
1132
1133 /*
1134  * Wait for buffer idle and register that we've mapped the buffer.
1135  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1136  * so that if the client dies, the mapping is automatically
1137  * unregistered.
1138  */
1139
1140 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1141                                  uint32_t map_flags, unsigned hint,
1142                                  struct drm_bo_info_rep *rep)
1143 {
1144         struct drm_buffer_object *bo;
1145         struct drm_device *dev = file_priv->head->dev;
1146         int ret = 0;
1147         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1148
1149         mutex_lock(&dev->struct_mutex);
1150         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1151         mutex_unlock(&dev->struct_mutex);
1152
1153         if (!bo)
1154                 return -EINVAL;
1155
1156         mutex_lock(&bo->mutex);
1157         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1158                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1159                 if (ret)
1160                         goto out;
1161         }
1162
1163         /*
1164          * If this returns true, we are currently unmapped.
1165          * We need to do this test, because unmapping can
1166          * be done without the bo->mutex held.
1167          */
1168
1169         while (1) {
1170                 if (atomic_inc_and_test(&bo->mapped)) {
1171                         if (no_wait && drm_bo_busy(bo)) {
1172                                 atomic_dec(&bo->mapped);
1173                                 ret = -EBUSY;
1174                                 goto out;
1175                         }
1176                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1177                         if (ret) {
1178                                 atomic_dec(&bo->mapped);
1179                                 goto out;
1180                         }
1181
1182                         if ((map_flags & DRM_BO_FLAG_READ) &&
1183                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1184                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1185                                 drm_bo_read_cached(bo);
1186                         }
1187                         break;
1188                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1189                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1190                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1191
1192                         /*
1193                          * We are already mapped with different flags.
1194                          * We need to wait for an unmap.
1195                          */
1196
1197                         ret = drm_bo_wait_unmapped(bo, no_wait);
1198                         if (ret)
1199                                 goto out;
1200
1201                         continue;
1202                 }
1203                 break;
1204         }
1205
1206         mutex_lock(&dev->struct_mutex);
1207         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1208         mutex_unlock(&dev->struct_mutex);
1209         if (ret) {
1210                 if (atomic_add_negative(-1, &bo->mapped))
1211                         DRM_WAKEUP(&bo->event_queue);
1212
1213         } else
1214                 drm_bo_fill_rep_arg(bo, rep);
1215       out:
1216         mutex_unlock(&bo->mutex);
1217         drm_bo_usage_deref_unlocked(&bo);
1218         return ret;
1219 }
1220
1221 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1222 {
1223         struct drm_device *dev = file_priv->head->dev;
1224         struct drm_buffer_object *bo;
1225         struct drm_ref_object *ro;
1226         int ret = 0;
1227
1228         mutex_lock(&dev->struct_mutex);
1229
1230         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1231         if (!bo) {
1232                 ret = -EINVAL;
1233                 goto out;
1234         }
1235
1236         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1237         if (!ro) {
1238                 ret = -EINVAL;
1239                 goto out;
1240         }
1241
1242         drm_remove_ref_object(file_priv, ro);
1243         drm_bo_usage_deref_locked(&bo);
1244       out:
1245         mutex_unlock(&dev->struct_mutex);
1246         return ret;
1247 }
1248
1249 /*
1250  * Call dev->struct_mutex locked.
1251  */
1252
1253 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1254                                          struct drm_user_object * uo,
1255                                          enum drm_ref_type action)
1256 {
1257         struct drm_buffer_object *bo =
1258             drm_user_object_entry(uo, struct drm_buffer_object, base);
1259
1260         /*
1261  * We DON'T want to take bo->mutex here: the thread waiting for the buffer
1262  * to become unmapped (drm_bo_wait_unmapped) holds it while sleeping.
1263          */
1264
1265         BUG_ON(action != _DRM_REF_TYPE1);
1266
1267         if (atomic_add_negative(-1, &bo->mapped))
1268                 DRM_WAKEUP(&bo->event_queue);
1269 }
1270
1271 /*
1272  * bo->mutex locked.
1273  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1274  */
1275
1276 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1277                        int no_wait, int move_unfenced)
1278 {
1279         struct drm_device *dev = bo->dev;
1280         struct drm_buffer_manager *bm = &dev->bm;
1281         int ret = 0;
1282         struct drm_bo_mem_reg mem;
1283         /*
1284          * Flush outstanding fences.
1285          */
1286
1287         drm_bo_busy(bo);
1288
1289         /*
1290          * Wait for outstanding fences.
1291          */
1292
1293         ret = drm_bo_wait(bo, 0, 0, no_wait);
1294         if (ret)
1295                 return ret;
1296
1297         mem.num_pages = bo->num_pages;
1298         mem.size = mem.num_pages << PAGE_SHIFT;
1299         mem.mask = new_mem_flags;
1300         mem.page_alignment = bo->mem.page_alignment;
1301
1302         mutex_lock(&bm->evict_mutex);
1303         mutex_lock(&dev->struct_mutex);
1304         list_del(&bo->lru);
1305         list_add_tail(&bo->lru, &bm->unfenced);
1306         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1307                         _DRM_BO_FLAG_UNFENCED);
1308         mutex_unlock(&dev->struct_mutex);
1309
1310         /*
1311          * Determine where to move the buffer.
1312          */
1313         ret = drm_bo_mem_space(bo, &mem, no_wait);
1314         if (ret)
1315                 goto out_unlock;
1316
1317         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1318
1319  out_unlock:
1320         if (ret || !move_unfenced) {
1321                 mutex_lock(&dev->struct_mutex);
1322                 if (mem.mm_node) {
1323                         if (mem.mm_node != bo->pinned_node)
1324                                 drm_mm_put_block(mem.mm_node);
1325                         mem.mm_node = NULL;
1326                 }
1327                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1328                 DRM_WAKEUP(&bo->event_queue);
1329                 list_del(&bo->lru);
1330                 drm_bo_add_to_lru(bo);
1331                 mutex_unlock(&dev->struct_mutex);
1332         }
1333
1334         mutex_unlock(&bm->evict_mutex);
1335         return ret;
1336 }
1337
1338 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1339 {
1340         uint32_t flag_diff = (mem->mask ^ mem->flags);
1341
1342         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1343                 return 0;
1344         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1345             (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
1346              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1347           return 0;
1348         }
1349         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1350             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1351              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1352                 return 0;
1353         return 1;
1354 }
1355
1356 static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
1357 {
1358         struct drm_buffer_manager *bm = &dev->bm;
1359         struct drm_mem_type_manager *man;
1360         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1361         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1362         uint32_t i;
1363         int type_ok = 0;
1364         uint32_t mem_type = 0;
1365         uint32_t cur_flags;
1366
1367         if (drm_bo_mem_compat(mem))
1368                 return 0;
1369
1370         BUG_ON(mem->mm_node);
1371
1372         for (i = 0; i < num_prios; ++i) {
1373                 mem_type = prios[i];
1374                 man = &bm->man[mem_type];
1375                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1376                                                &cur_flags);
1377                 if (type_ok)
1378                         break;
1379         }
1380
1381         if (type_ok) {
1382                 mem->mm_node = NULL;
1383                 mem->mem_type = mem_type;
1384                 mem->flags = cur_flags;
1385                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1386                 return 0;
1387         }
1388
1389         DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1390                   (unsigned long long) mem->mask);
1391         return -EINVAL;
1392 }
1393
1394 /*
1395  * bo locked.
1396  */
1397
1398 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1399                                       uint32_t fence_class,
1400                                       int move_unfenced, int no_wait)
1401 {
1402         struct drm_device *dev = bo->dev;
1403         struct drm_buffer_manager *bm = &dev->bm;
1404         struct drm_bo_driver *driver = dev->driver->bo_driver;
1405         uint32_t ftype;
1406         int ret;
1407
1408         DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1409                   (unsigned long long) bo->mem.mask,
1410                   (unsigned long long) bo->mem.flags);
1411
1412         ret = driver->fence_type(bo, &fence_class, &ftype);
1413
1414         if (ret) {
1415                 DRM_ERROR("Driver did not support given buffer permissions\n");
1416                 return ret;
1417         }
1418
1419         if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
1420                 DRM_ERROR("Attempt to validate pinned buffer into different memory "
1421                     "type\n");
1422                 return -EINVAL;
1423         }
1424
1425         /*
1426          * We're switching command submission mechanism,
1427          * or cannot simply rely on the hardware serializing for us.
1428          *
1429          * Wait for buffer idle.
1430          */
1431
1432         if ((fence_class != bo->fence_class) ||
1433             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1434
1435                 ret = drm_bo_wait(bo, 0, 0, no_wait);
1436
1437                 if (ret)
1438                         return ret;
1439
1440         }
1441
1442         bo->new_fence_class = fence_class;
1443         bo->new_fence_type = ftype;
1444
1445         ret = drm_bo_wait_unmapped(bo, no_wait);
1446         if (ret) {
1447                 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1448                 return ret;
1449         }
1450         if (bo->type == drm_bo_type_fake) {
1451                 ret = drm_bo_check_fake(dev, &bo->mem);
1452                 if (ret)
1453                         return ret;
1454         }
1455
1456         /*
1457          * Check whether we need to move buffer.
1458          */
1459
1460         if (!drm_bo_mem_compat(&bo->mem)) {
1461                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1462                                          move_unfenced);
1463                 if (ret) {
1464                         if (ret != -EAGAIN)
1465                                 DRM_ERROR("Failed moving buffer.\n");
1466                         return ret;
1467                 }
1468         }
1469
1470         /*
1471          * We might need to add a TTM.
1472          */
1473
1474         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1475                 ret = drm_bo_add_ttm(bo);
1476                 if (ret)
1477                         return ret;
1478         }
1479         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1480
1481         /*
1482          * Finally, adjust lru to be sure.
1483          */
1484
1485         mutex_lock(&dev->struct_mutex);
1486         list_del(&bo->lru);
1487         if (move_unfenced) {
1488                 list_add_tail(&bo->lru, &bm->unfenced);
1489                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1490                                 _DRM_BO_FLAG_UNFENCED);
1491         } else {
1492                 drm_bo_add_to_lru(bo);
1493                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1494                         DRM_WAKEUP(&bo->event_queue);
1495                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1496                                         _DRM_BO_FLAG_UNFENCED);
1497                 }
1498         }
1499         mutex_unlock(&dev->struct_mutex);
1500
1501         return 0;
1502 }
1503
1504 int drm_bo_do_validate(struct drm_buffer_object *bo,
1505                        uint64_t flags, uint64_t mask, uint32_t hint,
1506                        uint32_t fence_class,
1507                        int no_wait,
1508                        struct drm_bo_info_rep *rep)
1509 {
1510         int ret;
1511
1512         mutex_lock(&bo->mutex);
1513         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1514
1515         if (ret)
1516                 goto out;
1517
1518         if ((mask & flags & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1519                 DRM_ERROR
1520                     ("DRM_BO_FLAG_NO_EVICT is only available to privileged "
1521                      "processes\n");
1522                 ret = -EPERM;
                     goto out;
1523         }
1524
1525
1526         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1527         ret = drm_bo_new_mask(bo, flags, hint);
1528         if (ret)
1529                 goto out;
1530
1531         ret = drm_buffer_object_validate(bo,
1532                                          fence_class,
1533                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1534                                          no_wait);
1535 out:
1536         if (rep)
1537                 drm_bo_fill_rep_arg(bo, rep);
1538
1539         mutex_unlock(&bo->mutex);
1540         return ret;
1541 }
1542 EXPORT_SYMBOL(drm_bo_do_validate);
1543
1544
1545 int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
1546                            uint32_t fence_class,
1547                            uint64_t flags, uint64_t mask, uint32_t hint,
1548                            struct drm_bo_info_rep * rep,
1549                            struct drm_buffer_object **bo_rep)
1550 {
1551         struct drm_device *dev = file_priv->head->dev;
1552         struct drm_buffer_object *bo;
1553         int ret;
1554         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1555
1556         mutex_lock(&dev->struct_mutex);
1557         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1558         mutex_unlock(&dev->struct_mutex);
1559
1560         if (!bo) {
1561                 return -EINVAL;
1562         }
1563
1564         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1565                                  no_wait, rep);
1566
1567         if (!ret && bo_rep)
1568                 *bo_rep = bo;
1569         else
1570                 drm_bo_usage_deref_unlocked(&bo);
1571
1572         return ret;
1573 }
1574 EXPORT_SYMBOL(drm_bo_handle_validate);
1575
1576 /**
1577  * Fills out the generic buffer object ioctl reply with the information for
1578  * the BO with id of handle.
1579  */
1580 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1581                               struct drm_bo_info_rep *rep)
1582 {
1583         struct drm_device *dev = file_priv->head->dev;
1584         struct drm_buffer_object *bo;
1585
1586         mutex_lock(&dev->struct_mutex);
1587         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1588         mutex_unlock(&dev->struct_mutex);
1589
1590         if (!bo) {
1591                 return -EINVAL;
1592         }
1593         mutex_lock(&bo->mutex);
1594         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1595                 (void)drm_bo_busy(bo);
1596         drm_bo_fill_rep_arg(bo, rep);
1597         mutex_unlock(&bo->mutex);
1598         drm_bo_usage_deref_unlocked(&bo);
1599         return 0;
1600 }
1601
1602 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1603                               uint32_t hint,
1604                               struct drm_bo_info_rep *rep)
1605 {
1606         struct drm_device *dev = file_priv->head->dev;
1607         struct drm_buffer_object *bo;
1608         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1609         int ret;
1610
1611         mutex_lock(&dev->struct_mutex);
1612         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1613         mutex_unlock(&dev->struct_mutex);
1614
1615         if (!bo) {
1616                 return -EINVAL;
1617         }
1618
1619         mutex_lock(&bo->mutex);
1620         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1621         if (ret)
1622                 goto out;
1623         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1624         if (ret)
1625                 goto out;
1626
1627         drm_bo_fill_rep_arg(bo, rep);
1628
1629       out:
1630         mutex_unlock(&bo->mutex);
1631         drm_bo_usage_deref_unlocked(&bo);
1632         return ret;
1633 }
1634
1635 int drm_buffer_object_create(struct drm_device *dev,
1636                              unsigned long size,
1637                              enum drm_bo_type type,
1638                              uint64_t mask,
1639                              uint32_t hint,
1640                              uint32_t page_alignment,
1641                              unsigned long buffer_start,
1642                              struct drm_buffer_object ** buf_obj)
1643 {
1644         struct drm_buffer_manager *bm = &dev->bm;
1645         struct drm_buffer_object *bo;
1646         struct drm_bo_driver *driver = dev->driver->bo_driver;
1647         int ret = 0;
1648         unsigned long num_pages;
1649
1650         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1651                 DRM_ERROR("Invalid buffer object start.\n");
1652                 return -EINVAL;
1653         }
1654         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1655         if (num_pages == 0) {
1656                 DRM_ERROR("Illegal buffer object size.\n");
1657                 return -EINVAL;
1658         }
1659
1660         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1661
1662         if (!bo)
1663                 return -ENOMEM;
1664
1665         mutex_init(&bo->mutex);
1666         mutex_lock(&bo->mutex);
1667
1668         atomic_set(&bo->usage, 1);
1669         atomic_set(&bo->mapped, -1);
1670         DRM_INIT_WAITQUEUE(&bo->event_queue);
1671         INIT_LIST_HEAD(&bo->lru);
1672         INIT_LIST_HEAD(&bo->pinned_lru);
1673         INIT_LIST_HEAD(&bo->ddestroy);
1674 #ifdef DRM_ODD_MM_COMPAT
1675         INIT_LIST_HEAD(&bo->p_mm_list);
1676         INIT_LIST_HEAD(&bo->vma_list);
1677 #endif
1678         bo->dev = dev;
1679         bo->type = type;
1680         bo->num_pages = num_pages;
1681         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1682         bo->mem.num_pages = bo->num_pages;
1683         bo->mem.mm_node = NULL;
1684         bo->mem.page_alignment = page_alignment;
1685         if (bo->type == drm_bo_type_fake) {
1686                 bo->offset = buffer_start;
1687                 bo->buffer_start = 0;
1688         } else {
1689                 bo->buffer_start = buffer_start;
1690         }
1691         bo->priv_flags = 0;
1692         bo->mem.flags = 0ULL;
1693         bo->mem.mask = 0ULL;
1694         atomic_inc(&bm->count);
1695         ret = drm_bo_new_mask(bo, mask, hint);
1696
1697         if (ret)
1698                 goto out_err;
1699
1700         if (bo->type == drm_bo_type_dc) {
1701                 mutex_lock(&dev->struct_mutex);
1702                 ret = drm_bo_setup_vm_locked(bo);
1703                 mutex_unlock(&dev->struct_mutex);
1704                 if (ret)
1705                         goto out_err;
1706         }
1707
1708         bo->fence_class = 0;
1709         ret = driver->fence_type(bo, &bo->fence_type);
1710         if (ret) {
1711                 DRM_ERROR("Driver did not support given buffer permissions\n");
1712                 goto out_err;
1713         }
1714
1715         if (bo->type == drm_bo_type_fake) {
1716                 ret = drm_bo_check_fake(dev, &bo->mem);
1717                 if (ret)
1718                         goto out_err;
1719         }
1720
1721         ret = drm_bo_add_ttm(bo);
1722         if (ret)
1723                 goto out_err;
1724
1725         mutex_lock(&dev->struct_mutex);
1726         drm_bo_add_to_lru(bo);
1727         mutex_unlock(&dev->struct_mutex);
1728
1729         mutex_unlock(&bo->mutex);
1730         *buf_obj = bo;
1731         return 0;
1732
1733       out_err:
1734         mutex_unlock(&bo->mutex);
1735
1736         drm_bo_usage_deref_unlocked(&bo);
1737         return ret;
1738 }
1739 EXPORT_SYMBOL(drm_buffer_object_create);
1740
1741 int drm_bo_add_user_object(struct drm_file *file_priv,
1742                            struct drm_buffer_object *bo, int shareable)
1743 {
1744         struct drm_device *dev = file_priv->head->dev;
1745         int ret;
1746
1747         mutex_lock(&dev->struct_mutex);
1748         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1749         if (ret)
1750                 goto out;
1751
1752         bo->base.remove = drm_bo_base_deref_locked;
1753         bo->base.type = drm_buffer_type;
1754         bo->base.ref_struct_locked = NULL;
1755         bo->base.unref = drm_buffer_user_object_unmap;
1756
1757       out:
1758         mutex_unlock(&dev->struct_mutex);
1759         return ret;
1760 }
1761 EXPORT_SYMBOL(drm_bo_add_user_object);
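/*
 * Illustrative sketch (editor addition): creating a buffer object from
 * inside the kernel and exposing it to a client as a shareable handle,
 * combining drm_buffer_object_create() and drm_bo_add_user_object() above.
 * The size and placement flags are examples only; DRM_BO_FLAG_MEM_TT,
 * DRM_BO_FLAG_READ and DRM_BO_FLAG_WRITE are assumed to be the usual
 * drm.h buffer flags, and the helper name is hypothetical.
 */
static int example_create_shared_bo(struct drm_device *dev,
				    struct drm_file *file_priv,
				    struct drm_buffer_object **bo_out)
{
	struct drm_buffer_object *bo;
	uint64_t mask = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ |
			DRM_BO_FLAG_WRITE | DRM_BO_FLAG_SHAREABLE;
	int ret;

	ret = drm_buffer_object_create(dev, 4 * PAGE_SIZE, drm_bo_type_dc,
				       mask, 0 /* hint */,
				       0 /* page_alignment */,
				       0 /* buffer_start */, &bo);
	if (ret)
		return ret;

	ret = drm_bo_add_user_object(file_priv, bo, 1 /* shareable */);
	if (ret) {
		drm_bo_usage_deref_unlocked(&bo);
		return ret;
	}

	*bo_out = bo;
	return 0;
}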
1762
1763 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
1764 {
1765         LOCK_TEST_WITH_RETURN(dev, file_priv);
1766         return 0;
1767 }
1768
1769 int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1770 {
1771         struct drm_bo_op_arg curarg;
1772         struct drm_bo_op_arg *arg = data;
1773         struct drm_bo_op_req *req = &arg->d.req;
1774         struct drm_bo_info_rep rep = {0};       /* copied back even for unhandled ops */
1775         struct drm_buffer_object *dummy;
1776         unsigned long next = 0;
1777         void __user *curuserarg = NULL;
1778         int ret;
1779
1780         DRM_DEBUG("drm_bo_op_ioctl\n");
1781
1782         if (!dev->bm.initialized) {
1783                 DRM_ERROR("Buffer object manager is not initialized.\n");
1784                 return -EINVAL;
1785         }
1786
1787         do {
1788                 if (next != 0) {
1789                         curuserarg = (void __user *)next;
1790                         if (copy_from_user(&curarg, curuserarg,
1791                                            sizeof(curarg)) != 0)
1792                                 return -EFAULT;
1793                         arg = &curarg;
1794                 }
1795
1796                 if (arg->handled) {
1797                         next = arg->next;
1798                         continue;
1799                 }
1800                 req = &arg->d.req;
1801                 ret = 0;
1802                 switch (req->op) {
1803                 case drm_bo_validate:
1804                         ret = drm_bo_lock_test(dev, file_priv);
1805                         if (ret)
1806                                 break;
1807                         ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
1808                                                      req->bo_req.fence_class,
1809                                                      req->bo_req.flags,
1810                                                      req->bo_req.mask,
1811                                                      req->bo_req.hint,
1812                                                      &rep, &dummy);
1813                         break;
1814                 case drm_bo_fence:
1815                         ret = -EINVAL;
1816                         DRM_ERROR("Function is not implemented yet.\n");
1817                         break;
1818                 case drm_bo_ref_fence:
1819                         ret = -EINVAL;
1820                         DRM_ERROR("Function is not implemented yet.\n");
1821                         break;
1822                 default:
1823                         ret = -EINVAL;
1824                 }
1825                 next = arg->next;
1826
1827                 /*
1828                  * A signal interrupted us. Make sure the ioctl is restartable.
1829                  */
1830
1831                 if (ret == -EAGAIN)
1832                         return -EAGAIN;
1833
1834                 arg->handled = 1;
1835                 arg->d.rep.ret = ret;
1836                 arg->d.rep.bo_info = rep;
1837                 if (arg != data) {
1838                         if (copy_to_user(curuserarg, &curarg,
1839                                          sizeof(curarg)) != 0)
1840                                 return -EFAULT;
1841                 }
1842         } while (next != 0);
1843         return 0;
1844 }
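/*
 * Illustrative sketch (editor addition): how a user-space client might
 * chain two validate requests through the op ioctl above.  Each entry's
 * 'next' field carries the user address of the following drm_bo_op_arg,
 * and the kernel writes the per-entry result into d.rep.  This is
 * user-space code shown only for illustration; DRM_IOCTL_BO_OP is an
 * assumed name for the ioctl request, and the placement flag is an
 * assumption as well.
 */
static int example_validate_two(int fd, unsigned handle0, unsigned handle1)
{
	struct drm_bo_op_arg args[2];

	memset(args, 0, sizeof(args));

	args[0].d.req.op = drm_bo_validate;
	args[0].d.req.bo_req.handle = handle0;
	args[0].d.req.bo_req.flags = DRM_BO_FLAG_MEM_TT;
	args[0].d.req.bo_req.mask = DRM_BO_FLAG_MEM_TT;
	args[0].next = (unsigned long) &args[1];

	args[1].d.req.op = drm_bo_validate;
	args[1].d.req.bo_req.handle = handle1;
	args[1].d.req.bo_req.flags = DRM_BO_FLAG_MEM_TT;
	args[1].d.req.bo_req.mask = DRM_BO_FLAG_MEM_TT;
	args[1].next = 0;	/* end of chain */

	if (ioctl(fd, DRM_IOCTL_BO_OP, args) != 0)
		return -errno;

	return args[0].d.rep.ret ? args[0].d.rep.ret : args[1].d.rep.ret;
}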
1845
1846 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1847 {
1848         struct drm_bo_create_arg *arg = data;
1849         struct drm_bo_create_req *req = &arg->d.req;
1850         struct drm_bo_info_rep *rep = &arg->d.rep;
1851         struct drm_buffer_object *entry;
1852         int ret = 0;
1853
1854         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
1855             (int)(req->size / 1024), req->page_alignment * 4, req->type);
1856
1857         if (!dev->bm.initialized) {
1858                 DRM_ERROR("Buffer object manager is not initialized.\n");
1859                 return -EINVAL;
1860         }
1861         if (req->type == drm_bo_type_fake)
1862                 LOCK_TEST_WITH_RETURN(dev, file_priv);
1863
1864         ret = drm_buffer_object_create(file_priv->head->dev,
1865                                        req->size, req->type, req->mask,
1866                                        req->hint, req->page_alignment,
1867                                        req->buffer_start, &entry);
1868         if (ret)
1869                 goto out;
1870         
1871         ret = drm_bo_add_user_object(file_priv, entry,
1872                                      req->mask & DRM_BO_FLAG_SHAREABLE);
1873         if (ret) {
1874                 drm_bo_usage_deref_unlocked(&entry);
1875                 goto out;
1876         }
1877         
1878         mutex_lock(&entry->mutex);
1879         drm_bo_fill_rep_arg(entry, rep);
1880         mutex_unlock(&entry->mutex);
1881
1882 out:
1883         return ret;
1884 }
1885
1886
1887 int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1888 {
1889         struct drm_bo_handle_arg *arg = data;
1890         struct drm_user_object *uo;
1891         int ret = 0;
1892
1893         DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
1894
1895         if (!dev->bm.initialized) {
1896                 DRM_ERROR("Buffer object manager is not initialized.\n");
1897                 return -EINVAL;
1898         }
1899
1900         mutex_lock(&dev->struct_mutex);
1901         uo = drm_lookup_user_object(file_priv, arg->handle);
1902         if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
1903                 mutex_unlock(&dev->struct_mutex);
1904                 return -EINVAL;
1905         }
1906         ret = drm_remove_user_object(file_priv, uo);
1907         mutex_unlock(&dev->struct_mutex);
1908         
1909         return ret;
1910 }
1911
1912 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1913 {
1914         struct drm_bo_map_wait_idle_arg *arg = data;
1915         struct drm_bo_info_req *req = &arg->d.req;
1916         struct drm_bo_info_rep *rep = &arg->d.rep;
1917         int ret;
1918
1919         DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);
1920
1921         if (!dev->bm.initialized) {
1922                 DRM_ERROR("Buffer object manager is not initialized.\n");
1923                 return -EINVAL;
1924         }
1925
1926         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1927                                     req->hint, rep);
1928         if (ret)
1929                 return ret;
1930
1931         return 0;
1932 }
1933
1934 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1935 {
1936         struct drm_bo_handle_arg *arg = data;
1937         int ret;
1938
1939         DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);
1940
1941         if (!dev->bm.initialized) {
1942                 DRM_ERROR("Buffer object manager is not initialized.\n");
1943                 return -EINVAL;
1944         }
1945
1946         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1947         return ret;
1948 }
1949
1950
1951 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1952 {
1953         struct drm_bo_reference_info_arg *arg = data;
1954         struct drm_bo_handle_arg *req = &arg->d.req;
1955         struct drm_bo_info_rep *rep = &arg->d.rep;
1956         struct drm_user_object *uo;
1957         int ret;
1958
1959         DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);
1960
1961         if (!dev->bm.initialized) {
1962                 DRM_ERROR("Buffer object manager is not initialized.\n");
1963                 return -EINVAL;
1964         }
1965
1966         ret = drm_user_object_ref(file_priv, req->handle,
1967                                   drm_buffer_type, &uo);
1968         if (ret)
1969                 return ret;
1970         
1971         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1972         if (ret)
1973                 return ret;
1974
1975         return 0;
1976 }
1977
1978 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1979 {
1980         struct drm_bo_handle_arg *arg = data;
1981         int ret = 0;
1982
1983         DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);
1984
1985         if (!dev->bm.initialized) {
1986                 DRM_ERROR("Buffer object manager is not initialized.\n");
1987                 return -EINVAL;
1988         }
1989
1990         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1991         return ret;
1992 }
1993
1994 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1995 {
1996         struct drm_bo_reference_info_arg *arg = data;
1997         struct drm_bo_handle_arg *req = &arg->d.req;
1998         struct drm_bo_info_rep *rep = &arg->d.rep;
1999         int ret;
2000
2001         DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);
2002
2003         if (!dev->bm.initialized) {
2004                 DRM_ERROR("Buffer object manager is not initialized.\n");
2005                 return -EINVAL;
2006         }
2007
2008         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2009         if (ret)
2010                 return ret;
2011
2012         return 0;
2013 }
2014
2015 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2016 {
2017         struct drm_bo_map_wait_idle_arg *arg = data;
2018         struct drm_bo_info_req *req = &arg->d.req;
2019         struct drm_bo_info_rep *rep = &arg->d.rep;
2020         int ret;
2021
2022         DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);
2023
2024         if (!dev->bm.initialized) {
2025                 DRM_ERROR("Buffer object manager is not initialized.\n");
2026                 return -EINVAL;
2027         }
2028
2029         ret = drm_bo_handle_wait(file_priv, req->handle,
2030                                  req->hint, rep);
2031         if (ret)
2032                 return ret;
2033
2034         return 0;
2035 }
2036
2037 /**
2038  * Pins or unpins the given buffer object in the given memory area.
2039  *
2040  * Pinned buffers will not be evicted from or move within their memory area.
2041  * Must be called with the hardware lock held for pinning.
2042  */
2043 static int
2044 drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
2045     int pin)
2046 {
2047         int ret = 0;
2048
2049         mutex_lock(&bo->mutex);
2050         if (bo->pinned == pin) {
2051                 mutex_unlock(&bo->mutex);
2052                 return 0;
2053         }
2054
2055         if (pin) {
2056                 ret = drm_bo_wait_unfenced(bo, 0, 0);
2057                 if (ret) {
2058                         mutex_unlock(&bo->mutex);
2059                         return ret;
2060                 }
2061
2062                 /* Validate the buffer into its pinned location, with no
2063                  * pending fence.
2064                  */
2065                 ret = drm_buffer_object_validate(bo, 0, 0, 0);
2066                 if (ret) {
2067                         mutex_unlock(&bo->mutex);
2068                         return ret;
2069                 }
2070
2071                 /* Pull the buffer off of the LRU and add it to the pinned
2072                  * list
2073                  */
2074                 bo->pinned_mem_type = bo->mem.mem_type;
2075                 mutex_lock(&dev->struct_mutex);
2076                 list_del_init(&bo->lru);
2077                 list_del_init(&bo->pinned_lru);
2078                 drm_bo_add_to_pinned_lru(bo);
2079
2080                 if (bo->pinned_node != bo->mem.mm_node) {
2081                         if (bo->pinned_node != NULL)
2082                                 drm_mm_put_block(bo->pinned_node);
2083                         bo->pinned_node = bo->mem.mm_node;
2084                 }
2085
2086                 bo->pinned = pin;
2087                 mutex_unlock(&dev->struct_mutex);
2088
2089         } else {
2090                 mutex_lock(&dev->struct_mutex);
2091
2092                 /* Remove our buffer from the pinned list */
2093                 if (bo->pinned_node && bo->pinned_node != bo->mem.mm_node)
2094                         drm_mm_put_block(bo->pinned_node);
2095
2096                 list_del_init(&bo->pinned_lru);
2097                 bo->pinned_node = NULL;
2098                 bo->pinned = pin;
2099                 mutex_unlock(&dev->struct_mutex);
2100         }
2101         mutex_unlock(&bo->mutex);
2102         return 0;
2103 }
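/*
 * Illustrative sketch (editor addition): pinning a scanout buffer so that
 * the memory manager will neither evict it nor move it while it is being
 * displayed.  drm_bo_set_pin() is static to this file, so a real driver
 * would reach it through drm_bo_set_pin_ioctl() below; the helper only
 * shows the expected calling pattern (hardware lock held for pinning),
 * and its name is hypothetical.
 */
static int example_pin_scanout(struct drm_device *dev,
			       struct drm_buffer_object *scanout_bo)
{
	int ret;

	/* Pin: re-validates the buffer and moves it to the pinned LRU. */
	ret = drm_bo_set_pin(dev, scanout_bo, 1);
	if (ret)
		return ret;

	/* ... program the CRTC to scan out from scanout_bo ... */

	/* Unpin once the buffer is no longer being displayed. */
	return drm_bo_set_pin(dev, scanout_bo, 0);
}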
2104
2105 int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
2106                          struct drm_file *file_priv)
2107 {
2108         struct drm_bo_set_pin_arg *arg = data;
2109         struct drm_bo_set_pin_req *req = &arg->d.req;
2110         struct drm_bo_info_rep *rep = &arg->d.rep;
2111         struct drm_buffer_object *bo;
2112         int ret;
2113
2114         DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
2115             req->handle, req->pin);
2116
2117         if (!dev->bm.initialized) {
2118                 DRM_ERROR("Buffer object manager is not initialized.\n");
2119                 return -EINVAL;
2120         }
2121
2122         if (req->pin < 0 || req->pin > 1) {
2123                 DRM_ERROR("Bad arguments to set_pin\n");
2124                 return -EINVAL;
2125         }
2126
2127         if (req->pin)
2128                 LOCK_TEST_WITH_RETURN(dev, file_priv);
2129
2130         mutex_lock(&dev->struct_mutex);
2131         bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
2132         mutex_unlock(&dev->struct_mutex);
2133         if (!bo) {
2134                 return -EINVAL;
2135         }
2136
2137         ret = drm_bo_set_pin(dev, bo, req->pin);
2138         if (ret) {
2139                 drm_bo_usage_deref_unlocked(&bo);
2140                 return ret;
2141         }
2142
2143         drm_bo_fill_rep_arg(bo, rep);
2144         drm_bo_usage_deref_unlocked(&bo);
2145
2146         return 0;
2147 }
2148
2149
2150 /**
2151  * Clean the unfenced list and put its buffers back on the regular LRU.
2152  * This is part of the memory manager cleanup and should only be
2153  * called with the DRI lock held.
2154  * Call with dev->struct_mutex held.
2155  */
2156
2157 static void drm_bo_clean_unfenced(struct drm_device *dev)
2158 {
2159         struct drm_buffer_manager *bm  = &dev->bm;
2160         struct list_head *head, *list;
2161         struct drm_buffer_object *entry;
2162         struct drm_fence_object *fence = NULL;
2163
2164         head = &bm->unfenced;
2165
2166         if (list_empty(head))
2167                 return;
2168
2169         DRM_ERROR("Clean unfenced\n");
2170
2171         if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
2172
2173                 /*
2174                  * Fixme: Should really wait here.
2175                  */
2176         }
2177
2178         if (fence)
2179                 drm_fence_usage_deref_locked(&fence);
2180
2181         if (list_empty(head))
2182                 return;
2183
2184         DRM_ERROR("Really clean unfenced\n");
2185
2186         list = head->next;
2187         while(list != head) {
2188                 prefetch(list->next);
2189                 entry = list_entry(list, struct drm_buffer_object, lru);
2190
2191                 atomic_inc(&entry->usage);
2192                 mutex_unlock(&dev->struct_mutex);
2193                 mutex_lock(&entry->mutex);
2194                 mutex_lock(&dev->struct_mutex);
2195
2196                 list_del(&entry->lru);
2197                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
2198                 drm_bo_add_to_lru(entry);
2199                 mutex_unlock(&entry->mutex);
2200                 list = head->next;
2201         }
2202 }
2203
2204 static int drm_bo_leave_list(struct drm_buffer_object * bo,
2205                              uint32_t mem_type,
2206                              int free_pinned, int allow_errors)
2207 {
2208         struct drm_device *dev = bo->dev;
2209         int ret = 0;
2210
2211         mutex_lock(&bo->mutex);
2212
2213         ret = drm_bo_expire_fence(bo, allow_errors);
2214         if (ret)
2215                 goto out;
2216
2217         if (free_pinned) {
2218                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2219                 mutex_lock(&dev->struct_mutex);
2220                 list_del_init(&bo->pinned_lru);
2221                 if (bo->pinned_node == bo->mem.mm_node)
2222                         bo->pinned_node = NULL;
2223                 if (bo->pinned_node != NULL) {
2224                         drm_mm_put_block(bo->pinned_node);
2225                         bo->pinned_node = NULL;
2226                 }
2227                 mutex_unlock(&dev->struct_mutex);
2228         }
2229
2230         if (bo->pinned) {
2231                 DRM_ERROR("A pinned buffer was present at "
2232                           "cleanup. Removing flag and evicting.\n");
2233                 bo->pinned = 0;
2234         }
2235
2236         if (bo->mem.mem_type == mem_type)
2237                 ret = drm_bo_evict(bo, mem_type, 0);
2238
2239         if (ret) {
2240                 if (allow_errors) {
2241                         goto out;
2242                 } else {
2243                         ret = 0;
2244                         DRM_ERROR("Cleanup eviction failed\n");
2245                 }
2246         }
2247
2248       out:
2249         mutex_unlock(&bo->mutex);
2250         return ret;
2251 }
2252
2253
2254 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2255                                          int pinned_list)
2256 {
2257         if (pinned_list)
2258                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2259         else
2260                 return list_entry(list, struct drm_buffer_object, lru);
2261 }
2262
2263 /*
2264  * dev->struct_mutex locked.
2265  */
2266
2267 static int drm_bo_force_list_clean(struct drm_device * dev,
2268                                    struct list_head *head,
2269                                    unsigned mem_type,
2270                                    int free_pinned,
2271                                    int allow_errors,
2272                                    int pinned_list)
2273 {
2274         struct list_head *list, *next, *prev;
2275         struct drm_buffer_object *entry, *nentry;
2276         int ret;
2277         int do_restart;
2278
2279         /*
2280          * The list traversal is a bit odd here, because an item may
2281          * disappear from the list when we release the struct_mutex or
2282          * when we decrease the usage count. Also we're not guaranteed
2283          * to drain pinned lists, so we can't always restart.
2284          */
2285
2286 restart:
2287         nentry = NULL;
2288         list_for_each_safe(list, next, head) {
2289                 prev = list->prev;
2290
2291                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2292                 atomic_inc(&entry->usage);
2293                 if (nentry) {
2294                         atomic_dec(&nentry->usage);
2295                         nentry = NULL;
2296                 }
2297
2298                 /*
2299                  * Protect the next item from destruction, so we can check
2300                  * its list pointers later on.
2301                  */
2302
2303                 if (next != head) {
2304                         nentry = drm_bo_entry(next, pinned_list);
2305                         atomic_inc(&nentry->usage);
2306                 }
2307                 mutex_unlock(&dev->struct_mutex);
2308
2309                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2310                                         allow_errors);
2311                 mutex_lock(&dev->struct_mutex);
2312
2313                 drm_bo_usage_deref_locked(&entry);
2314                 if (ret)
2315                         return ret;
2316
2317                 /*
2318                  * Has the next item disappeared from the list?
2319                  */
2320
2321                 do_restart = ((next->prev != list) && (next->prev != prev));
2322
2323                 if (nentry != NULL && do_restart)
2324                         drm_bo_usage_deref_locked(&nentry);
2325
2326                 if (do_restart)
2327                         goto restart;
2328         }
2329         return 0;
2330 }
2331
2332 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2333 {
2334         struct drm_buffer_manager *bm = &dev->bm;
2335         struct drm_mem_type_manager *man = &bm->man[mem_type];
2336         int ret = -EINVAL;
2337
2338         if (mem_type >= DRM_BO_MEM_TYPES) {
2339                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2340                 return ret;
2341         }
2342
2343         if (!man->has_type) {
2344                 DRM_ERROR("Trying to take down uninitialized "
2345                           "memory manager type %u\n", mem_type);
2346                 return ret;
2347         }
2348         man->use_type = 0;
2349         man->has_type = 0;
2350
2351         ret = 0;
2352         if (mem_type > 0) {
2353
2354                 drm_bo_clean_unfenced(dev);
2355                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2356                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2357
2358                 if (drm_mm_clean(&man->manager)) {
2359                         drm_mm_takedown(&man->manager);
2360                 } else {
2361                         ret = -EBUSY;
2362                 }
2363         }
2364
2365         return ret;
2366 }
2367 EXPORT_SYMBOL(drm_bo_clean_mm);
2368
2369 /**
2370  * Evict all buffers of a particular mem_type, but leave memory manager
2371  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2372  * point since we have the hardware lock.
2373  */
2374
2375 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2376 {
2377         int ret;
2378         struct drm_buffer_manager *bm = &dev->bm;
2379         struct drm_mem_type_manager *man = &bm->man[mem_type];
2380
2381         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2382                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2383                 return -EINVAL;
2384         }
2385
2386         if (!man->has_type) {
2387                 DRM_ERROR("Memory type %u has not been initialized.\n",
2388                           mem_type);
2389                 return 0;
2390         }
2391
2392         drm_bo_clean_unfenced(dev);
2393         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2394         if (ret)
2395                 return ret;
2396         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2397
2398         return ret;
2399 }
2400
2401 int drm_bo_init_mm(struct drm_device * dev,
2402                    unsigned type,
2403                    unsigned long p_offset, unsigned long p_size)
2404 {
2405         struct drm_buffer_manager *bm = &dev->bm;
2406         int ret = -EINVAL;
2407         struct drm_mem_type_manager *man;
2408
2409         if (type >= DRM_BO_MEM_TYPES) {
2410                 DRM_ERROR("Illegal memory type %d\n", type);
2411                 return ret;
2412         }
2413
2414         man = &bm->man[type];
2415         if (man->has_type) {
2416                 DRM_ERROR("Memory manager already initialized for type %d\n",
2417                           type);
2418                 return ret;
2419         }
2420
2421         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2422         if (ret)
2423                 return ret;
2424
2425         ret = 0;
2426         if (type != DRM_BO_MEM_LOCAL) {
2427                 if (!p_size) {
2428                         DRM_ERROR("Zero size memory manager type %d\n", type);
2429                         return -EINVAL;
2430                 }
2431                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2432                 if (ret)
2433                         return ret;
2434         }
2435         man->has_type = 1;
2436         man->use_type = 1;
2437
2438         INIT_LIST_HEAD(&man->lru);
2439         INIT_LIST_HEAD(&man->pinned);
2440
2441         return 0;
2442 }
2443 EXPORT_SYMBOL(drm_bo_init_mm);
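/*
 * Illustrative sketch (editor addition): a driver bringing up an extra
 * memory type once the buffer manager itself is initialized.  The offset
 * and size handed to drm_bo_init_mm() are in pages (compare the
 * << PAGE_SHIFT conversion in drm_bo_pci_offset() and the "kb" scaling in
 * drm_mm_init_ioctl()).  DRM_BO_MEM_TT and the helper name are assumptions
 * made for the example.
 */
static int example_init_tt(struct drm_device *dev, unsigned long aperture_bytes)
{
	/* Manage the whole aperture, starting at page offset 0. */
	return drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
			      aperture_bytes >> PAGE_SHIFT);
}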
2444
2445 /*
2446  * This is called from lastclose, so we don't need to bother about
2447  * any clients still running when we set the initialized flag to zero.
2448  */
2449
2450 int drm_bo_driver_finish(struct drm_device * dev)
2451 {
2452         struct drm_buffer_manager *bm = &dev->bm;
2453         int ret = 0;
2454         unsigned i = DRM_BO_MEM_TYPES;
2455         struct drm_mem_type_manager *man;
2456
2457         mutex_lock(&dev->bm.init_mutex);
2458         mutex_lock(&dev->struct_mutex);
2459
2460         if (!bm->initialized)
2461                 goto out;
2462         bm->initialized = 0;
2463
2464         while (i--) {
2465                 man = &bm->man[i];
2466                 if (man->has_type) {
2467                         man->use_type = 0;
2468                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2469                                 ret = -EBUSY;
2470                                 DRM_ERROR("DRM memory manager type %d "
2471                                           "is not clean.\n", i);
2472                         }
2473                         man->has_type = 0;
2474                 }
2475         }
2476         mutex_unlock(&dev->struct_mutex);
2477
2478         if (!cancel_delayed_work(&bm->wq)) {
2479                 flush_scheduled_work();
2480         }
2481         mutex_lock(&dev->struct_mutex);
2482         drm_bo_delayed_delete(dev, 1);
2483         if (list_empty(&bm->ddestroy)) {
2484                 DRM_DEBUG("Delayed destroy list was clean\n");
2485         }
2486         if (list_empty(&bm->man[0].lru)) {
2487                 DRM_DEBUG("Swap list was clean\n");
2488         }
2489         if (list_empty(&bm->man[0].pinned)) {
2490                 DRM_DEBUG("NO_MOVE list was clean\n");
2491         }
2492         if (list_empty(&bm->unfenced)) {
2493                 DRM_DEBUG("Unfenced list was clean\n");
2494         }
2495       out:
2496         mutex_unlock(&dev->struct_mutex);
2497         mutex_unlock(&dev->bm.init_mutex);
2498         return ret;
2499 }
2500
2501 int drm_bo_driver_init(struct drm_device * dev)
2502 {
2503         struct drm_bo_driver *driver = dev->driver->bo_driver;
2504         struct drm_buffer_manager *bm = &dev->bm;
2505         int ret = -EINVAL;
2506
2507         mutex_lock(&dev->bm.init_mutex);
2508         mutex_lock(&dev->struct_mutex);
2509         if (!driver)
2510                 goto out_unlock;
2511
2512         /*
2513          * Initialize the system memory buffer type.
2514          * Other types need to be driver / IOCTL initialized.
2515          */
2516         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2517         if (ret)
2518                 goto out_unlock;
2519
2520 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2521         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2522 #else
2523         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2524 #endif
2525         bm->initialized = 1;
2526         bm->nice_mode = 1;
2527         atomic_set(&bm->count, 0);
2528         bm->cur_pages = 0;
2529         INIT_LIST_HEAD(&bm->unfenced);
2530         INIT_LIST_HEAD(&bm->ddestroy);
2531       out_unlock:
2532         mutex_unlock(&dev->struct_mutex);
2533         mutex_unlock(&dev->bm.init_mutex);
2534         return ret;
2535 }
2536
2537 EXPORT_SYMBOL(drm_bo_driver_init);
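/*
 * Illustrative sketch (editor addition): the intended pairing of
 * drm_bo_driver_init() and drm_bo_driver_finish().  drm_bo_driver_init()
 * is called once when buffer objects are first needed and sets up the
 * DRM_BO_MEM_LOCAL type; drm_bo_driver_finish() runs from lastclose and
 * tears every initialized type down again.  The hook names below are
 * hypothetical, not a fixed driver interface.
 */
static int example_firstopen(struct drm_device *dev)
{
	int ret;

	ret = drm_bo_driver_init(dev);
	if (ret)
		return ret;

	/* Device-specific types (TT, VRAM, ...) are added afterwards,
	 * either here with drm_bo_init_mm() or later via the init ioctl. */
	return 0;
}

static void example_lastclose(struct drm_device *dev)
{
	/* Cleans every initialized memory type and flushes delayed work. */
	(void)drm_bo_driver_finish(dev);
}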
2538
2539 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2540 {
2541         struct drm_mm_init_arg *arg = data;
2542         struct drm_buffer_manager *bm = &dev->bm;
2543         struct drm_bo_driver *driver = dev->driver->bo_driver;
2544         int ret;
2545
2546         DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
2547             arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));
2548
2549         if (!driver) {
2550                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2551                 return -EINVAL;
2552         }
2553
2554         ret = -EINVAL;
2555         if (arg->magic != DRM_BO_INIT_MAGIC) {
2556                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2557                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2558                 return -EINVAL;
2559         }
2560         if (arg->major != DRM_BO_INIT_MAJOR) {
2561                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2562                           "\tversion don't match. Got %d, expected %d,\n",
2563                           arg->major, DRM_BO_INIT_MAJOR);
2564                 return -EINVAL;
2565         }
2566         if (arg->minor > DRM_BO_INIT_MINOR) {
2567                 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2568                           "\tlibdrm buffer object interface version is %d.%d.\n"
2569                           "\tkernel DRM buffer object interface version is %d.%d\n",
2570                           arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2571                 return -EINVAL;
2572         }
2573
2574         mutex_lock(&dev->bm.init_mutex);
2575         mutex_lock(&dev->struct_mutex);
2576         if (!bm->initialized) {
2577                 DRM_ERROR("DRM memory manager was not initialized.\n");
2578                 goto out;
2579         }
2580         if (arg->mem_type == 0) {
2581                 DRM_ERROR("System memory buffers already initialized.\n");
2582                 goto out;
2583         }
2584         ret = drm_bo_init_mm(dev, arg->mem_type,
2585                              arg->p_offset, arg->p_size);
2586
2587 out:
2588         mutex_unlock(&dev->struct_mutex);
2589         mutex_unlock(&dev->bm.init_mutex);
2590         if (ret)
2591                 return ret;
2592
2593         return 0;
2594 }
2595
2596 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2597 {
2598         struct drm_mm_type_arg *arg = data;
2599         struct drm_buffer_manager *bm = &dev->bm;
2600         struct drm_bo_driver *driver = dev->driver->bo_driver;
2601         int ret;
2602
2603         DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);
2604
2605         if (!driver) {
2606                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2607                 return -EINVAL;
2608         }
2609
2610         LOCK_TEST_WITH_RETURN(dev, file_priv);
2611         mutex_lock(&dev->bm.init_mutex);
2612         mutex_lock(&dev->struct_mutex);
2613         ret = -EINVAL;
2614         if (!bm->initialized) {
2615                 DRM_ERROR("DRM memory manager was not initialized\n");
2616                 goto out;
2617         }
2618         if (arg->mem_type == 0) {
2619                 DRM_ERROR("No takedown for System memory buffers.\n");
2620                 goto out;
2621         }
2622         ret = 0;
2623         if (drm_bo_clean_mm(dev, arg->mem_type)) {
2624                 DRM_ERROR("Memory manager type %d not clean. "
2625                           "Delaying takedown\n", arg->mem_type);
2626         }
2627 out:
2628         mutex_unlock(&dev->struct_mutex);
2629         mutex_unlock(&dev->bm.init_mutex);
2630         if (ret)
2631                 return ret;
2632
2633         return 0;
2634 }
2635
2636 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2637 {
2638         struct drm_mm_type_arg *arg = data;
2639         struct drm_bo_driver *driver = dev->driver->bo_driver;
2640         int ret;
2641
2642         DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);
2643
2644         if (!driver) {
2645                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2646                 return -EINVAL;
2647         }
2648
2649         LOCK_TEST_WITH_RETURN(dev, file_priv);
2650         mutex_lock(&dev->bm.init_mutex);
2651         mutex_lock(&dev->struct_mutex);
2652         ret = drm_bo_lock_mm(dev, arg->mem_type);
2653         mutex_unlock(&dev->struct_mutex);
2654         mutex_unlock(&dev->bm.init_mutex);
2655         if (ret)
2656                 return ret;
2657
2658         return 0;
2659 }
2660
2661 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2662 {
2663         struct drm_bo_driver *driver = dev->driver->bo_driver;
2664         int ret;
2665
2666         DRM_DEBUG("drm_mm_unlock_ioctl\n");
2667
2668         if (!driver) {
2669                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2670                 return -EINVAL;
2671         }
2672
2673         LOCK_TEST_WITH_RETURN(dev, file_priv);
2674         mutex_lock(&dev->bm.init_mutex);
2675         mutex_lock(&dev->struct_mutex);
2676         ret = 0;
2677
2678         mutex_unlock(&dev->struct_mutex);
2679         mutex_unlock(&dev->bm.init_mutex);
2680         if (ret)
2681                 return ret;
2682
2683         return 0;
2684 }
2685
2686 /*
2687  * buffer object vm functions.
2688  */
2689
2690 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2691 {
2692         struct drm_buffer_manager *bm = &dev->bm;
2693         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2694
2695         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2696                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2697                         return 0;
2698
2699                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2700                         return 0;
2701
2702                 if (mem->flags & DRM_BO_FLAG_CACHED)
2703                         return 0;
2704         }
2705         return 1;
2706 }
2707
2708 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2709
2710 /**
2711  * Get the PCI offset for the buffer object memory.
2712  * \param dev The DRM device.
2713  * \param mem The buffer object memory region.
2714  * \param bus_base On return the base of the PCI region
2715  * \param bus_offset On return the byte offset into the PCI region
2716  * \param bus_size On return the byte size of the buffer object or zero if
2717  *     the buffer object memory is not accessible through a PCI region.
2718  * \return Failure indication.
2719  *
2720  * Returns -EINVAL if the buffer object is currently not mappable.
2721  * Otherwise returns zero.
2722  */
2723
2724 int drm_bo_pci_offset(struct drm_device *dev,
2725                       struct drm_bo_mem_reg *mem,
2726                       unsigned long *bus_base,
2727                       unsigned long *bus_offset, unsigned long *bus_size)
2728 {
2729         struct drm_buffer_manager *bm = &dev->bm;
2730         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2731
2732         *bus_size = 0;
2733         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2734                 return -EINVAL;
2735
2736         if (drm_mem_reg_is_pci(dev, mem)) {
2737                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2738                 *bus_size = mem->num_pages << PAGE_SHIFT;
2739                 *bus_base = man->io_offset;
2740         }
2741
2742         return 0;
2743 }
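/*
 * Illustrative sketch (editor addition): mapping a buffer object's backing
 * store into kernel virtual address space when drm_bo_pci_offset() above
 * reports that it lives in a PCI-addressable region.  ioremap()/iounmap()
 * are the standard kernel primitives; a real driver would also honour the
 * caching flags of the memory type.  The helper name is hypothetical.
 */
static void __iomem *example_kmap_bo_pci(struct drm_device *dev,
					 struct drm_buffer_object *bo)
{
	unsigned long bus_base, bus_offset, bus_size;

	if (drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
			      &bus_size))
		return NULL;	/* currently not mappable */

	if (bus_size == 0)	/* not accessible through a PCI region */
		return NULL;

	return ioremap(bus_base + bus_offset, bus_size);
}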
2744
2745 /**
2746  * Kill all user-space virtual mappings of this buffer object.
2747  *
2748  * \param bo The buffer object.
2749  *
2750  * Call bo->mutex locked.
2751  */
2752
2753 void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2754 {
2755         struct drm_device *dev = bo->dev;
2756         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2757         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2758
2759         if (!dev->dev_mapping)
2760                 return;
2761
2762         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2763 }
2764
2765 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2766 {
2767         struct drm_map_list *list = &bo->map_list;
2768         drm_local_map_t *map;
2769         struct drm_device *dev = bo->dev;
2770
2771         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2772         if (list->user_token) {
2773                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2774                 list->user_token = 0;
2775         }
2776         if (list->file_offset_node) {
2777                 drm_mm_put_block(list->file_offset_node);
2778                 list->file_offset_node = NULL;
2779         }
2780
2781         map = list->map;
2782         if (!map)
2783                 return;
2784
2785         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2786         list->map = NULL;
2787         list->user_token = 0ULL;
2788         drm_bo_usage_deref_locked(&bo);
2789 }
2790
2791 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2792 {
2793         struct drm_map_list *list = &bo->map_list;
2794         drm_local_map_t *map;
2795         struct drm_device *dev = bo->dev;
2796
2797         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2798         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2799         if (!list->map)
2800                 return -ENOMEM;
2801
2802         map = list->map;
2803         map->offset = 0;
2804         map->type = _DRM_TTM;
2805         map->flags = _DRM_REMOVABLE;
2806         map->size = bo->mem.num_pages * PAGE_SIZE;
2807         atomic_inc(&bo->usage);
2808         map->handle = (void *)bo;
2809
2810         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2811                                                     bo->mem.num_pages, 0, 0);
2812
2813         if (!list->file_offset_node) {
2814                 drm_bo_takedown_vm_locked(bo);
2815                 return -ENOMEM;
2816         }
2817
2818         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2819                                                   bo->mem.num_pages, 0);
2820
2821         list->hash.key = list->file_offset_node->start;
2822         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2823                 drm_bo_takedown_vm_locked(bo);
2824                 return -ENOMEM;
2825         }
2826
2827         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2828
2829         return 0;
2830 }