Fix drm_bo.c compiling.
[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
 39  * dev->struct_mutex also protects all lists and list heads, as well as hash tables
 40  * and hash heads.
41  *
42  * bo->mutex protects the buffer object itself excluding the usage field.
 43  * bo->mutex also protects the buffer list heads, so to manipulate those, we need
44  * both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
47  * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
48  * traversal will, in general, need to be restarted.
49  *
50  */
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
56
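/*
 * Map a buffer memory type index to its placement flag bit; the per-type
 * placement flags occupy bits 24 and up of the flag word.
 */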
57 static inline uint32_t drm_bo_type_flags(unsigned type)
58 {
59         return (1 << (24 + type));
60 }
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
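/*
 * dev->struct_mutex locked.
 * Put the buffer on the evictable LRU of its current memory type, unless it
 * is pinned in that memory type, in which case it is kept off the LRU.
 */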
77 void drm_bo_add_to_lru(struct drm_buffer_object * bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
84                 man = &bo->dev->bm.man[bo->mem.mem_type];
85                 list_add_tail(&bo->lru, &man->lru);
86         } else {
87                 INIT_LIST_HEAD(&bo->lru);
88         }
89 }
90
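/*
 * Tear down any virtual (user-space) mappings of the buffer before it is
 * moved, so that stale mappings cannot reach the old location.
 */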
91 static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
92 {
93 #ifdef DRM_ODD_MM_COMPAT
94         int ret;
95
96         if (!bo->map_list.map)
97                 return 0;
98
99         ret = drm_bo_lock_kmm(bo);
100         if (ret)
101                 return ret;
102         drm_bo_unmap_virtual(bo);
103         if (old_is_pci)
104                 drm_bo_finish_unmap(bo);
105 #else
106         if (!bo->map_list.map)
107                 return 0;
108
109         drm_bo_unmap_virtual(bo);
110 #endif
111         return 0;
112 }
113
114 static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
115 {
116 #ifdef DRM_ODD_MM_COMPAT
117         int ret;
118
119         if (!bo->map_list.map)
120                 return;
121
122         ret = drm_bo_remap_bound(bo);
123         if (ret) {
124                 DRM_ERROR("Failed to remap a bound buffer object.\n"
125                           "\tThis might cause a sigbus later.\n");
126         }
127         drm_bo_unlock_kmm(bo);
128 #endif
129 }
130
131 /*
132  * Call bo->mutex locked.
133  */
134
135 static int drm_bo_add_ttm(struct drm_buffer_object * bo)
136 {
137         struct drm_device *dev = bo->dev;
138         int ret = 0;
139         bo->ttm = NULL;
140
141         DRM_ASSERT_LOCKED(&bo->mutex);
142
143         switch (bo->type) {
144         case drm_bo_type_dc:
145         case drm_bo_type_kernel:
146                 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
147                 if (!bo->ttm)
148                         ret = -ENOMEM;
149                 break;
150         case drm_bo_type_user:
151         case drm_bo_type_fake:
152                 break;
153         default:
154                 DRM_ERROR("Illegal buffer object type\n");
155                 ret = -EINVAL;
156                 break;
157         }
158
159         return ret;
160 }
161
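/*
 * Call bo->mutex locked.
 * Move the buffer into the memory region described by @mem: create and bind
 * a ttm if the destination needs one, then move via the ttm path, the
 * driver's move hook, or a memcpy fallback, in that order of preference.
 */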
162 static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
163                                   struct drm_bo_mem_reg * mem,
164                                   int evict, int no_wait)
165 {
166         struct drm_device *dev = bo->dev;
167         struct drm_buffer_manager *bm = &dev->bm;
168         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
169         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
170         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
171         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
172         int ret = 0;
173
174         if (old_is_pci || new_is_pci ||
175             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
176                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
177         if (ret)
178                 return ret;
179
180         /*
181          * Create and bind a ttm if required.
182          */
183
184         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
185                 ret = drm_bo_add_ttm(bo);
186                 if (ret)
187                         goto out_err;
188
189                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
190                         ret = drm_bind_ttm(bo->ttm, mem);
191                         if (ret)
192                                 goto out_err;
193                 }
194         }
195
196         if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
197
198                 struct drm_bo_mem_reg *old_mem = &bo->mem;
199                 uint64_t save_flags = old_mem->flags;
200                 uint64_t save_mask = old_mem->mask;
201
202                 *old_mem = *mem;
203                 mem->mm_node = NULL;
204                 old_mem->mask = save_mask;
205                 DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
206
207         } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
208                    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
209
210                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
211
212         } else if (dev->driver->bo_driver->move) {
213                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
214
215         } else {
216
217                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
218
219         }
220
221         if (ret)
222                 goto out_err;
223
224         if (old_is_pci || new_is_pci)
225                 drm_bo_vm_post_move(bo);
226
227         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
228                 ret =
229                     dev->driver->bo_driver->invalidate_caches(dev,
230                                                               bo->mem.flags);
231                 if (ret)
232                         DRM_ERROR("Can not flush read caches\n");
233         }
234
235         DRM_FLAG_MASKED(bo->priv_flags,
236                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
237                         _DRM_BO_FLAG_EVICTED);
238
239         if (bo->mem.mm_node)
240                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
241                         bm->man[bo->mem.mem_type].gpu_offset;
242
243
244         return 0;
245
246       out_err:
247         if (old_is_pci || new_is_pci)
248                 drm_bo_vm_post_move(bo);
249
250         new_man = &bm->man[bo->mem.mem_type];
251         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
252                 drm_ttm_unbind(bo->ttm);
253                 drm_destroy_ttm(bo->ttm);
254                 bo->ttm = NULL;
255         }
256
257         return ret;
258 }
259
260 /*
261  * Call bo->mutex locked.
262  * Wait until the buffer is idle.
263  */
264
265 int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
266                 int no_wait)
267 {
268         int ret;
269
270         DRM_ASSERT_LOCKED(&bo->mutex);
271
272         if (bo->fence) {
273                 if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
274                         drm_fence_usage_deref_unlocked(&bo->fence);
275                         return 0;
276                 }
277                 if (no_wait) {
278                         return -EBUSY;
279                 }
280                 ret =
281                     drm_fence_object_wait(bo->fence, lazy, ignore_signals,
282                                           bo->fence_type);
283                 if (ret)
284                         return ret;
285
286                 drm_fence_usage_deref_unlocked(&bo->fence);
287         }
288         return 0;
289 }
290 EXPORT_SYMBOL(drm_bo_wait);
291
292 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
293 {
294         struct drm_device *dev = bo->dev;
295         struct drm_buffer_manager *bm = &dev->bm;
296
297         if (bo->fence) {
298                 if (bm->nice_mode) {
299                         unsigned long _end = jiffies + 3 * DRM_HZ;
300                         int ret;
301                         do {
302                                 ret = drm_bo_wait(bo, 0, 1, 0);
303                                 if (ret && allow_errors)
304                                         return ret;
305
306                         } while (ret && !time_after_eq(jiffies, _end));
307
308                         if (bo->fence) {
309                                 bm->nice_mode = 0;
310                                 DRM_ERROR("Detected GPU lockup or "
311                                           "fence driver was taken down. "
312                                           "Evicting buffer.\n");
313                         }
314                 }
315                 if (bo->fence)
316                         drm_fence_usage_deref_unlocked(&bo->fence);
317         }
318         return 0;
319 }
320
321 /*
322  * Call dev->struct_mutex locked.
323  * Attempts to remove all private references to a buffer by expiring its
 324  * fence object and removing it from lru lists and memory managers.
325  */
326
327 static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
328 {
329         struct drm_device *dev = bo->dev;
330         struct drm_buffer_manager *bm = &dev->bm;
331
332         DRM_ASSERT_LOCKED(&dev->struct_mutex);
333
334         atomic_inc(&bo->usage);
335         mutex_unlock(&dev->struct_mutex);
336         mutex_lock(&bo->mutex);
337
338         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
339
340         if (bo->fence && drm_fence_object_signaled(bo->fence,
341                                                    bo->fence_type, 0))
342                 drm_fence_usage_deref_unlocked(&bo->fence);
343
344         if (bo->fence && remove_all)
345                 (void)drm_bo_expire_fence(bo, 0);
346
347         mutex_lock(&dev->struct_mutex);
348
349         if (!atomic_dec_and_test(&bo->usage)) {
350                 goto out;
351         }
352
353         if (!bo->fence) {
354                 list_del_init(&bo->lru);
355                 if (bo->mem.mm_node) {
356                         drm_mm_put_block(bo->mem.mm_node);
357                         if (bo->pinned_node == bo->mem.mm_node)
358                                 bo->pinned_node = NULL;
359                         bo->mem.mm_node = NULL;
360                 }
361                 list_del_init(&bo->pinned_lru);
362                 if (bo->pinned_node) {
363                         drm_mm_put_block(bo->pinned_node);
364                         bo->pinned_node = NULL;
365                 }
366                 list_del_init(&bo->ddestroy);
367                 mutex_unlock(&bo->mutex);
368                 drm_bo_destroy_locked(bo);
369                 return;
370         }
371
372         if (list_empty(&bo->ddestroy)) {
373                 drm_fence_object_flush(bo->fence, bo->fence_type);
374                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
375                 schedule_delayed_work(&bm->wq,
376                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
377         }
378
379       out:
380         mutex_unlock(&bo->mutex);
381         return;
382 }
383
384 /*
385  * Verify that refcount is 0 and that there are no internal references
386  * to the buffer object. Then destroy it.
387  */
388
389 static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
390 {
391         struct drm_device *dev = bo->dev;
392         struct drm_buffer_manager *bm = &dev->bm;
393
394         DRM_ASSERT_LOCKED(&dev->struct_mutex);
395
396         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
397             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
398             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
399                 if (bo->fence != NULL) {
400                         DRM_ERROR("Fence was non-zero.\n");
401                         drm_bo_cleanup_refs(bo, 0);
402                         return;
403                 }
404
405 #ifdef DRM_ODD_MM_COMPAT
406                 BUG_ON(!list_empty(&bo->vma_list));
407                 BUG_ON(!list_empty(&bo->p_mm_list));
408 #endif
409
410                 if (bo->ttm) {
411                         drm_ttm_unbind(bo->ttm);
412                         drm_destroy_ttm(bo->ttm);
413                         bo->ttm = NULL;
414                 }
415
416                 atomic_dec(&bm->count);
417
418                 //              BUG_ON(!list_empty(&bo->base.list));
419                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
420
421                 return;
422         }
423
424         /*
425          * Some stuff is still trying to reference the buffer object.
426          * Get rid of those references.
427          */
428
429         drm_bo_cleanup_refs(bo, 0);
430
431         return;
432 }
433
434 /*
435  * Call dev->struct_mutex locked.
436  */
437
438 static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
439 {
440         struct drm_buffer_manager *bm = &dev->bm;
441
442         struct drm_buffer_object *entry, *nentry;
443         struct list_head *list, *next;
444
445         list_for_each_safe(list, next, &bm->ddestroy) {
446                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
447
448                 nentry = NULL;
449                 if (next != &bm->ddestroy) {
450                         nentry = list_entry(next, struct drm_buffer_object,
451                                             ddestroy);
452                         atomic_inc(&nentry->usage);
453                 }
454
455                 drm_bo_cleanup_refs(entry, remove_all);
456
457                 if (nentry) {
458                         atomic_dec(&nentry->usage);
459                 }
460         }
461 }
462
463 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
464 static void drm_bo_delayed_workqueue(void *data)
465 #else
466 static void drm_bo_delayed_workqueue(struct work_struct *work)
467 #endif
468 {
469 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
470         struct drm_device *dev = (struct drm_device *) data;
471         struct drm_buffer_manager *bm = &dev->bm;
472 #else
473         struct drm_buffer_manager *bm =
474             container_of(work, struct drm_buffer_manager, wq.work);
475         struct drm_device *dev = container_of(bm, struct drm_device, bm);
476 #endif
477
478         DRM_DEBUG("Delayed delete Worker\n");
479
480         mutex_lock(&dev->struct_mutex);
481         if (!bm->initialized) {
482                 mutex_unlock(&dev->struct_mutex);
483                 return;
484         }
485         drm_bo_delayed_delete(dev, 0);
486         if (bm->initialized && !list_empty(&bm->ddestroy)) {
487                 schedule_delayed_work(&bm->wq,
488                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
489         }
490         mutex_unlock(&dev->struct_mutex);
491 }
492
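/*
 * Call dev->struct_mutex locked.
 * Drop a usage reference; the buffer object is destroyed when the last
 * reference goes away.
 */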
493 void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
494 {
495         struct drm_buffer_object *tmp_bo = *bo;
 496         *bo = NULL;
497
498         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
499
500         if (atomic_dec_and_test(&tmp_bo->usage)) {
501                 drm_bo_destroy_locked(tmp_bo);
502         }
503 }
504 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
505
506 static void drm_bo_base_deref_locked(struct drm_file * file_priv,
507                                      struct drm_user_object * uo)
508 {
509         struct drm_buffer_object *bo =
510             drm_user_object_entry(uo, struct drm_buffer_object, base);
511
512         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
513
514         drm_bo_takedown_vm_locked(bo);
515         drm_bo_usage_deref_locked(&bo);
516 }
517
518 void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
519 {
520         struct drm_buffer_object *tmp_bo = *bo;
521         struct drm_device *dev = tmp_bo->dev;
522
523         *bo = NULL;
524         if (atomic_dec_and_test(&tmp_bo->usage)) {
525                 mutex_lock(&dev->struct_mutex);
526                 if (atomic_read(&tmp_bo->usage) == 0)
527                         drm_bo_destroy_locked(tmp_bo);
528                 mutex_unlock(&dev->struct_mutex);
529         }
530 }
531 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
532
533 void drm_putback_buffer_objects(struct drm_device *dev)
534 {
535         struct drm_buffer_manager *bm = &dev->bm;
536         struct list_head *list = &bm->unfenced;
537         struct drm_buffer_object *entry, *next;
538
539         mutex_lock(&dev->struct_mutex);
540         list_for_each_entry_safe(entry, next, list, lru) {
541                 atomic_inc(&entry->usage);
542                 mutex_unlock(&dev->struct_mutex);
543
544                 mutex_lock(&entry->mutex);
545                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
546                 mutex_lock(&dev->struct_mutex);
547
548                 list_del_init(&entry->lru);
549                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
550                 DRM_WAKEUP(&entry->event_queue);
551
552                 /*
553                  * FIXME: Might want to put back on head of list
554                  * instead of tail here.
555                  */
556
557                 drm_bo_add_to_lru(entry);
558                 mutex_unlock(&entry->mutex);
559                 drm_bo_usage_deref_locked(&entry);
560         }
561         mutex_unlock(&dev->struct_mutex);
562 }
563 EXPORT_SYMBOL(drm_putback_buffer_objects);
564
565
566 /*
567  * Note. The caller has to register (if applicable)
568  * and deregister fence object usage.
569  */
570
571 int drm_fence_buffer_objects(struct drm_device *dev,
572                              struct list_head *list,
573                              uint32_t fence_flags,
574                              struct drm_fence_object * fence,
575                              struct drm_fence_object ** used_fence)
576 {
577         struct drm_buffer_manager *bm = &dev->bm;
578         struct drm_buffer_object *entry;
579         uint32_t fence_type = 0;
580         uint32_t fence_class = ~0;
581         int count = 0;
582         int ret = 0;
583         struct list_head *l;
584
585         mutex_lock(&dev->struct_mutex);
586
587         if (!list)
588                 list = &bm->unfenced;
589
590         if (fence)
591                 fence_class = fence->class;
592
593         list_for_each_entry(entry, list, lru) {
594                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
595                 fence_type |= entry->new_fence_type;
596                 if (fence_class == ~0)
597                         fence_class = entry->new_fence_class;
598                 else if (entry->new_fence_class != fence_class) {
599                         DRM_ERROR("Unmatching fence classes on unfenced list: "
600                                   "%d and %d.\n",
601                                   fence_class,
602                                   entry->new_fence_class);
603                         ret = -EINVAL;
604                         goto out;
605                 }
606                 count++;
607         }
608
609         if (!count) {
610                 ret = -EINVAL;
611                 goto out;
612         }
613
614         if (fence) {
615                 if ((fence_type & fence->type) != fence_type) {
616                         DRM_ERROR("Given fence doesn't match buffers "
617                                   "on unfenced list.\n");
618                         ret = -EINVAL;
619                         goto out;
620                 }
621         } else {
622                 mutex_unlock(&dev->struct_mutex);
623                 ret = drm_fence_object_create(dev, fence_class, fence_type,
624                                               fence_flags | DRM_FENCE_FLAG_EMIT,
625                                               &fence);
626                 mutex_lock(&dev->struct_mutex);
627                 if (ret)
628                         goto out;
629         }
630
631         count = 0;
632         l = list->next;
633         while (l != list) {
634                 prefetch(l->next);
635                 entry = list_entry(l, struct drm_buffer_object, lru);
636                 atomic_inc(&entry->usage);
637                 mutex_unlock(&dev->struct_mutex);
638                 mutex_lock(&entry->mutex);
639                 mutex_lock(&dev->struct_mutex);
640                 list_del_init(l);
641                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
642                         count++;
643                         if (entry->fence)
644                                 drm_fence_usage_deref_locked(&entry->fence);
645                         entry->fence = drm_fence_reference_locked(fence);
646                         entry->fence_class = entry->new_fence_class;
647                         entry->fence_type = entry->new_fence_type;
648                         DRM_FLAG_MASKED(entry->priv_flags, 0,
649                                         _DRM_BO_FLAG_UNFENCED);
650                         DRM_WAKEUP(&entry->event_queue);
651                         drm_bo_add_to_lru(entry);
652                 }
653                 mutex_unlock(&entry->mutex);
654                 drm_bo_usage_deref_locked(&entry);
655                 l = list->next;
656         }
657         DRM_DEBUG("Fenced %d buffers\n", count);
658       out:
659         mutex_unlock(&dev->struct_mutex);
660         *used_fence = fence;
661         return ret;
662 }
663 EXPORT_SYMBOL(drm_fence_buffer_objects);
664
665 /*
666  * bo->mutex locked
667  */
668
669 static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
670                         int no_wait)
671 {
672         int ret = 0;
673         struct drm_device *dev = bo->dev;
674         struct drm_bo_mem_reg evict_mem;
675
676         /*
677          * Someone might have modified the buffer before we took the buffer mutex.
678          */
679
680         if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
681                 goto out;
682         if (bo->mem.mem_type != mem_type)
683                 goto out;
684
685         ret = drm_bo_wait(bo, 0, 0, no_wait);
686
687         if (ret && ret != -EAGAIN) {
688                 DRM_ERROR("Failed to expire fence before "
689                           "buffer eviction.\n");
690                 goto out;
691         }
692
693         evict_mem = bo->mem;
694         evict_mem.mm_node = NULL;
695
696         if (bo->type == drm_bo_type_fake) {
697                 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
698                 bo->mem.mm_node = NULL;
699                 goto out1;
700         }
701
702         evict_mem = bo->mem;
703         evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
704         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
705
706         if (ret) {
707                 if (ret != -EAGAIN)
708                         DRM_ERROR("Failed to find memory space for "
709                                   "buffer 0x%p eviction.\n", bo);
710                 goto out;
711         }
712
713         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
714
715         if (ret) {
716                 if (ret != -EAGAIN)
717                         DRM_ERROR("Buffer eviction failed\n");
718                 goto out;
719         }
720
721       out1:
722         mutex_lock(&dev->struct_mutex);
723         if (evict_mem.mm_node) {
724                 if (evict_mem.mm_node != bo->pinned_node)
725                         drm_mm_put_block(evict_mem.mm_node);
726                 evict_mem.mm_node = NULL;
727         }
728         list_del(&bo->lru);
729         drm_bo_add_to_lru(bo);
730         mutex_unlock(&dev->struct_mutex);
731
732         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
733                         _DRM_BO_FLAG_EVICTED);
734
735       out:
736         return ret;
737 }
738
739 /**
740  * Repeatedly evict memory from the LRU for @mem_type until we create enough
741  * space, or we've evicted everything and there isn't enough space.
742  */
743 static int drm_bo_mem_force_space(struct drm_device * dev,
744                                   struct drm_bo_mem_reg * mem,
745                                   uint32_t mem_type, int no_wait)
746 {
747         struct drm_mm_node *node;
748         struct drm_buffer_manager *bm = &dev->bm;
749         struct drm_buffer_object *entry;
750         struct drm_mem_type_manager *man = &bm->man[mem_type];
751         struct list_head *lru;
752         unsigned long num_pages = mem->num_pages;
753         int ret;
754
755         mutex_lock(&dev->struct_mutex);
756         do {
757                 node = drm_mm_search_free(&man->manager, num_pages,
758                                           mem->page_alignment, 1);
759                 if (node)
760                         break;
761
762                 lru = &man->lru;
763                 if (lru->next == lru)
764                         break;
765
766                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
767                 atomic_inc(&entry->usage);
768                 mutex_unlock(&dev->struct_mutex);
769                 mutex_lock(&entry->mutex);
770                 BUG_ON(entry->pinned);
771
772                 ret = drm_bo_evict(entry, mem_type, no_wait);
773                 mutex_unlock(&entry->mutex);
774                 drm_bo_usage_deref_unlocked(&entry);
775                 if (ret)
776                         return ret;
777                 mutex_lock(&dev->struct_mutex);
778         } while (1);
779
780         if (!node) {
781                 mutex_unlock(&dev->struct_mutex);
782                 return -ENOMEM;
783         }
784
785         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
786         mutex_unlock(&dev->struct_mutex);
787         mem->mm_node = node;
788         mem->mem_type = mem_type;
789         return 0;
790 }
791
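/*
 * Check whether the memory type managed by @man can satisfy the placement
 * mask @mask; on success the resulting placement flags are returned in
 * @res_mask.
 */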
792 static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
793                                 uint32_t mem_type,
794                                 uint32_t mask, uint32_t * res_mask)
795 {
796         uint32_t cur_flags = drm_bo_type_flags(mem_type);
797         uint32_t flag_diff;
798
799         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
800                 cur_flags |= DRM_BO_FLAG_CACHED;
801         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
802                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
803         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
804                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
805
806         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
807                 return 0;
808
809         if (mem_type == DRM_BO_MEM_LOCAL) {
810                 *res_mask = cur_flags;
811                 return 1;
812         }
813
814         flag_diff = (mask ^ cur_flags);
815         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
816             (!(mask & DRM_BO_FLAG_CACHED) ||
817              (mask & DRM_BO_FLAG_FORCE_CACHING)))
818                 return 0;
819
820         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
821             ((mask & DRM_BO_FLAG_MAPPABLE) ||
822              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
823                 return 0;
824
825         *res_mask = cur_flags;
826         return 1;
827 }
828
829 /**
830  * Creates space for memory region @mem according to its type.
831  *
832  * This function first searches for free space in compatible memory types in
833  * the priority order defined by the driver.  If free space isn't found, then
834  * drm_bo_mem_force_space is attempted in priority order to evict and find
835  * space.
836  */
837 int drm_bo_mem_space(struct drm_buffer_object * bo,
838                      struct drm_bo_mem_reg * mem, int no_wait)
839 {
840         struct drm_device *dev = bo->dev;
841         struct drm_buffer_manager *bm = &dev->bm;
842         struct drm_mem_type_manager *man;
843
844         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
845         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
846         uint32_t i;
847         uint32_t mem_type = DRM_BO_MEM_LOCAL;
848         uint32_t cur_flags;
849         int type_found = 0;
850         int type_ok = 0;
851         int has_eagain = 0;
852         struct drm_mm_node *node = NULL;
853         int ret;
854
855         mem->mm_node = NULL;
856         for (i = 0; i < num_prios; ++i) {
857                 mem_type = prios[i];
858                 man = &bm->man[mem_type];
859
860                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
861                                                &cur_flags);
862
863                 if (!type_ok)
864                         continue;
865
866                 if (mem_type == DRM_BO_MEM_LOCAL)
867                         break;
868
869                 if ((mem_type == bo->pinned_mem_type) &&
870                     (bo->pinned_node != NULL)) {
871                         node = bo->pinned_node;
872                         break;
873                 }
874
875                 mutex_lock(&dev->struct_mutex);
876                 if (man->has_type && man->use_type) {
877                         type_found = 1;
878                         node = drm_mm_search_free(&man->manager, mem->num_pages,
879                                                   mem->page_alignment, 1);
880                         if (node)
881                                 node = drm_mm_get_block(node, mem->num_pages,
882                                                         mem->page_alignment);
883                 }
884                 mutex_unlock(&dev->struct_mutex);
885                 if (node)
886                         break;
887         }
888
889         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
890                 mem->mm_node = node;
891                 mem->mem_type = mem_type;
892                 mem->flags = cur_flags;
893                 return 0;
894         }
895
896         if (!type_found)
897                 return -EINVAL;
898
899         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
900         prios = dev->driver->bo_driver->mem_busy_prio;
901
902         for (i = 0; i < num_prios; ++i) {
903                 mem_type = prios[i];
904                 man = &bm->man[mem_type];
905
906                 if (!man->has_type)
907                         continue;
908
909                 if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
910                         continue;
911
912                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
913
914                 if (ret == 0) {
915                         mem->flags = cur_flags;
916                         return 0;
917                 }
918
919                 if (ret == -EAGAIN)
920                         has_eagain = 1;
921         }
922
923         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
924         return ret;
925 }
926
927 EXPORT_SYMBOL(drm_bo_mem_space);
928
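/*
 * Validate the requested access bits and install @new_mask as the buffer's
 * new placement mask.
 */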
929 static int drm_bo_new_mask(struct drm_buffer_object * bo,
930                            uint64_t new_mask, uint32_t hint)
931 {
932         uint32_t new_props;
933
934         if (bo->type == drm_bo_type_user) {
935                 DRM_ERROR("User buffers are not supported yet\n");
936                 return -EINVAL;
937         }
938
939         new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
940                                 DRM_BO_FLAG_READ);
941
942         if (!new_props) {
943                 DRM_ERROR("Invalid buffer object rwx properties\n");
944                 return -EINVAL;
945         }
946
947         bo->mem.mask = new_mask;
948         return 0;
949 }
950
951 /*
952  * Call dev->struct_mutex locked.
953  */
954
955 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
956                                               uint32_t handle, int check_owner)
957 {
958         struct drm_user_object *uo;
959         struct drm_buffer_object *bo;
960
961         uo = drm_lookup_user_object(file_priv, handle);
962
963         if (!uo || (uo->type != drm_buffer_type)) {
964                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
965                 return NULL;
966         }
967
968         if (check_owner && file_priv != uo->owner) {
969                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
970                         return NULL;
971         }
972
973         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
974         atomic_inc(&bo->usage);
975         return bo;
976 }
977 EXPORT_SYMBOL(drm_lookup_buffer_object);
978
979 /*
980  * Call bo->mutex locked.
981  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
 982  * Unlike drm_bo_busy(), it does not do any fence flushing.
983  */
984
985 static int drm_bo_quick_busy(struct drm_buffer_object * bo)
986 {
987         struct drm_fence_object *fence = bo->fence;
988
989         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
990         if (fence) {
991                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
992                         drm_fence_usage_deref_unlocked(&bo->fence);
993                         return 0;
994                 }
995                 return 1;
996         }
997         return 0;
998 }
999
1000 /*
1001  * Call bo->mutex locked.
1002  * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1003  */
1004
1005 static int drm_bo_busy(struct drm_buffer_object * bo)
1006 {
1007         struct drm_fence_object *fence = bo->fence;
1008
1009         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1010         if (fence) {
1011                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1012                         drm_fence_usage_deref_unlocked(&bo->fence);
1013                         return 0;
1014                 }
1015                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1016                 if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
1017                         drm_fence_usage_deref_unlocked(&bo->fence);
1018                         return 0;
1019                 }
1020                 return 1;
1021         }
1022         return 0;
1023 }
1024
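/*
 * Call bo->mutex locked.
 * Evict the buffer from the TT memory type; used when a cached read
 * mapping has been requested for a buffer that is currently uncached.
 */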
1025 static int drm_bo_read_cached(struct drm_buffer_object * bo)
1026 {
1027         int ret = 0;
1028
1029         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1030         if (bo->mem.mm_node)
1031                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1032         return ret;
1033 }
1034
1035 /*
1036  * Wait until a buffer is unmapped.
1037  */
1038
1039 static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
1040 {
1041         int ret = 0;
1042
1043         if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1044                 return -EBUSY;
1045
1046         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1047                     atomic_read(&bo->mapped) == -1);
1048
1049         if (ret == -EINTR)
1050                 ret = -EAGAIN;
1051
1052         return ret;
1053 }
1054
1055 static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
1056 {
1057         int ret;
1058
1059         mutex_lock(&bo->mutex);
1060         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1061         mutex_unlock(&bo->mutex);
1062         return ret;
1063 }
1064
1065 /*
 1066  * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1067  * Until then, we cannot really do anything with it except delete it.
1068  * The unfenced list is a PITA, and the operations
1069  * 1) validating
1070  * 2) submitting commands
1071  * 3) fencing
 1072  * should really be a single atomic operation.
1073  * We now "solve" this problem by keeping
1074  * the buffer "unfenced" after validating, but before fencing.
1075  */
1076
1077 static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
1078                                 int eagain_if_wait)
1079 {
1080         int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1081
1082         if (ret && no_wait)
1083                 return -EBUSY;
1084         else if (!ret)
1085                 return 0;
1086
1087         ret = 0;
1088         mutex_unlock(&bo->mutex);
1089         DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1090                     !drm_bo_check_unfenced(bo));
1091         mutex_lock(&bo->mutex);
1092         if (ret == -EINTR)
1093                 return -EAGAIN;
1094         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1095         if (ret) {
1096                 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1097                 return -EBUSY;
1098         }
1099         if (eagain_if_wait)
1100                 return -EAGAIN;
1101
1102         return 0;
1103 }
1104
1105 /*
1106  * Fill in the ioctl reply argument with buffer info.
1107  * Bo locked.
1108  */
1109
1110 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
1111                                 struct drm_bo_info_rep *rep)
1112 {
1113         if (!rep)
1114                 return;
1115
1116         rep->handle = bo->base.hash.key;
1117         rep->flags = bo->mem.flags;
1118         rep->size = bo->num_pages * PAGE_SIZE;
1119         rep->offset = bo->offset;
1120         rep->arg_handle = bo->map_list.user_token;
1121         rep->mask = bo->mem.mask;
1122         rep->buffer_start = bo->buffer_start;
1123         rep->fence_flags = bo->fence_type;
1124         rep->rep_flags = 0;
1125         rep->page_alignment = bo->mem.page_alignment;
1126
1127         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1128                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1129                                 DRM_BO_REP_BUSY);
1130         }
1131 }
1132
1133 /*
1134  * Wait for buffer idle and register that we've mapped the buffer.
1135  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1136  * so that if the client dies, the mapping is automatically
1137  * unregistered.
1138  */
1139
1140 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1141                                  uint32_t map_flags, unsigned hint,
1142                                  struct drm_bo_info_rep *rep)
1143 {
1144         struct drm_buffer_object *bo;
1145         struct drm_device *dev = file_priv->head->dev;
1146         int ret = 0;
1147         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1148
1149         mutex_lock(&dev->struct_mutex);
1150         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1151         mutex_unlock(&dev->struct_mutex);
1152
1153         if (!bo)
1154                 return -EINVAL;
1155
1156         mutex_lock(&bo->mutex);
1157         if (!(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
1158                 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1159                 if (ret)
1160                         goto out;
1161         }
1162
1163         /*
1164          * If this returns true, we are currently unmapped.
1165          * We need to do this test, because unmapping can
1166          * be done without the bo->mutex held.
1167          */
1168
1169         while (1) {
1170                 if (atomic_inc_and_test(&bo->mapped)) {
1171                         if (no_wait && drm_bo_busy(bo)) {
1172                                 atomic_dec(&bo->mapped);
1173                                 ret = -EBUSY;
1174                                 goto out;
1175                         }
1176                         ret = drm_bo_wait(bo, 0, 0, no_wait);
1177                         if (ret) {
1178                                 atomic_dec(&bo->mapped);
1179                                 goto out;
1180                         }
1181
1182                         if ((map_flags & DRM_BO_FLAG_READ) &&
1183                             (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1184                             (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1185                                 drm_bo_read_cached(bo);
1186                         }
1187                         break;
1188                 } else if ((map_flags & DRM_BO_FLAG_READ) &&
1189                            (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
1190                            (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
1191
1192                         /*
1193                          * We are already mapped with different flags.
1194                          * need to wait for unmap.
1195                          */
1196
1197                         ret = drm_bo_wait_unmapped(bo, no_wait);
1198                         if (ret)
1199                                 goto out;
1200
1201                         continue;
1202                 }
1203                 break;
1204         }
1205
1206         mutex_lock(&dev->struct_mutex);
1207         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1208         mutex_unlock(&dev->struct_mutex);
1209         if (ret) {
1210                 if (atomic_add_negative(-1, &bo->mapped))
1211                         DRM_WAKEUP(&bo->event_queue);
1212
1213         } else
1214                 drm_bo_fill_rep_arg(bo, rep);
1215       out:
1216         mutex_unlock(&bo->mutex);
1217         drm_bo_usage_deref_unlocked(&bo);
1218         return ret;
1219 }
1220
1221 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1222 {
1223         struct drm_device *dev = file_priv->head->dev;
1224         struct drm_buffer_object *bo;
1225         struct drm_ref_object *ro;
1226         int ret = 0;
1227
1228         mutex_lock(&dev->struct_mutex);
1229
1230         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1231         if (!bo) {
1232                 ret = -EINVAL;
1233                 goto out;
1234         }
1235
1236         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1237         if (!ro) {
1238                 ret = -EINVAL;
1239                 goto out;
1240         }
1241
1242         drm_remove_ref_object(file_priv, ro);
1243         drm_bo_usage_deref_locked(&bo);
1244       out:
1245         mutex_unlock(&dev->struct_mutex);
1246         return ret;
1247 }
1248
1249 /*
 1250  * Call dev->struct_mutex locked.
1251  */
1252
1253 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1254                                          struct drm_user_object * uo,
1255                                          enum drm_ref_type action)
1256 {
1257         struct drm_buffer_object *bo =
1258             drm_user_object_entry(uo, struct drm_buffer_object, base);
1259
1260         /*
1261          * We DON'T want to take the bo->lock here, because we want to
1262          * hold it when we wait for unmapped buffer.
1263          */
1264
1265         BUG_ON(action != _DRM_REF_TYPE1);
1266
1267         if (atomic_add_negative(-1, &bo->mapped))
1268                 DRM_WAKEUP(&bo->event_queue);
1269 }
1270
1271 /*
1272  * bo->mutex locked.
1273  * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1274  */
1275
1276 int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
1277                        int no_wait, int move_unfenced)
1278 {
1279         struct drm_device *dev = bo->dev;
1280         struct drm_buffer_manager *bm = &dev->bm;
1281         int ret = 0;
1282         struct drm_bo_mem_reg mem;
1283         /*
1284          * Flush outstanding fences.
1285          */
1286
1287         drm_bo_busy(bo);
1288
1289         /*
1290          * Wait for outstanding fences.
1291          */
1292
1293         ret = drm_bo_wait(bo, 0, 0, no_wait);
1294         if (ret)
1295                 return ret;
1296
1297         mem.num_pages = bo->num_pages;
1298         mem.size = mem.num_pages << PAGE_SHIFT;
1299         mem.mask = new_mem_flags;
1300         mem.page_alignment = bo->mem.page_alignment;
1301
1302         mutex_lock(&bm->evict_mutex);
1303         mutex_lock(&dev->struct_mutex);
1304         list_del(&bo->lru);
1305         list_add_tail(&bo->lru, &bm->unfenced);
1306         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1307                         _DRM_BO_FLAG_UNFENCED);
1308         mutex_unlock(&dev->struct_mutex);
1309
1310         /*
1311          * Determine where to move the buffer.
1312          */
1313         ret = drm_bo_mem_space(bo, &mem, no_wait);
1314         if (ret)
1315                 goto out_unlock;
1316
1317         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1318
1319  out_unlock:
1320         if (ret || !move_unfenced) {
1321                 mutex_lock(&dev->struct_mutex);
1322                 if (mem.mm_node) {
1323                         if (mem.mm_node != bo->pinned_node)
1324                                 drm_mm_put_block(mem.mm_node);
1325                         mem.mm_node = NULL;
1326                 }
1327                 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
1328                 DRM_WAKEUP(&bo->event_queue);
1329                 list_del(&bo->lru);
1330                 drm_bo_add_to_lru(bo);
1331                 mutex_unlock(&dev->struct_mutex);
1332         }
1333
1334         mutex_unlock(&bm->evict_mutex);
1335         return ret;
1336 }
1337
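/*
 * Check whether the buffer's current placement and caching already satisfy
 * the requested mask, in which case no move is needed.
 */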
1338 static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
1339 {
1340         uint32_t flag_diff = (mem->mask ^ mem->flags);
1341
1342         if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1343                 return 0;
1344         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1345             (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
1346              (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
1347           return 0;
1348         }
1349         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1350             ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1351              (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1352                 return 0;
1353         return 1;
1354 }
1355
1356 static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
1357 {
1358         struct drm_buffer_manager *bm = &dev->bm;
1359         struct drm_mem_type_manager *man;
1360         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
1361         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
1362         uint32_t i;
1363         int type_ok = 0;
1364         uint32_t mem_type = 0;
1365         uint32_t cur_flags;
1366
1367         if (drm_bo_mem_compat(mem))
1368                 return 0;
1369
1370         BUG_ON(mem->mm_node);
1371
1372         for (i = 0; i < num_prios; ++i) {
1373                 mem_type = prios[i];
1374                 man = &bm->man[mem_type];
1375                 type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
1376                                                &cur_flags);
1377                 if (type_ok)
1378                         break;
1379         }
1380
1381         if (type_ok) {
1382                 mem->mm_node = NULL;
1383                 mem->mem_type = mem_type;
1384                 mem->flags = cur_flags;
1385                 DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
1386                 return 0;
1387         }
1388
1389         DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
1390                   (unsigned long long) mem->mask);
1391         return -EINVAL;
1392 }
1393
1394 /*
1395  * bo locked.
1396  */
1397
1398 static int drm_buffer_object_validate(struct drm_buffer_object * bo,
1399                                       uint32_t fence_class,
1400                                       int move_unfenced, int no_wait)
1401 {
1402         struct drm_device *dev = bo->dev;
1403         struct drm_buffer_manager *bm = &dev->bm;
1404         struct drm_bo_driver *driver = dev->driver->bo_driver;
1405         uint32_t ftype;
1406         int ret;
1407
1408         DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1409                   (unsigned long long) bo->mem.mask,
1410                   (unsigned long long) bo->mem.flags);
1411
1412         ret = driver->fence_type(bo, &fence_class, &ftype);
1413
1414         if (ret) {
1415                 DRM_ERROR("Driver did not support given buffer permissions\n");
1416                 return ret;
1417         }
1418
1419         if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
1420                 DRM_ERROR("Attempt to validate pinned buffer into different memory "
1421                     "type\n");
1422                 return -EINVAL;
1423         }
1424
1425         /*
1426          * We're switching command submission mechanism,
1427          * or cannot simply rely on the hardware serializing for us.
1428          *
1429          * Wait for buffer idle.
1430          */
1431
1432         if ((fence_class != bo->fence_class) ||
1433             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1434
1435                 ret = drm_bo_wait(bo, 0, 0, no_wait);
1436
1437                 if (ret)
1438                         return ret;
1439
1440         }
1441
1442         bo->new_fence_class = fence_class;
1443         bo->new_fence_type = ftype;
1444
1445         ret = drm_bo_wait_unmapped(bo, no_wait);
1446         if (ret) {
1447                 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1448                 return ret;
1449         }
1450         if (bo->type == drm_bo_type_fake) {
1451                 ret = drm_bo_check_fake(dev, &bo->mem);
1452                 if (ret)
1453                         return ret;
1454         }
1455
1456         /*
1457          * Check whether we need to move buffer.
1458          */
1459
1460         if (!drm_bo_mem_compat(&bo->mem)) {
1461                 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1462                                          move_unfenced);
1463                 if (ret) {
1464                         if (ret != -EAGAIN)
1465                                 DRM_ERROR("Failed moving buffer.\n");
1466                         return ret;
1467                 }
1468         }
1469
1470         /*
1471          * We might need to add a TTM.
1472          */
1473
1474         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1475                 ret = drm_bo_add_ttm(bo);
1476                 if (ret)
1477                         return ret;
1478         }
1479         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1480
1481         /*
1482          * Finally, adjust lru to be sure.
1483          */
1484
1485         mutex_lock(&dev->struct_mutex);
1486         list_del(&bo->lru);
1487         if (move_unfenced) {
1488                 list_add_tail(&bo->lru, &bm->unfenced);
1489                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1490                                 _DRM_BO_FLAG_UNFENCED);
1491         } else {
1492                 drm_bo_add_to_lru(bo);
1493                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1494                         DRM_WAKEUP(&bo->event_queue);
1495                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1496                                         _DRM_BO_FLAG_UNFENCED);
1497                 }
1498         }
1499         mutex_unlock(&dev->struct_mutex);
1500
1501         return 0;
1502 }
1503
1504 int drm_bo_do_validate(struct drm_buffer_object *bo,
1505                        uint64_t flags, uint64_t mask, uint32_t hint,
1506                        uint32_t fence_class,
1507                        int no_wait,
1508                        struct drm_bo_info_rep *rep)
1509 {
1510         int ret;
1511
1512         mutex_lock(&bo->mutex);
1513         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1514
1515         if (ret)
1516                 goto out;
1517
1518
1519         DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1520         ret = drm_bo_new_mask(bo, flags, hint);
1521         if (ret)
1522                 goto out;
1523
1524         ret = drm_buffer_object_validate(bo,
1525                                          fence_class,
1526                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1527                                          no_wait);
1528 out:
1529         if (rep)
1530                 drm_bo_fill_rep_arg(bo, rep);
1531
1532         mutex_unlock(&bo->mutex);
1533         return ret;
1534 }
1535 EXPORT_SYMBOL(drm_bo_do_validate);
1536
1537
1538 int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
1539                            uint32_t fence_class,
1540                            uint64_t flags, uint64_t mask, uint32_t hint,
1541                            struct drm_bo_info_rep * rep,
1542                            struct drm_buffer_object **bo_rep)
1543 {
1544         struct drm_device *dev = file_priv->head->dev;
1545         struct drm_buffer_object *bo;
1546         int ret;
1547         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1548
1549         mutex_lock(&dev->struct_mutex);
1550         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1551         mutex_unlock(&dev->struct_mutex);
1552
1553         if (!bo) {
1554                 return -EINVAL;
1555         }
1556
1557         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1558                                  no_wait, rep);
1559
1560         if (!ret && bo_rep)
1561                 *bo_rep = bo;
1562         else
1563                 drm_bo_usage_deref_unlocked(&bo);
1564
1565         return ret;
1566 }
1567 EXPORT_SYMBOL(drm_bo_handle_validate);
1568
1569 /**
1570  * Fills out the generic buffer object ioctl reply with the information for
1571  * the BO with id of handle.
1572  */
1573 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1574                               struct drm_bo_info_rep *rep)
1575 {
1576         struct drm_device *dev = file_priv->head->dev;
1577         struct drm_buffer_object *bo;
1578
1579         mutex_lock(&dev->struct_mutex);
1580         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1581         mutex_unlock(&dev->struct_mutex);
1582
1583         if (!bo) {
1584                 return -EINVAL;
1585         }
1586         mutex_lock(&bo->mutex);
1587         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1588                 (void)drm_bo_busy(bo);
1589         drm_bo_fill_rep_arg(bo, rep);
1590         mutex_unlock(&bo->mutex);
1591         drm_bo_usage_deref_unlocked(&bo);
1592         return 0;
1593 }
1594
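/*
 * Wait for the buffer object with the given handle to become idle,
 * honouring the DONT_BLOCK and WAIT_LAZY hints, and fill in the info
 * reply for user space.
 */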
1595 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1596                               uint32_t hint,
1597                               struct drm_bo_info_rep *rep)
1598 {
1599         struct drm_device *dev = file_priv->head->dev;
1600         struct drm_buffer_object *bo;
1601         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1602         int ret;
1603
1604         mutex_lock(&dev->struct_mutex);
1605         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1606         mutex_unlock(&dev->struct_mutex);
1607
1608         if (!bo) {
1609                 return -EINVAL;
1610         }
1611
1612         mutex_lock(&bo->mutex);
1613         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1614         if (ret)
1615                 goto out;
1616         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1617         if (ret)
1618                 goto out;
1619
1620         drm_bo_fill_rep_arg(bo, rep);
1621
1622       out:
1623         mutex_unlock(&bo->mutex);
1624         drm_bo_usage_deref_unlocked(&bo);
1625         return ret;
1626 }
1627
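/*
 * Create and initialize a new buffer object.
 *
 * Allocates the object, sets up its initial local memory placement,
 * creates the mmap offset for drm_bo_type_dc objects, attaches a TTM
 * and puts the buffer on the appropriate LRU list. On success the new,
 * referenced object is returned through buf_obj.
 */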
1628 int drm_buffer_object_create(struct drm_device *dev,
1629                              unsigned long size,
1630                              enum drm_bo_type type,
1631                              uint64_t mask,
1632                              uint32_t hint,
1633                              uint32_t page_alignment,
1634                              unsigned long buffer_start,
1635                              struct drm_buffer_object ** buf_obj)
1636 {
1637         struct drm_buffer_manager *bm = &dev->bm;
1638         struct drm_buffer_object *bo;
1639         struct drm_bo_driver *driver = dev->driver->bo_driver;
1640         int ret = 0;
1641         unsigned long num_pages;
1642
1643         if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
1644                 DRM_ERROR("Invalid buffer object start.\n");
1645                 return -EINVAL;
1646         }
1647         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1648         if (num_pages == 0) {
1649                 DRM_ERROR("Illegal buffer object size.\n");
1650                 return -EINVAL;
1651         }
1652
1653         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1654
1655         if (!bo)
1656                 return -ENOMEM;
1657
1658         mutex_init(&bo->mutex);
1659         mutex_lock(&bo->mutex);
1660
1661         atomic_set(&bo->usage, 1);
1662         atomic_set(&bo->mapped, -1);
1663         DRM_INIT_WAITQUEUE(&bo->event_queue);
1664         INIT_LIST_HEAD(&bo->lru);
1665         INIT_LIST_HEAD(&bo->pinned_lru);
1666         INIT_LIST_HEAD(&bo->ddestroy);
1667 #ifdef DRM_ODD_MM_COMPAT
1668         INIT_LIST_HEAD(&bo->p_mm_list);
1669         INIT_LIST_HEAD(&bo->vma_list);
1670 #endif
1671         bo->dev = dev;
1672         bo->type = type;
1673         bo->num_pages = num_pages;
1674         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1675         bo->mem.num_pages = bo->num_pages;
1676         bo->mem.mm_node = NULL;
1677         bo->mem.page_alignment = page_alignment;
1678         if (bo->type == drm_bo_type_fake) {
1679                 bo->offset = buffer_start;
1680                 bo->buffer_start = 0;
1681         } else {
1682                 bo->buffer_start = buffer_start;
1683         }
1684         bo->priv_flags = 0;
1685         bo->mem.flags = 0ULL;
1686         bo->mem.mask = 0ULL;
1687         atomic_inc(&bm->count);
1688         ret = drm_bo_new_mask(bo, mask, hint);
1689
1690         if (ret)
1691                 goto out_err;
1692
1693         if (bo->type == drm_bo_type_dc) {
1694                 mutex_lock(&dev->struct_mutex);
1695                 ret = drm_bo_setup_vm_locked(bo);
1696                 mutex_unlock(&dev->struct_mutex);
1697                 if (ret)
1698                         goto out_err;
1699         }
1700
1701         bo->fence_class = 0;
1702         ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
1703         if (ret) {
1704                 DRM_ERROR("Driver did not support given buffer permissions\n");
1705                 goto out_err;
1706         }
1707
1708         if (bo->type == drm_bo_type_fake) {
1709                 ret = drm_bo_check_fake(dev, &bo->mem);
1710                 if (ret)
1711                         goto out_err;
1712         }
1713
1714         ret = drm_bo_add_ttm(bo);
1715         if (ret)
1716                 goto out_err;
1717
1718         mutex_lock(&dev->struct_mutex);
1719         drm_bo_add_to_lru(bo);
1720         mutex_unlock(&dev->struct_mutex);
1721
1722         mutex_unlock(&bo->mutex);
1723         *buf_obj = bo;
1724         return 0;
1725
1726       out_err:
1727         mutex_unlock(&bo->mutex);
1728
1729         drm_bo_usage_deref_unlocked(&bo);
1730         return ret;
1731 }
1732 EXPORT_SYMBOL(drm_buffer_object_create);
1733
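/*
 * Make a buffer object visible to user space by adding it to the
 * file's user object hash, optionally as a shareable object, and hook
 * up the removal and unreference callbacks.
 */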
1734 int drm_bo_add_user_object(struct drm_file *file_priv,
1735                            struct drm_buffer_object *bo, int shareable)
1736 {
1737         struct drm_device *dev = file_priv->head->dev;
1738         int ret;
1739
1740         mutex_lock(&dev->struct_mutex);
1741         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1742         if (ret)
1743                 goto out;
1744
1745         bo->base.remove = drm_bo_base_deref_locked;
1746         bo->base.type = drm_buffer_type;
1747         bo->base.ref_struct_locked = NULL;
1748         bo->base.unref = drm_buffer_user_object_unmap;
1749
1750       out:
1751         mutex_unlock(&dev->struct_mutex);
1752         return ret;
1753 }
1754 EXPORT_SYMBOL(drm_bo_add_user_object);
1755
1756 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
1757 {
1758         LOCK_TEST_WITH_RETURN(dev, file_priv);
1759         return 0;
1760 }
1761
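/*
 * Process a user-space chain of buffer object operation requests.
 *
 * Each drm_bo_op_arg may point to the next one through arg->next;
 * entries already marked as handled are skipped, and the result of
 * each request is written back into its argument before moving on.
 */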
1762 int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1763 {
1764         struct drm_bo_op_arg curarg;
1765         struct drm_bo_op_arg *arg = data;
1766         struct drm_bo_op_req *req = &arg->d.req;
1767         struct drm_bo_info_rep rep = {0};
1769         unsigned long next = 0;
1770         void __user *curuserarg = NULL;
1771         int ret;
1772
1773         DRM_DEBUG("drm_bo_op_ioctl\n");
1774
1775         if (!dev->bm.initialized) {
1776                 DRM_ERROR("Buffer object manager is not initialized.\n");
1777                 return -EINVAL;
1778         }
1779
1780         do {
1781                 if (next != 0) {
1782                         curuserarg = (void __user *)next;
1783                         if (copy_from_user(&curarg, curuserarg,
1784                                            sizeof(curarg)) != 0)
1785                                 return -EFAULT;
1786                         arg = &curarg;
1787                 }
1788
1789                 if (arg->handled) {
1790                         next = arg->next;
1791                         continue;
1792                 }
1793                 req = &arg->d.req;
1794                 ret = 0;
1795                 switch (req->op) {
1796                 case drm_bo_validate:
1797                         ret = drm_bo_lock_test(dev, file_priv);
1798                         if (ret)
1799                                 break;
1800                         ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
1801                                                      req->bo_req.fence_class,
1802                                                      req->bo_req.flags,
1803                                                      req->bo_req.mask,
1804                                                      req->bo_req.hint,
1805                                                      &rep, NULL);
1806                         break;
1807                 case drm_bo_fence:
1808                         ret = -EINVAL;
1809                         DRM_ERROR("Function is not implemented yet.\n");
1810                         break;
1811                 case drm_bo_ref_fence:
1812                         ret = -EINVAL;
1813                         DRM_ERROR("Function is not implemented yet.\n");
1814                         break;
1815                 default:
1816                         ret = -EINVAL;
1817                 }
1818                 next = arg->next;
1819
1820                 /*
1821                  * A signal interrupted us. Make sure the ioctl is restartable.
1822                  */
1823
1824                 if (ret == -EAGAIN)
1825                         return -EAGAIN;
1826
1827                 arg->handled = 1;
1828                 arg->d.rep.ret = ret;
1829                 arg->d.rep.bo_info = rep;
1830                 if (arg != data) {
1831                         if (copy_to_user(curuserarg, &curarg,
1832                                          sizeof(curarg)) != 0)
1833                                 return -EFAULT;
1834                 }
1835         } while (next != 0);
1836         return 0;
1837 }
1838
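/*
 * Ioctl entry point for creating a buffer object and exposing it to
 * user space through a handle in the reply.
 */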
1839 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1840 {
1841         struct drm_bo_create_arg *arg = data;
1842         struct drm_bo_create_req *req = &arg->d.req;
1843         struct drm_bo_info_rep *rep = &arg->d.rep;
1844         struct drm_buffer_object *entry;
1845         int ret = 0;
1846
1847         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
1848             (int)(req->size / 1024), req->page_alignment * 4, req->type);
1849
1850         if (!dev->bm.initialized) {
1851                 DRM_ERROR("Buffer object manager is not initialized.\n");
1852                 return -EINVAL;
1853         }
1854         if (req->type == drm_bo_type_fake)
1855                 LOCK_TEST_WITH_RETURN(dev, file_priv);
1856
1857         ret = drm_buffer_object_create(file_priv->head->dev,
1858                                        req->size, req->type, req->mask,
1859                                        req->hint, req->page_alignment,
1860                                        req->buffer_start, &entry);
1861         if (ret)
1862                 goto out;
1863
1864         ret = drm_bo_add_user_object(file_priv, entry,
1865                                      req->mask & DRM_BO_FLAG_SHAREABLE);
1866         if (ret) {
1867                 drm_bo_usage_deref_unlocked(&entry);
1868                 goto out;
1869         }
1870
1871         mutex_lock(&entry->mutex);
1872         drm_bo_fill_rep_arg(entry, rep);
1873         mutex_unlock(&entry->mutex);
1874
1875 out:
1876         return ret;
1877 }
1878
1879
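/*
 * Ioctl entry point for destroying a buffer object handle. Only the
 * owner of the handle may remove it; the object itself goes away when
 * its last reference is dropped.
 */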
1880 int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1881 {
1882         struct drm_bo_handle_arg *arg = data;
1883         struct drm_user_object *uo;
1884         int ret = 0;
1885
1886         DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
1887
1888         if (!dev->bm.initialized) {
1889                 DRM_ERROR("Buffer object manager is not initialized.\n");
1890                 return -EINVAL;
1891         }
1892
1893         mutex_lock(&dev->struct_mutex);
1894         uo = drm_lookup_user_object(file_priv, arg->handle);
1895         if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
1896                 mutex_unlock(&dev->struct_mutex);
1897                 return -EINVAL;
1898         }
1899         ret = drm_remove_user_object(file_priv, uo);
1900         mutex_unlock(&dev->struct_mutex);
1901
1902         return ret;
1903 }
1904
1905 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1906 {
1907         struct drm_bo_map_wait_idle_arg *arg = data;
1908         struct drm_bo_info_req *req = &arg->d.req;
1909         struct drm_bo_info_rep *rep = &arg->d.rep;
1910         int ret;
1911
1912         DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);
1913
1914         if (!dev->bm.initialized) {
1915                 DRM_ERROR("Buffer object manager is not initialized.\n");
1916                 return -EINVAL;
1917         }
1918
1919         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1920                                     req->hint, rep);
1921         if (ret)
1922                 return ret;
1923
1924         return 0;
1925 }
1926
1927 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1928 {
1929         struct drm_bo_handle_arg *arg = data;
1930         int ret;
1931
1932         DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);
1933
1934         if (!dev->bm.initialized) {
1935                 DRM_ERROR("Buffer object manager is not initialized.\n");
1936                 return -EINVAL;
1937         }
1938
1939         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1940         return ret;
1941 }
1942
1943
1944 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1945 {
1946         struct drm_bo_reference_info_arg *arg = data;
1947         struct drm_bo_handle_arg *req = &arg->d.req;
1948         struct drm_bo_info_rep *rep = &arg->d.rep;
1949         struct drm_user_object *uo;
1950         int ret;
1951
1952         DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);
1953
1954         if (!dev->bm.initialized) {
1955                 DRM_ERROR("Buffer object manager is not initialized.\n");
1956                 return -EINVAL;
1957         }
1958
1959         ret = drm_user_object_ref(file_priv, req->handle,
1960                                   drm_buffer_type, &uo);
1961         if (ret)
1962                 return ret;
1963
1964         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1965         if (ret)
1966                 return ret;
1967
1968         return 0;
1969 }
1970
1971 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1972 {
1973         struct drm_bo_handle_arg *arg = data;
1974         int ret = 0;
1975
1976         DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);
1977
1978         if (!dev->bm.initialized) {
1979                 DRM_ERROR("Buffer object manager is not initialized.\n");
1980                 return -EINVAL;
1981         }
1982
1983         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1984         return ret;
1985 }
1986
1987 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1988 {
1989         struct drm_bo_reference_info_arg *arg = data;
1990         struct drm_bo_handle_arg *req = &arg->d.req;
1991         struct drm_bo_info_rep *rep = &arg->d.rep;
1992         int ret;
1993
1994         DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);
1995
1996         if (!dev->bm.initialized) {
1997                 DRM_ERROR("Buffer object manager is not initialized.\n");
1998                 return -EINVAL;
1999         }
2000
2001         ret = drm_bo_handle_info(file_priv, req->handle, rep);
2002         if (ret)
2003                 return ret;
2004
2005         return 0;
2006 }
2007
2008 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2009 {
2010         struct drm_bo_map_wait_idle_arg *arg = data;
2011         struct drm_bo_info_req *req = &arg->d.req;
2012         struct drm_bo_info_rep *rep = &arg->d.rep;
2013         int ret;
2014
2015         DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);
2016
2017         if (!dev->bm.initialized) {
2018                 DRM_ERROR("Buffer object manager is not initialized.\n");
2019                 return -EINVAL;
2020         }
2021
2022         ret = drm_bo_handle_wait(file_priv, req->handle,
2023                                  req->hint, rep);
2024         if (ret)
2025                 return ret;
2026
2027         return 0;
2028 }
2029
2030 /**
2031  * Pins or unpins the given buffer object in the given memory area.
2032  *
2033  * Pinned buffers will not be evicted from, or moved within, their memory area.
2034  * Must be called with the hardware lock held for pinning.
2035  */
2036 static int
2037 drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
2038     int pin)
2039 {
2040         int ret = 0;
2041
2042         mutex_lock(&bo->mutex);
2043         if (bo->pinned == pin) {
2044                 mutex_unlock(&bo->mutex);
2045                 return 0;
2046         }
2047
2048         if (pin) {
2049                 ret = drm_bo_wait_unfenced(bo, 0, 0);
2050                 if (ret) {
2051                         mutex_unlock(&bo->mutex);
2052                         return ret;
2053                 }
2054
2055                 /* Validate the buffer into its pinned location, with no
2056                  * pending fence.
2057                  */
2058                 ret = drm_buffer_object_validate(bo, 0, 0, 0);
2059                 if (ret) {
2060                         mutex_unlock(&bo->mutex);
2061                         return ret;
2062                 }
2063
2064                 /* Pull the buffer off of the LRU and add it to the pinned
2065                  * list
2066                  */
2067                 bo->pinned_mem_type = bo->mem.mem_type;
2068                 mutex_lock(&dev->struct_mutex);
2069                 list_del_init(&bo->lru);
2070                 list_del_init(&bo->pinned_lru);
2071                 drm_bo_add_to_pinned_lru(bo);
2072
2073                 if (bo->pinned_node != bo->mem.mm_node) {
2074                         if (bo->pinned_node != NULL)
2075                                 drm_mm_put_block(bo->pinned_node);
2076                         bo->pinned_node = bo->mem.mm_node;
2077                 }
2078
2079                 bo->pinned = pin;
2080                 mutex_unlock(&dev->struct_mutex);
2081
2082         } else {
2083                 mutex_lock(&dev->struct_mutex);
2084
2085                 /* Remove our buffer from the pinned list */
2086                 if (bo->pinned_node != NULL && bo->pinned_node != bo->mem.mm_node)
2087                         drm_mm_put_block(bo->pinned_node);
2088
2089                 list_del_init(&bo->pinned_lru);
2090                 bo->pinned_node = NULL;
2091                 bo->pinned = pin;
2092                 mutex_unlock(&dev->struct_mutex);
2093         }
2094         mutex_unlock(&bo->mutex);
2095         return 0;
2096 }
2097
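/*
 * Ioctl entry point for pinning and unpinning buffer objects. Pinning
 * requires the hardware lock to be held by the caller.
 */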
2098 int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
2099                          struct drm_file *file_priv)
2100 {
2101         struct drm_bo_set_pin_arg *arg = data;
2102         struct drm_bo_set_pin_req *req = &arg->d.req;
2103         struct drm_bo_info_rep *rep = &arg->d.rep;
2104         struct drm_buffer_object *bo;
2105         int ret;
2106
2107         DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
2108             req->handle, req->pin);
2109
2110         if (!dev->bm.initialized) {
2111                 DRM_ERROR("Buffer object manager is not initialized.\n");
2112                 return -EINVAL;
2113         }
2114
2115         if (req->pin < 0 || req->pin > 1) {
2116                 DRM_ERROR("Bad arguments to set_pin\n");
2117                 return -EINVAL;
2118         }
2119
2120         if (req->pin)
2121                 LOCK_TEST_WITH_RETURN(dev, file_priv);
2122
2123         mutex_lock(&dev->struct_mutex);
2124         bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
2125         mutex_unlock(&dev->struct_mutex);
2126         if (!bo) {
2127                 return -EINVAL;
2128         }
2129
2130         ret = drm_bo_set_pin(dev, bo, req->pin);
2131         if (ret) {
2132                 drm_bo_usage_deref_unlocked(&bo);
2133                 return ret;
2134         }
2135
2136         drm_bo_fill_rep_arg(bo, rep);
2137         drm_bo_usage_deref_unlocked(&bo);
2138
2139         return 0;
2140 }
2141
2142
2143 /**
2144  * Clean the unfenced list and put the buffers on the regular LRU.
2145  * This is part of the memory manager cleanup and should only be
2146  * called with the DRI lock held.
2147  * Call with dev->struct_mutex held.
2148  */
2149
2150 static void drm_bo_clean_unfenced(struct drm_device *dev)
2151 {
2152         struct drm_buffer_manager *bm  = &dev->bm;
2153         struct list_head *head, *list;
2154         struct drm_buffer_object *entry;
2155         struct drm_fence_object *fence = NULL;
2156
2157         head = &bm->unfenced;
2158
2159         if (list_empty(head))
2160                 return;
2161
2162         DRM_ERROR("Clean unfenced\n");
2163
2164         if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
2165
2166                 /*
2167                  * Fixme: Should really wait here.
2168                  */
2169         }
2170
2171         if (fence)
2172                 drm_fence_usage_deref_locked(&fence);
2173
2174         if (list_empty(head))
2175                 return;
2176
2177         DRM_ERROR("Really clean unfenced\n");
2178
2179         list = head->next;
2180         while(list != head) {
2181                 prefetch(list->next);
2182                 entry = list_entry(list, struct drm_buffer_object, lru);
2183
2184                 atomic_inc(&entry->usage);
2185                 mutex_unlock(&dev->struct_mutex);
2186                 mutex_lock(&entry->mutex);
2187                 mutex_lock(&dev->struct_mutex);
2188
2189                 list_del(&entry->lru);
2190                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
2191                 drm_bo_add_to_lru(entry);
2192                 mutex_unlock(&entry->mutex);
2193                 list = head->next;
2194         }
2195 }
2196
2197 static int drm_bo_leave_list(struct drm_buffer_object * bo,
2198                              uint32_t mem_type,
2199                              int free_pinned, int allow_errors)
2200 {
2201         struct drm_device *dev = bo->dev;
2202         int ret = 0;
2203
2204         mutex_lock(&bo->mutex);
2205
2206         ret = drm_bo_expire_fence(bo, allow_errors);
2207         if (ret)
2208                 goto out;
2209
2210         if (free_pinned) {
2211                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2212                 mutex_lock(&dev->struct_mutex);
2213                 list_del_init(&bo->pinned_lru);
2214                 if (bo->pinned_node == bo->mem.mm_node)
2215                         bo->pinned_node = NULL;
2216                 if (bo->pinned_node != NULL) {
2217                         drm_mm_put_block(bo->pinned_node);
2218                         bo->pinned_node = NULL;
2219                 }
2220                 mutex_unlock(&dev->struct_mutex);
2221         }
2222
2223         if (bo->pinned) {
2224                 DRM_ERROR("A pinned buffer was present at "
2225                           "cleanup. Removing flag and evicting.\n");
2226                 bo->pinned = 0;
2227         }
2228
2229         if (bo->mem.mem_type == mem_type)
2230                 ret = drm_bo_evict(bo, mem_type, 0);
2231
2232         if (ret) {
2233                 if (allow_errors) {
2234                         goto out;
2235                 } else {
2236                         ret = 0;
2237                         DRM_ERROR("Cleanup eviction failed\n");
2238                 }
2239         }
2240
2241       out:
2242         mutex_unlock(&bo->mutex);
2243         return ret;
2244 }
2245
2246
2247 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2248                                          int pinned_list)
2249 {
2250         if (pinned_list)
2251                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2252         else
2253                 return list_entry(list, struct drm_buffer_object, lru);
2254 }
2255
2256 /*
2257  * dev->struct_mutex locked.
2258  */
2259
2260 static int drm_bo_force_list_clean(struct drm_device * dev,
2261                                    struct list_head *head,
2262                                    unsigned mem_type,
2263                                    int free_pinned,
2264                                    int allow_errors,
2265                                    int pinned_list)
2266 {
2267         struct list_head *list, *next, *prev;
2268         struct drm_buffer_object *entry, *nentry;
2269         int ret;
2270         int do_restart;
2271
2272         /*
2273          * The list traversal is a bit odd here, because an item may
2274          * disappear from the list when we release the struct_mutex or
2275          * when we decrease the usage count. Also we're not guaranteed
2276          * to drain pinned lists, so we can't always restart.
2277          */
2278
2279 restart:
2280         nentry = NULL;
2281         list_for_each_safe(list, next, head) {
2282                 prev = list->prev;
2283
2284                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2285                 atomic_inc(&entry->usage);
2286                 if (nentry) {
2287                         atomic_dec(&nentry->usage);
2288                         nentry = NULL;
2289                 }
2290
2291                 /*
2292                  * Protect the next item from destruction, so we can check
2293                  * its list pointers later on.
2294                  */
2295
2296                 if (next != head) {
2297                         nentry = drm_bo_entry(next, pinned_list);
2298                         atomic_inc(&nentry->usage);
2299                 }
2300                 mutex_unlock(&dev->struct_mutex);
2301
2302                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2303                                         allow_errors);
2304                 mutex_lock(&dev->struct_mutex);
2305
2306                 drm_bo_usage_deref_locked(&entry);
2307                 if (ret)
2308                         return ret;
2309
2310                 /*
2311                  * Has the next item disappeared from the list?
2312                  */
2313
2314                 do_restart = ((next->prev != list) && (next->prev != prev));
2315
2316                 if (nentry != NULL && do_restart)
2317                         drm_bo_usage_deref_locked(&nentry);
2318
2319                 if (do_restart)
2320                         goto restart;
2321         }
2322         return 0;
2323 }
2324
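/*
 * Take down a memory manager type: mark it unused, evict everything on
 * its LRU and pinned lists and, if the underlying range manager is
 * clean, tear it down. Returns -EBUSY if buffers are still left.
 */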
2325 int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
2326 {
2327         struct drm_buffer_manager *bm = &dev->bm;
2328         struct drm_mem_type_manager *man = &bm->man[mem_type];
2329         int ret = -EINVAL;
2330
2331         if (mem_type >= DRM_BO_MEM_TYPES) {
2332                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2333                 return ret;
2334         }
2335
2336         if (!man->has_type) {
2337                 DRM_ERROR("Trying to take down uninitialized "
2338                           "memory manager type %u\n", mem_type);
2339                 return ret;
2340         }
2341         man->use_type = 0;
2342         man->has_type = 0;
2343
2344         ret = 0;
2345         if (mem_type > 0) {
2346
2347                 drm_bo_clean_unfenced(dev);
2348                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2349                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2350
2351                 if (drm_mm_clean(&man->manager)) {
2352                         drm_mm_takedown(&man->manager);
2353                 } else {
2354                         ret = -EBUSY;
2355                 }
2356         }
2357
2358         return ret;
2359 }
2360 EXPORT_SYMBOL(drm_bo_clean_mm);
2361
2362 /**
2363  * Evict all buffers of a particular mem_type, but leave memory manager
2364  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2365  * point since we have the hardware lock.
2366  */
2367
2368 static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
2369 {
2370         int ret;
2371         struct drm_buffer_manager *bm = &dev->bm;
2372         struct drm_mem_type_manager *man = &bm->man[mem_type];
2373
2374         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2375                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2376                 return -EINVAL;
2377         }
2378
2379         if (!man->has_type) {
2380                 DRM_ERROR("Memory type %u has not been initialized.\n",
2381                           mem_type);
2382                 return 0;
2383         }
2384
2385         drm_bo_clean_unfenced(dev);
2386         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2387         if (ret)
2388                 return ret;
2389         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2390
2391         return ret;
2392 }
2393
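/*
 * Initialize a memory type manager. The driver hook sets up the type
 * specific fields; for all types except DRM_BO_MEM_LOCAL a range
 * manager covering p_size pages starting at p_offset is initialized as
 * well.
 */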
2394 int drm_bo_init_mm(struct drm_device * dev,
2395                    unsigned type,
2396                    unsigned long p_offset, unsigned long p_size)
2397 {
2398         struct drm_buffer_manager *bm = &dev->bm;
2399         int ret = -EINVAL;
2400         struct drm_mem_type_manager *man;
2401
2402         if (type >= DRM_BO_MEM_TYPES) {
2403                 DRM_ERROR("Illegal memory type %d\n", type);
2404                 return ret;
2405         }
2406
2407         man = &bm->man[type];
2408         if (man->has_type) {
2409                 DRM_ERROR("Memory manager already initialized for type %d\n",
2410                           type);
2411                 return ret;
2412         }
2413
2414         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2415         if (ret)
2416                 return ret;
2417
2418         ret = 0;
2419         if (type != DRM_BO_MEM_LOCAL) {
2420                 if (!p_size) {
2421                         DRM_ERROR("Zero size memory manager type %d\n", type);
2422                         return -EINVAL;
2423                 }
2424                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2425                 if (ret)
2426                         return ret;
2427         }
2428         man->has_type = 1;
2429         man->use_type = 1;
2430
2431         INIT_LIST_HEAD(&man->lru);
2432         INIT_LIST_HEAD(&man->pinned);
2433
2434         return 0;
2435 }
2436 EXPORT_SYMBOL(drm_bo_init_mm);
2437
2438 /*
2439  * This is called from lastclose, so we don't need to bother about
2440  * any clients still running when we set the initialized flag to zero.
2441  */
2442
2443 int drm_bo_driver_finish(struct drm_device * dev)
2444 {
2445         struct drm_buffer_manager *bm = &dev->bm;
2446         int ret = 0;
2447         unsigned i = DRM_BO_MEM_TYPES;
2448         struct drm_mem_type_manager *man;
2449
2450         mutex_lock(&dev->bm.init_mutex);
2451         mutex_lock(&dev->struct_mutex);
2452
2453         if (!bm->initialized)
2454                 goto out;
2455         bm->initialized = 0;
2456
2457         while (i--) {
2458                 man = &bm->man[i];
2459                 if (man->has_type) {
2460                         man->use_type = 0;
2461                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2462                                 ret = -EBUSY;
2463                                 DRM_ERROR("DRM memory manager type %d "
2464                                           "is not clean.\n", i);
2465                         }
2466                         man->has_type = 0;
2467                 }
2468         }
2469         mutex_unlock(&dev->struct_mutex);
2470
2471         if (!cancel_delayed_work(&bm->wq)) {
2472                 flush_scheduled_work();
2473         }
2474         mutex_lock(&dev->struct_mutex);
2475         drm_bo_delayed_delete(dev, 1);
2476         if (list_empty(&bm->ddestroy)) {
2477                 DRM_DEBUG("Delayed destroy list was clean\n");
2478         }
2479         if (list_empty(&bm->man[0].lru)) {
2480                 DRM_DEBUG("Swap list was clean\n");
2481         }
2482         if (list_empty(&bm->man[0].pinned)) {
2483                 DRM_DEBUG("NO_MOVE list was clean\n");
2484         }
2485         if (list_empty(&bm->unfenced)) {
2486                 DRM_DEBUG("Unfenced list was clean\n");
2487         }
2488       out:
2489         mutex_unlock(&dev->struct_mutex);
2490         mutex_unlock(&dev->bm.init_mutex);
2491         return ret;
2492 }
2493
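/*
 * Set up the buffer object manager at driver load: initialize the
 * system (local) memory type and the delayed-destroy work queue.
 * Other memory types are initialized by the driver or via ioctl.
 */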
2494 int drm_bo_driver_init(struct drm_device * dev)
2495 {
2496         struct drm_bo_driver *driver = dev->driver->bo_driver;
2497         struct drm_buffer_manager *bm = &dev->bm;
2498         int ret = -EINVAL;
2499
2500         mutex_lock(&dev->bm.init_mutex);
2501         mutex_lock(&dev->struct_mutex);
2502         if (!driver)
2503                 goto out_unlock;
2504
2505         /*
2506          * Initialize the system memory buffer type.
2507          * Other types need to be driver / IOCTL initialized.
2508          */
2509         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2510         if (ret)
2511                 goto out_unlock;
2512
2513 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2514         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2515 #else
2516         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2517 #endif
2518         bm->initialized = 1;
2519         bm->nice_mode = 1;
2520         atomic_set(&bm->count, 0);
2521         bm->cur_pages = 0;
2522         INIT_LIST_HEAD(&bm->unfenced);
2523         INIT_LIST_HEAD(&bm->ddestroy);
2524       out_unlock:
2525         mutex_unlock(&dev->struct_mutex);
2526         mutex_unlock(&dev->bm.init_mutex);
2527         return ret;
2528 }
2529
2530 EXPORT_SYMBOL(drm_bo_driver_init);
2531
2532 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2533 {
2534         struct drm_mm_init_arg *arg = data;
2535         struct drm_buffer_manager *bm = &dev->bm;
2536         struct drm_bo_driver *driver = dev->driver->bo_driver;
2537         int ret;
2538
2539         DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
2540             arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));
2541
2542         if (!driver) {
2543                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2544                 return -EINVAL;
2545         }
2546
2547         ret = -EINVAL;
2548         if (arg->magic != DRM_BO_INIT_MAGIC) {
2549                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2550                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2551                 return -EINVAL;
2552         }
2553         if (arg->major != DRM_BO_INIT_MAJOR) {
2554                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2555                           "\tversions don't match. Got %d, expected %d.\n",
2556                           arg->major, DRM_BO_INIT_MAJOR);
2557                 return -EINVAL;
2558         }
2559         if (arg->minor > DRM_BO_INIT_MINOR) {
2560                 DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
2561                           "\tlibdrm buffer object interface version is %d.%d.\n"
2562                           "\tkernel DRM buffer object interface version is %d.%d\n",
2563                           arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
2564                 return -EINVAL;
2565         }
2566
2567         mutex_lock(&dev->bm.init_mutex);
2568         mutex_lock(&dev->struct_mutex);
2569         if (!bm->initialized) {
2570                 DRM_ERROR("DRM memory manager was not initialized.\n");
2571                 goto out;
2572         }
2573         if (arg->mem_type == 0) {
2574                 DRM_ERROR("System memory buffers already initialized.\n");
2575                 goto out;
2576         }
2577         ret = drm_bo_init_mm(dev, arg->mem_type,
2578                              arg->p_offset, arg->p_size);
2579
2580 out:
2581         mutex_unlock(&dev->struct_mutex);
2582         mutex_unlock(&dev->bm.init_mutex);
2583         if (ret)
2584                 return ret;
2585
2586         return 0;
2587 }
2588
2589 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2590 {
2591         struct drm_mm_type_arg *arg = data;
2592         struct drm_buffer_manager *bm = &dev->bm;
2593         struct drm_bo_driver *driver = dev->driver->bo_driver;
2594         int ret;
2595
2596         DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);
2597
2598         if (!driver) {
2599                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2600                 return -EINVAL;
2601         }
2602
2603         LOCK_TEST_WITH_RETURN(dev, file_priv);
2604         mutex_lock(&dev->bm.init_mutex);
2605         mutex_lock(&dev->struct_mutex);
2606         ret = -EINVAL;
2607         if (!bm->initialized) {
2608                 DRM_ERROR("DRM memory manager was not initialized\n");
2609                 goto out;
2610         }
2611         if (arg->mem_type == 0) {
2612                 DRM_ERROR("No takedown for System memory buffers.\n");
2613                 goto out;
2614         }
2615         ret = 0;
2616         if (drm_bo_clean_mm(dev, arg->mem_type)) {
2617                 DRM_ERROR("Memory manager type %d not clean. "
2618                           "Delaying takedown\n", arg->mem_type);
2619         }
2620 out:
2621         mutex_unlock(&dev->struct_mutex);
2622         mutex_unlock(&dev->bm.init_mutex);
2623         if (ret)
2624                 return ret;
2625
2626         return 0;
2627 }
2628
2629 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2630 {
2631         struct drm_mm_type_arg *arg = data;
2632         struct drm_bo_driver *driver = dev->driver->bo_driver;
2633         int ret;
2634
2635         DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);
2636
2637         if (!driver) {
2638                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2639                 return -EINVAL;
2640         }
2641
2642         LOCK_TEST_WITH_RETURN(dev, file_priv);
2643         mutex_lock(&dev->bm.init_mutex);
2644         mutex_lock(&dev->struct_mutex);
2645         ret = drm_bo_lock_mm(dev, arg->mem_type);
2646         mutex_unlock(&dev->struct_mutex);
2647         mutex_unlock(&dev->bm.init_mutex);
2648         if (ret)
2649                 return ret;
2650
2651         return 0;
2652 }
2653
2654 int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2655 {
2656         struct drm_bo_driver *driver = dev->driver->bo_driver;
2657         int ret;
2658
2659         DRM_DEBUG("drm_mm_unlock_ioctl\n");
2660
2661         if (!driver) {
2662                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2663                 return -EINVAL;
2664         }
2665
2666         LOCK_TEST_WITH_RETURN(dev, file_priv);
2667         mutex_lock(&dev->bm.init_mutex);
2668         mutex_lock(&dev->struct_mutex);
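        /* There is currently nothing to do here; unlocking the memory
         * manager is a no-op beyond the lock test above. */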
2669         ret = 0;
2670
2671         mutex_unlock(&dev->struct_mutex);
2672         mutex_unlock(&dev->bm.init_mutex);
2673         if (ret)
2674                 return ret;
2675
2676         return 0;
2677 }
2678
2679 /*
2680  * buffer object vm functions.
2681  */
2682
2683 int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
2684 {
2685         struct drm_buffer_manager *bm = &dev->bm;
2686         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2687
2688         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2689                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2690                         return 0;
2691
2692                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2693                         return 0;
2694
2695                 if (mem->flags & DRM_BO_FLAG_CACHED)
2696                         return 0;
2697         }
2698         return 1;
2699 }
2700
2701 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2702
2703 /**
2704  * Get the PCI offset for the buffer object memory.
2705  *
2706  * \param bo The buffer object.
2707  * \param bus_base On return the base of the PCI region
2708  * \param bus_offset On return the byte offset into the PCI region
2709  * \param bus_size On return the byte size of the buffer object or zero if
2710  *     the buffer object memory is not accessible through a PCI region.
2711  * \return Failure indication.
2712  *
2713  * Returns -EINVAL if the buffer object is currently not mappable.
2714  * Otherwise returns zero.
2715  */
2716
2717 int drm_bo_pci_offset(struct drm_device *dev,
2718                       struct drm_bo_mem_reg *mem,
2719                       unsigned long *bus_base,
2720                       unsigned long *bus_offset, unsigned long *bus_size)
2721 {
2722         struct drm_buffer_manager *bm = &dev->bm;
2723         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2724
2725         *bus_size = 0;
2726         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2727                 return -EINVAL;
2728
2729         if (drm_mem_reg_is_pci(dev, mem)) {
2730                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2731                 *bus_size = mem->num_pages << PAGE_SHIFT;
2732                 *bus_base = man->io_offset;
2733         }
2734
2735         return 0;
2736 }
2737
2738 /**
2739  * Kill all user-space virtual mappings of this buffer object.
2740  *
2741  * \param bo The buffer object.
2742  *
2743  * Call bo->mutex locked.
2744  */
2745
2746 static void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
2747 {
2748         struct drm_device *dev = bo->dev;
2749         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2750         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2751
2752         if (!dev->dev_mapping)
2753                 return;
2754
2755         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2756 }
2757
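/*
 * Tear down the fake mmap offset of a buffer object: remove the hash
 * table entry and the offset-manager block, free the map and drop the
 * reference that the mapping held on the object.
 * Call with dev->struct_mutex held.
 */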
2758 static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
2759 {
2760         struct drm_map_list *list = &bo->map_list;
2761         drm_local_map_t *map;
2762         struct drm_device *dev = bo->dev;
2763
2764         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2765         if (list->user_token) {
2766                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2767                 list->user_token = 0;
2768         }
2769         if (list->file_offset_node) {
2770                 drm_mm_put_block(list->file_offset_node);
2771                 list->file_offset_node = NULL;
2772         }
2773
2774         map = list->map;
2775         if (!map)
2776                 return;
2777
2778         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2779         list->map = NULL;
2780         list->user_token = 0ULL;
2781         drm_bo_usage_deref_locked(&bo);
2782 }
2783
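/*
 * Set up the fake mmap offset used by user space to map the buffer:
 * allocate a _DRM_TTM map, reserve a block in the offset manager and
 * insert it into the map hash. Takes an extra reference on the object
 * for the mapping. Call with dev->struct_mutex held.
 */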
2784 static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
2785 {
2786         struct drm_map_list *list = &bo->map_list;
2787         drm_local_map_t *map;
2788         struct drm_device *dev = bo->dev;
2789
2790         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2791         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2792         if (!list->map)
2793                 return -ENOMEM;
2794
2795         map = list->map;
2796         map->offset = 0;
2797         map->type = _DRM_TTM;
2798         map->flags = _DRM_REMOVABLE;
2799         map->size = bo->mem.num_pages * PAGE_SIZE;
2800         atomic_inc(&bo->usage);
2801         map->handle = (void *)bo;
2802
2803         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2804                                                     bo->mem.num_pages, 0, 0);
2805
2806         if (!list->file_offset_node) {
2807                 drm_bo_takedown_vm_locked(bo);
2808                 return -ENOMEM;
2809         }
2810
2811         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2812                                                   bo->mem.num_pages, 0);
2813
2814         list->hash.key = list->file_offset_node->start;
2815         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2816                 drm_bo_takedown_vm_locked(bo);
2817                 return -ENOMEM;
2818         }
2819
2820         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2821
2822         return 0;
2823 }