[platform/upstream/libdrm.git] / linux-core / drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads,
40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself, excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those
44  * we need both bo->mutex and dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47  * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48  * the list traversal will, in general, need to be restarted.
49  *
50  */
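/*
 * For illustration, the traversal pattern this implies (used by, e.g.,
 * drm_fence_buffer_objects() below) is roughly:
 *
 *	atomic_inc(&bo->usage);            take a usage reference first
 *	mutex_unlock(&dev->struct_mutex);  respect the locking order
 *	mutex_lock(&bo->mutex);
 *	mutex_lock(&dev->struct_mutex);    re-acquire; the list may have changed
 *	...
 *	mutex_unlock(&bo->mutex);
 *	drm_bo_usage_deref_locked(&bo);
 */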
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
55
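/*
 * Convert a memory type index to its corresponding DRM_BO_FLAG_MEM_*
 * bit; the memory-type flag bits apparently occupy bits 24 and up of
 * the 64-bit flag word.
 */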
56 static inline uint64_t drm_bo_type_flags(unsigned type)
57 {
58         return (1ULL << (24 + type));
59 }
60
61 /*
62  * bo locked. dev->struct_mutex locked.
63  */
64
65 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
66 {
67         struct drm_mem_type_manager *man;
68
69         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
70         DRM_ASSERT_LOCKED(&bo->mutex);
71
72         man = &bo->dev->bm.man[bo->pinned_mem_type];
73         list_add_tail(&bo->pinned_lru, &man->pinned);
74 }
75
76 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
77 {
78         struct drm_mem_type_manager *man;
79
80         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
81
82         if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
83             || bo->mem.mem_type != bo->pinned_mem_type) {
84                 man = &bo->dev->bm.man[bo->mem.mem_type];
85                 list_add_tail(&bo->lru, &man->lru);
86         } else {
87                 INIT_LIST_HEAD(&bo->lru);
88         }
89 }
90
91 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
92 {
93 #ifdef DRM_ODD_MM_COMPAT
94         int ret;
95
96         if (!bo->map_list.map)
97                 return 0;
98
99         ret = drm_bo_lock_kmm(bo);
100         if (ret)
101                 return ret;
102         drm_bo_unmap_virtual(bo);
103         if (old_is_pci)
104                 drm_bo_finish_unmap(bo);
105 #else
106         if (!bo->map_list.map)
107                 return 0;
108
109         drm_bo_unmap_virtual(bo);
110 #endif
111         return 0;
112 }
113
114 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
115 {
116 #ifdef DRM_ODD_MM_COMPAT
117         int ret;
118
119         if (!bo->map_list.map)
120                 return;
121
122         ret = drm_bo_remap_bound(bo);
123         if (ret) {
124                 DRM_ERROR("Failed to remap a bound buffer object.\n"
125                           "\tThis might cause a sigbus later.\n");
126         }
127         drm_bo_unlock_kmm(bo);
128 #endif
129 }
130
131 /*
132  * Call bo->mutex locked.
133  */
134
135 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
136 {
137         struct drm_device *dev = bo->dev;
138         int ret = 0;
139         uint32_t page_flags = 0;
140
141         DRM_ASSERT_LOCKED(&bo->mutex);
142         bo->ttm = NULL;
143
144         if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
145                 page_flags |= DRM_TTM_PAGE_WRITE;
146
147         switch (bo->type) {
148         case drm_bo_type_device:
149         case drm_bo_type_kernel:
150                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
151                                          page_flags, dev->bm.dummy_read_page);
152                 if (!bo->ttm)
153                         ret = -ENOMEM;
154                 break;
155         case drm_bo_type_user:
156                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
157                                          page_flags | DRM_TTM_PAGE_USER,
158                                          dev->bm.dummy_read_page);
159                 if (!bo->ttm)
160                         return -ENOMEM;
161
162                 ret = drm_ttm_set_user(bo->ttm, current,
163                                        bo->buffer_start,
164                                        bo->num_pages);
165                 if (ret)
166                         return ret;
167
168                 break;
169         default:
170                 DRM_ERROR("Illegal buffer object type\n");
171                 ret = -EINVAL;
172                 break;
173         }
174
175         return ret;
176 }
177
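/*
 * Move the backing store of @bo into the placement described by @mem:
 * create and bind a ttm if the new memory type needs one, then hand the
 * actual transfer to drm_bo_move_ttm(), the driver's move hook, or
 * drm_bo_move_memcpy(), and finally update bo->offset.  Called with
 * bo->mutex held.
 */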
178 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
179                                   struct drm_bo_mem_reg *mem,
180                                   int evict, int no_wait)
181 {
182         struct drm_device *dev = bo->dev;
183         struct drm_buffer_manager *bm = &dev->bm;
184         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
185         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
186         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
187         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
188         int ret = 0;
189
190         if (old_is_pci || new_is_pci ||
191             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
192                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
193         if (ret)
194                 return ret;
195
196         /*
197          * Create and bind a ttm if required.
198          */
199
200         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
201                 ret = drm_bo_add_ttm(bo);
202                 if (ret)
203                         goto out_err;
204
205                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
206                         ret = drm_ttm_bind(bo->ttm, mem);
207                         if (ret)
208                                 goto out_err;
209                 }
210
211                 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
212                         
213                         struct drm_bo_mem_reg *old_mem = &bo->mem;
214                         uint64_t save_flags = old_mem->flags;
215                         uint64_t save_proposed_flags = old_mem->proposed_flags;
216                         
217                         *old_mem = *mem;
218                         mem->mm_node = NULL;
219                         old_mem->proposed_flags = save_proposed_flags;
220                         DRM_FLAG_MASKED(save_flags, mem->flags,
221                                         DRM_BO_MASK_MEMTYPE);
222                         goto moved;
223                 }
224                 
225         }
226
227         if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
228             !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
229                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
230         else if (dev->driver->bo_driver->move) 
231                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
232         else
233                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
234
235         if (ret)
236                 goto out_err;
237
238 moved:
239         if (old_is_pci || new_is_pci)
240                 drm_bo_vm_post_move(bo);
241
242         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
243                 ret =
244                     dev->driver->bo_driver->invalidate_caches(dev,
245                                                               bo->mem.flags);
246                 if (ret)
247                         DRM_ERROR("Cannot flush read caches\n");
248         }
249
250         DRM_FLAG_MASKED(bo->priv_flags,
251                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
252                         _DRM_BO_FLAG_EVICTED);
253
254         if (bo->mem.mm_node)
255                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
256                         bm->man[bo->mem.mem_type].gpu_offset;
257
258
259         return 0;
260
261 out_err:
262         if (old_is_pci || new_is_pci)
263                 drm_bo_vm_post_move(bo);
264
265         new_man = &bm->man[bo->mem.mem_type];
266         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
267                 drm_ttm_unbind(bo->ttm);
268                 drm_ttm_destroy(bo->ttm);
269                 bo->ttm = NULL;
270         }
271
272         return ret;
273 }
274
275 /*
276  * Call bo->mutex locked.
277  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
278  */
279
280 static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
281 {
282         struct drm_fence_object *fence = bo->fence;
283
284         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
285                 return -EBUSY;
286
287         if (fence) {
288                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
289                         drm_fence_usage_deref_unlocked(&bo->fence);
290                         return 0;
291                 }
292                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
293                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
294                         drm_fence_usage_deref_unlocked(&bo->fence);
295                         return 0;
296                 }
297                 return -EBUSY;
298         }
299         return 0;
300 }
301
302 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
303 {
304         int ret;
305
306         mutex_lock(&bo->mutex);
307         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
308         mutex_unlock(&bo->mutex);
309         return ret;
310 }
311
312
313 /*
314  * Call bo->mutex locked.
315  * Wait until the buffer is idle.
316  */
317
318 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
319                 int no_wait, int check_unfenced)
320 {
321         int ret;
322
323         DRM_ASSERT_LOCKED(&bo->mutex);
324         while(unlikely(drm_bo_busy(bo, check_unfenced))) {
325                 if (no_wait)
326                         return -EBUSY;
327
328                 if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
329                         mutex_unlock(&bo->mutex);
330                         wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
331                         mutex_lock(&bo->mutex);
332                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
333                 }
334
335                 if (bo->fence) {
336                         struct drm_fence_object *fence;
337                         uint32_t fence_type = bo->fence_type;
338
339                         drm_fence_reference_unlocked(&fence, bo->fence);
340                         mutex_unlock(&bo->mutex);
341
342                         ret = drm_fence_object_wait(fence, lazy, !interruptible,
343                                                     fence_type);
344
345                         drm_fence_usage_deref_unlocked(&fence);
346                         mutex_lock(&bo->mutex);
347                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
348                         if (ret)
349                                 return ret;
350                 }
351
352         }
353         return 0;
354 }
355 EXPORT_SYMBOL(drm_bo_wait);
356
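/*
 * Wait for the buffer's fence to signal, retrying for roughly three
 * seconds while bm->nice_mode is set.  If the fence still hasn't
 * signaled, assume a GPU lockup (or a torn-down fence driver), clear
 * nice_mode and drop the fence reference so the buffer can be evicted.
 * With @allow_errors set, wait errors are returned instead of ignored.
 */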
357 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
358 {
359         struct drm_device *dev = bo->dev;
360         struct drm_buffer_manager *bm = &dev->bm;
361
362         if (bo->fence) {
363                 if (bm->nice_mode) {
364                         unsigned long _end = jiffies + 3 * DRM_HZ;
365                         int ret;
366                         do {
367                                 ret = drm_bo_wait(bo, 0, 0, 0, 0);
368                                 if (ret && allow_errors)
369                                         return ret;
370
371                         } while (ret && !time_after_eq(jiffies, _end));
372
373                         if (bo->fence) {
374                                 bm->nice_mode = 0;
375                                 DRM_ERROR("Detected GPU lockup or "
376                                           "fence driver was taken down. "
377                                           "Evicting buffer.\n");
378                         }
379                 }
380                 if (bo->fence)
381                         drm_fence_usage_deref_unlocked(&bo->fence);
382         }
383         return 0;
384 }
385
386 /*
387  * Call dev->struct_mutex locked.
388  * Attempts to remove all private references to a buffer by expiring its
389  * fence object and removing it from lru lists and memory managers.
390  */
391
392 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
393 {
394         struct drm_device *dev = bo->dev;
395         struct drm_buffer_manager *bm = &dev->bm;
396
397         DRM_ASSERT_LOCKED(&dev->struct_mutex);
398
399         atomic_inc(&bo->usage);
400         mutex_unlock(&dev->struct_mutex);
401         mutex_lock(&bo->mutex);
402
403         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
404
405         if (bo->fence && drm_fence_object_signaled(bo->fence,
406                                                    bo->fence_type))
407                 drm_fence_usage_deref_unlocked(&bo->fence);
408
409         if (bo->fence && remove_all)
410                 (void)drm_bo_expire_fence(bo, 0);
411
412         mutex_lock(&dev->struct_mutex);
413
414         if (!atomic_dec_and_test(&bo->usage))
415                 goto out;
416
417         if (!bo->fence) {
418                 list_del_init(&bo->lru);
419                 if (bo->mem.mm_node) {
420                         drm_memrange_put_block(bo->mem.mm_node);
421                         if (bo->pinned_node == bo->mem.mm_node)
422                                 bo->pinned_node = NULL;
423                         bo->mem.mm_node = NULL;
424                 }
425                 list_del_init(&bo->pinned_lru);
426                 if (bo->pinned_node) {
427                         drm_memrange_put_block(bo->pinned_node);
428                         bo->pinned_node = NULL;
429                 }
430                 list_del_init(&bo->ddestroy);
431                 mutex_unlock(&bo->mutex);
432                 drm_bo_destroy_locked(bo);
433                 return;
434         }
435
436         if (list_empty(&bo->ddestroy)) {
437                 drm_fence_object_flush(bo->fence, bo->fence_type);
438                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
439                 schedule_delayed_work(&bm->wq,
440                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
441         }
442
443 out:
444         mutex_unlock(&bo->mutex);
445         return;
446 }
447
448 /*
449  * Verify that refcount is 0 and that there are no internal references
450  * to the buffer object. Then destroy it.
451  */
452
453 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
454 {
455         struct drm_device *dev = bo->dev;
456         struct drm_buffer_manager *bm = &dev->bm;
457
458         DRM_ASSERT_LOCKED(&dev->struct_mutex);
459
460         DRM_DEBUG("freeing %p\n", bo);
461         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
462             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
463             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
464                 if (bo->fence != NULL) {
465                         DRM_ERROR("Fence was non-zero.\n");
466                         drm_bo_cleanup_refs(bo, 0);
467                         return;
468                 }
469
470 #ifdef DRM_ODD_MM_COMPAT
471                 BUG_ON(!list_empty(&bo->vma_list));
472                 BUG_ON(!list_empty(&bo->p_mm_list));
473 #endif
474
475                 if (bo->ttm) {
476                         drm_ttm_unbind(bo->ttm);
477                         drm_ttm_destroy(bo->ttm);
478                         bo->ttm = NULL;
479                 }
480
481                 atomic_dec(&bm->count);
482
483                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
484
485                 return;
486         }
487
488         /*
489          * Some stuff is still trying to reference the buffer object.
490          * Get rid of those references.
491          */
492
493         drm_bo_cleanup_refs(bo, 0);
494
495         return;
496 }
497
498 /*
499  * Call dev->struct_mutex locked.
500  */
501
502 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
503 {
504         struct drm_buffer_manager *bm = &dev->bm;
505
506         struct drm_buffer_object *entry, *nentry;
507         struct list_head *list, *next;
508
509         list_for_each_safe(list, next, &bm->ddestroy) {
510                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
511
512                 nentry = NULL;
513                 if (next != &bm->ddestroy) {
514                         nentry = list_entry(next, struct drm_buffer_object,
515                                             ddestroy);
516                         atomic_inc(&nentry->usage);
517                 }
518
519                 drm_bo_cleanup_refs(entry, remove_all);
520
521                 if (nentry)
522                         atomic_dec(&nentry->usage);
523         }
524 }
525
526 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
527 static void drm_bo_delayed_workqueue(void *data)
528 #else
529 static void drm_bo_delayed_workqueue(struct work_struct *work)
530 #endif
531 {
532 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
533         struct drm_device *dev = (struct drm_device *) data;
534         struct drm_buffer_manager *bm = &dev->bm;
535 #else
536         struct drm_buffer_manager *bm =
537             container_of(work, struct drm_buffer_manager, wq.work);
538         struct drm_device *dev = container_of(bm, struct drm_device, bm);
539 #endif
540
541         DRM_DEBUG("Delayed delete Worker\n");
542
543         mutex_lock(&dev->struct_mutex);
544         if (!bm->initialized) {
545                 mutex_unlock(&dev->struct_mutex);
546                 return;
547         }
548         drm_bo_delayed_delete(dev, 0);
549         if (bm->initialized && !list_empty(&bm->ddestroy)) {
550                 schedule_delayed_work(&bm->wq,
551                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
552         }
553         mutex_unlock(&dev->struct_mutex);
554 }
555
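/*
 * Drop a usage reference with dev->struct_mutex already held; the
 * buffer object is destroyed once the reference count reaches zero.
 */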
556 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
557 {
558         struct drm_buffer_object *tmp_bo = *bo;
559         *bo = NULL;
560
561         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
562
563         if (atomic_dec_and_test(&tmp_bo->usage))
564                 drm_bo_destroy_locked(tmp_bo);
565 }
566 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
567
568 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
569 {
570         struct drm_buffer_object *tmp_bo = *bo;
571         struct drm_device *dev = tmp_bo->dev;
572
573         *bo = NULL;
574         if (atomic_dec_and_test(&tmp_bo->usage)) {
575                 mutex_lock(&dev->struct_mutex);
576                 if (atomic_read(&tmp_bo->usage) == 0)
577                         drm_bo_destroy_locked(tmp_bo);
578                 mutex_unlock(&dev->struct_mutex);
579         }
580 }
581 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
582
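/*
 * Put every buffer on the unfenced list back on its normal LRU list,
 * clearing the unfenced state and waking any waiters.  Apparently meant
 * for error paths where a command submission fails after buffers have
 * been validated but before they could be fenced.
 */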
583 void drm_putback_buffer_objects(struct drm_device *dev)
584 {
585         struct drm_buffer_manager *bm = &dev->bm;
586         struct list_head *list = &bm->unfenced;
587         struct drm_buffer_object *entry, *next;
588
589         mutex_lock(&dev->struct_mutex);
590         list_for_each_entry_safe(entry, next, list, lru) {
591                 atomic_inc(&entry->usage);
592                 mutex_unlock(&dev->struct_mutex);
593
594                 mutex_lock(&entry->mutex);
595                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
596                 mutex_lock(&dev->struct_mutex);
597
598                 list_del_init(&entry->lru);
599                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
600                 wake_up_all(&entry->event_queue);
601
602                 /*
603                  * FIXME: Might want to put back on head of list
604                  * instead of tail here.
605                  */
606
607                 drm_bo_add_to_lru(entry);
608                 mutex_unlock(&entry->mutex);
609                 drm_bo_usage_deref_locked(&entry);
610         }
611         mutex_unlock(&dev->struct_mutex);
612 }
613 EXPORT_SYMBOL(drm_putback_buffer_objects);
614
615 /*
616  * Note. The caller has to register (if applicable)
617  * and deregister fence object usage.
618  */
619
620 int drm_fence_buffer_objects(struct drm_device *dev,
621                              struct list_head *list,
622                              uint32_t fence_flags,
623                              struct drm_fence_object *fence,
624                              struct drm_fence_object **used_fence)
625 {
626         struct drm_buffer_manager *bm = &dev->bm;
627         struct drm_buffer_object *entry;
628         uint32_t fence_type = 0;
629         uint32_t fence_class = ~0;
630         int count = 0;
631         int ret = 0;
632         struct list_head *l;
633
634         mutex_lock(&dev->struct_mutex);
635
636         if (!list)
637                 list = &bm->unfenced;
638
639         if (fence)
640                 fence_class = fence->fence_class;
641
642         list_for_each_entry(entry, list, lru) {
643                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
644                 fence_type |= entry->new_fence_type;
645                 if (fence_class == ~0)
646                         fence_class = entry->new_fence_class;
647                 else if (entry->new_fence_class != fence_class) {
648                         DRM_ERROR("Mismatched fence classes on unfenced list: "
649                                   "%d and %d.\n",
650                                   fence_class,
651                                   entry->new_fence_class);
652                         ret = -EINVAL;
653                         goto out;
654                 }
655                 count++;
656         }
657
658         if (!count) {
659                 ret = -EINVAL;
660                 goto out;
661         }
662
663         if (fence) {
664                 if ((fence_type & fence->type) != fence_type ||
665                     (fence->fence_class != fence_class)) {
666                         DRM_ERROR("Given fence doesn't match buffers "
667                                   "on unfenced list.\n");
668                         ret = -EINVAL;
669                         goto out;
670                 }
671         } else {
672                 mutex_unlock(&dev->struct_mutex);
673                 ret = drm_fence_object_create(dev, fence_class, fence_type,
674                                               fence_flags | DRM_FENCE_FLAG_EMIT,
675                                               &fence);
676                 mutex_lock(&dev->struct_mutex);
677                 if (ret)
678                         goto out;
679         }
680
681         count = 0;
682         l = list->next;
683         while (l != list) {
684                 prefetch(l->next);
685                 entry = list_entry(l, struct drm_buffer_object, lru);
686                 atomic_inc(&entry->usage);
687                 mutex_unlock(&dev->struct_mutex);
688                 mutex_lock(&entry->mutex);
689                 mutex_lock(&dev->struct_mutex);
690                 list_del_init(l);
691                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
692                         count++;
693                         if (entry->fence)
694                                 drm_fence_usage_deref_locked(&entry->fence);
695                         entry->fence = drm_fence_reference_locked(fence);
696                         entry->fence_class = entry->new_fence_class;
697                         entry->fence_type = entry->new_fence_type;
698                         DRM_FLAG_MASKED(entry->priv_flags, 0,
699                                         _DRM_BO_FLAG_UNFENCED);
700                         wake_up_all(&entry->event_queue);
701                         drm_bo_add_to_lru(entry);
702                 }
703                 mutex_unlock(&entry->mutex);
704                 drm_bo_usage_deref_locked(&entry);
705                 l = list->next;
706         }
707         DRM_DEBUG("Fenced %d buffers\n", count);
708 out:
709         mutex_unlock(&dev->struct_mutex);
710         *used_fence = fence;
711         return ret;
712 }
713 EXPORT_SYMBOL(drm_fence_buffer_objects);
714
715 /*
716  * bo->mutex locked
717  */
718
719 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
720                         int no_wait)
721 {
722         int ret = 0;
723         struct drm_device *dev = bo->dev;
724         struct drm_bo_mem_reg evict_mem;
725
726         /*
727          * Someone might have modified the buffer before we took the
728          * buffer mutex.
729          */
730
731         do {
732                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
733
734                 if (unlikely(bo->mem.flags &
735                              (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
736                         goto out_unlock;
737                 if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
738                         goto out_unlock;
739                 if (unlikely(bo->mem.mem_type != mem_type))
740                         goto out_unlock;
741                 ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
742                 if (ret)
743                         goto out_unlock;
744
745         } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
746
747         evict_mem = bo->mem;
751         evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
752
753         mutex_lock(&dev->struct_mutex);
754         list_del_init(&bo->lru);
755         mutex_unlock(&dev->struct_mutex);
756
757         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
758
759         if (ret) {
760                 if (ret != -EAGAIN)
761                         DRM_ERROR("Failed to find memory space for "
762                                   "buffer 0x%p eviction.\n", bo);
763                 goto out;
764         }
765
766         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
767
768         if (ret) {
769                 if (ret != -EAGAIN)
770                         DRM_ERROR("Buffer eviction failed\n");
771                 goto out;
772         }
773
774         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
775                         _DRM_BO_FLAG_EVICTED);
776
777 out:
778         mutex_lock(&dev->struct_mutex);
779         if (evict_mem.mm_node) {
780                 if (evict_mem.mm_node != bo->pinned_node)
781                         drm_memrange_put_block(evict_mem.mm_node);
782                 evict_mem.mm_node = NULL;
783         }
784         drm_bo_add_to_lru(bo);
785         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
786 out_unlock:
787         mutex_unlock(&dev->struct_mutex);
788
789         return ret;
790 }
791
792 /**
793  * Repeatedly evict memory from the LRU for @mem_type until we create enough
794  * space, or we've evicted everything and there isn't enough space.
795  */
796 static int drm_bo_mem_force_space(struct drm_device *dev,
797                                   struct drm_bo_mem_reg *mem,
798                                   uint32_t mem_type, int no_wait)
799 {
800         struct drm_memrange_node *node;
801         struct drm_buffer_manager *bm = &dev->bm;
802         struct drm_buffer_object *entry;
803         struct drm_mem_type_manager *man = &bm->man[mem_type];
804         struct list_head *lru;
805         unsigned long num_pages = mem->num_pages;
806         int ret;
807
808         mutex_lock(&dev->struct_mutex);
809         do {
810                 node = drm_memrange_search_free(&man->manager, num_pages,
811                                           mem->page_alignment, 1);
812                 if (node)
813                         break;
814
815                 lru = &man->lru;
816                 if (lru->next == lru)
817                         break;
818
819                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
820                 atomic_inc(&entry->usage);
821                 mutex_unlock(&dev->struct_mutex);
822                 mutex_lock(&entry->mutex);
823                 ret = drm_bo_evict(entry, mem_type, no_wait);
824                 mutex_unlock(&entry->mutex);
825                 drm_bo_usage_deref_unlocked(&entry);
826                 if (ret)
827                         return ret;
828                 mutex_lock(&dev->struct_mutex);
829         } while (1);
830
831         if (!node) {
832                 mutex_unlock(&dev->struct_mutex);
833                 return -ENOMEM;
834         }
835
836         node = drm_memrange_get_block(node, num_pages, mem->page_alignment);
837         if (unlikely(!node)) {
838                 mutex_unlock(&dev->struct_mutex);
839                 return -ENOMEM;
840         }
841
842         mutex_unlock(&dev->struct_mutex);
843         mem->mm_node = node;
844         mem->mem_type = mem_type;
845         return 0;
846 }
847
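/*
 * Check whether memory type @mem_type can satisfy the placement mask
 * @mask.  Returns 1 and the resulting flags in @res_mask if compatible,
 * 0 otherwise.  Fixed (non-ttm) memory types can be rejected outright
 * via @disallow_fixed, which callers set for user buffer objects.
 */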
848 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
849                                 int disallow_fixed,
850                                 uint32_t mem_type,
851                                 uint64_t mask, uint32_t *res_mask)
852 {
853         uint64_t cur_flags = drm_bo_type_flags(mem_type);
854         uint64_t flag_diff;
855
856         if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
857                 return 0;
858         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
859                 cur_flags |= DRM_BO_FLAG_CACHED;
860         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
861                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
862         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
863                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
864
865         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
866                 return 0;
867
868         if (mem_type == DRM_BO_MEM_LOCAL) {
869                 *res_mask = cur_flags;
870                 return 1;
871         }
872
873         flag_diff = (mask ^ cur_flags);
874         if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
875                 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
876
877         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
878             (!(mask & DRM_BO_FLAG_CACHED) ||
879              (mask & DRM_BO_FLAG_FORCE_CACHING)))
880                 return 0;
881
882         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
883             ((mask & DRM_BO_FLAG_MAPPABLE) ||
884              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
885                 return 0;
886
887         *res_mask = cur_flags;
888         return 1;
889 }
890
891 /**
892  * Creates space for memory region @mem according to its type.
893  *
894  * This function first searches for free space in compatible memory types in
895  * the priority order defined by the driver.  If free space isn't found, then
896  * drm_bo_mem_force_space is attempted in priority order to evict and find
897  * space.
898  */
899 int drm_bo_mem_space(struct drm_buffer_object *bo,
900                      struct drm_bo_mem_reg *mem, int no_wait)
901 {
902         struct drm_device *dev = bo->dev;
903         struct drm_buffer_manager *bm = &dev->bm;
904         struct drm_mem_type_manager *man;
905
906         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
907         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
908         uint32_t i;
909         uint32_t mem_type = DRM_BO_MEM_LOCAL;
910         uint32_t cur_flags;
911         int type_found = 0;
912         int type_ok = 0;
913         int has_eagain = 0;
914         struct drm_memrange_node *node = NULL;
915         int ret;
916
917         mem->mm_node = NULL;
918         for (i = 0; i < num_prios; ++i) {
919                 mem_type = prios[i];
920                 man = &bm->man[mem_type];
921
922                 type_ok = drm_bo_mt_compatible(man,
923                                                bo->type == drm_bo_type_user,
924                                                mem_type, mem->proposed_flags,
925                                                &cur_flags);
926
927                 if (!type_ok)
928                         continue;
929
930                 if (mem_type == DRM_BO_MEM_LOCAL)
931                         break;
932
933                 if ((mem_type == bo->pinned_mem_type) &&
934                     (bo->pinned_node != NULL)) {
935                         node = bo->pinned_node;
936                         break;
937                 }
938
939                 mutex_lock(&dev->struct_mutex);
940                 if (man->has_type && man->use_type) {
941                         type_found = 1;
942                         node = drm_memrange_search_free(&man->manager, mem->num_pages,
943                                                   mem->page_alignment, 1);
944                         if (node)
945                                 node = drm_memrange_get_block(node, mem->num_pages,
946                                                         mem->page_alignment);
947                 }
948                 mutex_unlock(&dev->struct_mutex);
949                 if (node)
950                         break;
951         }
952
953         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
954                 mem->mm_node = node;
955                 mem->mem_type = mem_type;
956                 mem->flags = cur_flags;
957                 return 0;
958         }
959
960         if (!type_found)
961                 return -EINVAL;
962
963         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
964         prios = dev->driver->bo_driver->mem_busy_prio;
965
966         for (i = 0; i < num_prios; ++i) {
967                 mem_type = prios[i];
968                 man = &bm->man[mem_type];
969
970                 if (!man->has_type)
971                         continue;
972
973                 if (!drm_bo_mt_compatible(man,
974                                           bo->type == drm_bo_type_user,
975                                           mem_type,
976                                           mem->proposed_flags,
977                                           &cur_flags))
978                         continue;
979
980                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
981
982                 if (ret == 0 && mem->mm_node) {
983                         mem->flags = cur_flags;
984                         return 0;
985                 }
986
987                 if (ret == -EAGAIN)
988                         has_eagain = 1;
989         }
990
991         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
992         return ret;
993 }
994 EXPORT_SYMBOL(drm_bo_mem_space);
995
996 /*
997  * drm_bo_modify_proposed_flags:
998  *
999  * @bo: the buffer object getting new flags
1000  *
1001  * @new_flags: the new set of proposed flag bits
1002  *
1003  * @new_mask: the mask of bits changed in new_flags
1004  *
1005  * Modify the proposed_flag bits in @bo
1006  */
1007 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
1008                                          uint64_t new_flags, uint64_t new_mask)
1009 {
1010         uint32_t new_access;
1011
1012         /* Copy unchanging bits from existing proposed_flags */
1013         DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
1014          
1015         if (bo->type == drm_bo_type_user &&
1016             ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
1017              (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
1018                 DRM_ERROR("User buffers require cache-coherent memory.\n");
1019                 return -EINVAL;
1020         }
1021
1022         if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
1023                 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
1024                 return -EPERM;
1025         }
1026
1027         if (likely(new_mask & DRM_BO_MASK_MEM) &&
1028             (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
1029             !DRM_SUSER(DRM_CURPROC)) {
1030                 if (likely(bo->mem.flags & new_flags & new_mask &
1031                            DRM_BO_MASK_MEM))
1032                         new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
1033                                 (bo->mem.flags & DRM_BO_MASK_MEM);
1034                 else {
1035                         DRM_ERROR("Incompatible memory type specification "
1036                                   "for NO_EVICT buffer.\n");
1037                         return -EPERM;
1038                 }
1039         }
1040
1041         if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1042                 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1043                 return -EPERM;
1044         }
1045
1046         new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1047                                   DRM_BO_FLAG_READ);
1048
1049         if (new_access == 0) {
1050                 DRM_ERROR("Invalid buffer object rwx properties\n");
1051                 return -EINVAL;
1052         }
1053
1054         bo->mem.proposed_flags = new_flags;
1055         return 0;
1056 }
1057
1058 /*
1059  * Call bo->mutex locked.
1060  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
1061  * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
1062  */
1063
1064 int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
1065 {
1066         struct drm_fence_object *fence = bo->fence;
1067
1068         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1069                 return -EBUSY;
1070
1071         if (fence) {
1072                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1073                         drm_fence_usage_deref_unlocked(&bo->fence);
1074                         return 0;
1075                 }
1076                 return -EBUSY;
1077         }
1078         return 0;
1079 }
1080
1081 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1082 {
1083         int ret = 0;
1084
1085         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1086         if (bo->mem.mm_node)
1087                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1088         return ret;
1089 }
1090
1091 EXPORT_SYMBOL(drm_bo_evict_cached);
1092 /*
1093  * Wait until a buffer is unmapped.
1094  */
1095
1096 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1097 {
1098         int ret = 0;
1099
1100         if (likely(atomic_read(&bo->mapped) == 0))
1101                 return 0;
1102
1103         if (unlikely(no_wait))
1104                 return -EBUSY;
1105
1106         do {
1107                 mutex_unlock(&bo->mutex);
1108                 ret = wait_event_interruptible(bo->event_queue,
1109                                                atomic_read(&bo->mapped) == 0);
1110                 mutex_lock(&bo->mutex);
1111                 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
1112
1113                 if (ret == -ERESTARTSYS)
1114                         ret = -EAGAIN;
1115         } while((ret == 0) && atomic_read(&bo->mapped) > 0);
1116
1117         return ret;
1118 }
1119
1120 /*
1121  * bo->mutex locked.
1122  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1123  */
1124
1125 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1126                        int no_wait, int move_unfenced)
1127 {
1128         struct drm_device *dev = bo->dev;
1129         struct drm_buffer_manager *bm = &dev->bm;
1130         int ret = 0;
1131         struct drm_bo_mem_reg mem;
1132
1133         BUG_ON(bo->fence != NULL);
1134
1135         mem.num_pages = bo->num_pages;
1136         mem.size = mem.num_pages << PAGE_SHIFT;
1137         mem.proposed_flags = new_mem_flags;
1138         mem.page_alignment = bo->mem.page_alignment;
1139
1140         mutex_lock(&bm->evict_mutex);
1141         mutex_lock(&dev->struct_mutex);
1142         list_del_init(&bo->lru);
1143         mutex_unlock(&dev->struct_mutex);
1144
1145         /*
1146          * Determine where to move the buffer.
1147          */
1148         ret = drm_bo_mem_space(bo, &mem, no_wait);
1149         if (ret)
1150                 goto out_unlock;
1151
1152         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1153
1154 out_unlock:
1155         mutex_lock(&dev->struct_mutex);
1156         if (ret || !move_unfenced) {
1157                 if (mem.mm_node) {
1158                         if (mem.mm_node != bo->pinned_node)
1159                                 drm_memrange_put_block(mem.mm_node);
1160                         mem.mm_node = NULL;
1161                 }
1162                 drm_bo_add_to_lru(bo);
1163                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1164                         wake_up_all(&bo->event_queue);
1165                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1166                                         _DRM_BO_FLAG_UNFENCED);
1167                 }
1168         } else {
1169                 list_add_tail(&bo->lru, &bm->unfenced);
1170                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1171                                 _DRM_BO_FLAG_UNFENCED);
1172         }
1173         mutex_unlock(&dev->struct_mutex);
1174         mutex_unlock(&bm->evict_mutex);
1175         return ret;
1176 }
1177
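/*
 * Return 1 if the buffer's current placement already satisfies its
 * proposed flags (memory type, caching and mappability), so no move is
 * required; 0 otherwise.
 */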
1178 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1179 {
1180         uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1181
1182         if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1183                 return 0;
1184         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1185             (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1186              (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1187                 return 0;
1188
1189         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1190             ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1191              (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1192                 return 0;
1193         return 1;
1194 }
1195
1196 /**
1197  * drm_buffer_object_validate:
1198  *
1199  * @bo: the buffer object to modify
1200  *
1201  * @fence_class: the new fence class covering this buffer
1202  *
1203  * @move_unfenced: a boolean indicating whether switching the
1204  * memory space of this buffer should cause the buffer to
1205  * be placed on the unfenced list.
1206  *
1207  * @no_wait: whether this function should return -EBUSY instead
1208  * of waiting.
1209  *
1210  * Change buffer access parameters. This can involve moving
1211  * the buffer to the correct memory type, pinning the buffer
1212  * or changing the class/type of fence covering this buffer
1213  *
1214  * Must be called with bo locked.
1215  */
1216
1217 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1218                                       uint32_t fence_class,
1219                                       int move_unfenced, int no_wait,
1220                                       int move_buffer)
1221 {
1222         struct drm_device *dev = bo->dev;
1223         struct drm_buffer_manager *bm = &dev->bm;
1224         int ret;
1225
1226         if (move_buffer) {
1227                 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1228                                          move_unfenced);
1229                 if (ret) {
1230                         if (ret != -EAGAIN)
1231                                 DRM_ERROR("Failed moving buffer.\n");
1232                         if (ret == -ENOMEM)
1233                                 DRM_ERROR("Out of aperture space or "
1234                                           "DRM memory quota.\n");
1235                         return ret;
1236                 }
1237         }
1238
1239         /*
1240          * Pinned buffers.
1241          */
1242
1243         if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1244                 bo->pinned_mem_type = bo->mem.mem_type;
1245                 mutex_lock(&dev->struct_mutex);
1246                 list_del_init(&bo->pinned_lru);
1247                 drm_bo_add_to_pinned_lru(bo);
1248
1249                 if (bo->pinned_node != bo->mem.mm_node) {
1250                         if (bo->pinned_node != NULL)
1251                                 drm_memrange_put_block(bo->pinned_node);
1252                         bo->pinned_node = bo->mem.mm_node;
1253                 }
1254
1255                 mutex_unlock(&dev->struct_mutex);
1256
1257         } else if (bo->pinned_node != NULL) {
1258
1259                 mutex_lock(&dev->struct_mutex);
1260
1261                 if (bo->pinned_node != bo->mem.mm_node)
1262                         drm_memrange_put_block(bo->pinned_node);
1263
1264                 list_del_init(&bo->pinned_lru);
1265                 bo->pinned_node = NULL;
1266                 mutex_unlock(&dev->struct_mutex);
1267
1268         }
1269
1270         /*
1271          * We might need to add a TTM.
1272          */
1273
1274         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1275                 ret = drm_bo_add_ttm(bo);
1276                 if (ret)
1277                         return ret;
1278         }
1279         /*
1280          * Validation has succeeded, move the access and other
1281          * non-mapping-related flag bits from the proposed flags to
1282          * the active flags
1283          */
1284
1285         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1286
1287         /*
1288          * Finally, adjust lru to be sure.
1289          */
1290
1291         mutex_lock(&dev->struct_mutex);
1292         list_del(&bo->lru);
1293         if (move_unfenced) {
1294                 list_add_tail(&bo->lru, &bm->unfenced);
1295                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1296                                 _DRM_BO_FLAG_UNFENCED);
1297         } else {
1298                 drm_bo_add_to_lru(bo);
1299                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1300                         wake_up_all(&bo->event_queue);
1301                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1302                                         _DRM_BO_FLAG_UNFENCED);
1303                 }
1304         }
1305         mutex_unlock(&dev->struct_mutex);
1306
1307         return 0;
1308 }
1309
1310 /*
1311  * This function is called with bo->mutex locked, but may release it
1312  * temporarily to wait for events.
1313  */
1314
1315 static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
1316                                        uint64_t flags,
1317                                        uint64_t mask,
1318                                        uint32_t hint,
1319                                        uint32_t fence_class,
1320                                        int no_wait,
1321                                        int *move_buffer)
1322 {
1323         struct drm_device *dev = bo->dev;
1324         struct drm_bo_driver *driver = dev->driver->bo_driver;
1325         uint32_t ftype;
1326
1327         int ret;
1328
1329         DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1330                   (unsigned long long) bo->mem.proposed_flags,
1331                   (unsigned long long) bo->mem.flags);
1332
1333         ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1334         if (ret)
1335                 return ret;
1336
1337         ret = drm_bo_wait_unmapped(bo, no_wait);
1338         if (ret)
1339                 return ret;
1340
1341         ret = driver->fence_type(bo, &fence_class, &ftype);
1342
1343         if (ret) {
1344                 DRM_ERROR("Driver did not support given buffer permissions.\n");
1345                 return ret;
1346         }
1347
1348         /*
1349          * We're switching command submission mechanism,
1350          * or cannot simply rely on the hardware serializing for us.
1351  * Insert a driver-dependent barrier or wait for buffer idle.
1352          */
1353
1354         if ((fence_class != bo->fence_class) ||
1355             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1356
1357                 ret = -EINVAL;
1358                 if (driver->command_stream_barrier) {
1359                         ret = driver->command_stream_barrier(bo,
1360                                                              fence_class,
1361                                                              ftype,
1362                                                              no_wait);
1363                 }
1364                 if (ret && ret != -EAGAIN) 
1365                         ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1366                 
1367                 if (ret)
1368                         return ret;
1369         }
1370
1371         bo->new_fence_class = fence_class;
1372         bo->new_fence_type = ftype;
1373
1374         /*
1375          * Check whether we need to move buffer.
1376          */
1377
1378         *move_buffer = 0;
1379         if (!drm_bo_mem_compat(&bo->mem)) {
1380                 *move_buffer = 1;
1381                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1382         }
1383
1384         return ret;
1385 }
1386
1387 /**
1388  * drm_bo_do_validate:
1389  *
1390  * @bo: the buffer object
1391  *
1392  * @flags: access rights, mapping parameters and cacheability. See
1393  * the DRM_BO_FLAG_* values in drm.h
1394  *
1395  * @mask: Which flag values to change; this allows callers to modify
1396  * things without knowing the current state of other flags.
1397  *
1398  * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
1399  * values in drm.h.
1400  *
1401  * @fence_class: a driver-specific way of doing fences. Presumably,
1402  * this would be used if the driver had more than one submission and
1403  * fencing mechanism. At this point, there isn't any use of this
1404  * from the user mode code.
1405  *
1406  *
1409  * located, along with changing access modes.
1410  */
1411
1412 int drm_bo_do_validate(struct drm_buffer_object *bo,
1413                        uint64_t flags, uint64_t mask, uint32_t hint,
1414                        uint32_t fence_class)
1415 {
1416         int ret;
1417         int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1418         int move_buffer;
1419
1420         mutex_lock(&bo->mutex);
1421
1422         do {
1423                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1424
1425                 ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
1426                                                   fence_class, no_wait,
1427                                                   &move_buffer);
1428                 if (ret)
1429                         goto out;
1430
1431         } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1432
1433         ret = drm_buffer_object_validate(bo,
1434                                          fence_class,
1435                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1436                                          no_wait,
1437                                          move_buffer);
1438
1439         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
1440 out:
1441         mutex_unlock(&bo->mutex);
1442
1443         return ret;
1444 }
1445 EXPORT_SYMBOL(drm_bo_do_validate);
1446
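/*
 * Allocate and initialize a new buffer object of @size bytes (rounded
 * up to whole pages), starting out in local, cached, mappable memory.
 * The caller-supplied @flags are applied as proposed flags and the
 * buffer is then validated; for drm_bo_type_device objects an mmap
 * address range is also set up.  On success the object is returned in
 * @buf_obj with one usage reference held.
 */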
1447 int drm_buffer_object_create(struct drm_device *dev,
1448                              unsigned long size,
1449                              enum drm_bo_type type,
1450                              uint64_t flags,
1451                              uint32_t hint,
1452                              uint32_t page_alignment,
1453                              unsigned long buffer_start,
1454                              struct drm_buffer_object **buf_obj)
1455 {
1456         struct drm_buffer_manager *bm = &dev->bm;
1457         struct drm_buffer_object *bo;
1458         int ret = 0;
1459         unsigned long num_pages;
1460
1461         size += buffer_start & ~PAGE_MASK;
1462         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1463         if (num_pages == 0) {
1464                 DRM_ERROR("Illegal buffer object size %ld.\n", size);
1465                 return -EINVAL;
1466         }
1467
1468         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1469
1470         if (!bo)
1471                 return -ENOMEM;
1472
1473         mutex_init(&bo->mutex);
1474         mutex_lock(&bo->mutex);
1475
1476         atomic_set(&bo->usage, 1);
1477         atomic_set(&bo->mapped, 0);
1478         DRM_INIT_WAITQUEUE(&bo->event_queue);
1479         INIT_LIST_HEAD(&bo->lru);
1480         INIT_LIST_HEAD(&bo->pinned_lru);
1481         INIT_LIST_HEAD(&bo->ddestroy);
1482 #ifdef DRM_ODD_MM_COMPAT
1483         INIT_LIST_HEAD(&bo->p_mm_list);
1484         INIT_LIST_HEAD(&bo->vma_list);
1485 #endif
1486         bo->dev = dev;
1487         bo->type = type;
1488         bo->num_pages = num_pages;
1489         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1490         bo->mem.num_pages = bo->num_pages;
1491         bo->mem.mm_node = NULL;
1492         bo->mem.page_alignment = page_alignment;
1493         bo->buffer_start = buffer_start & PAGE_MASK;
1494         bo->priv_flags = 0;
1495         bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1496                          DRM_BO_FLAG_MAPPABLE);
1497         bo->mem.proposed_flags = 0;
1498         atomic_inc(&bm->count);
1499         /*
1500          * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1501          */
1502         ret = drm_bo_modify_proposed_flags (bo, flags, flags);
1503         if (ret)
1504                 goto out_err;
1505
1506         /*
1507          * For drm_bo_type_device buffers, allocate
1508          * address space from the device so that applications
1509          * can mmap the buffer from there
1510          */
1511         if (bo->type == drm_bo_type_device) {
1512                 mutex_lock(&dev->struct_mutex);
1513                 ret = drm_bo_setup_vm_locked(bo);
1514                 mutex_unlock(&dev->struct_mutex);
1515                 if (ret)
1516                         goto out_err;
1517         }
1518
1519         mutex_unlock(&bo->mutex);
1520         ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
1521                                  0);
1522         if (ret)
1523                 goto out_err_unlocked;
1524
1525         *buf_obj = bo;
1526         return 0;
1527
1528 out_err:
1529         mutex_unlock(&bo->mutex);
1530 out_err_unlocked:
1531         drm_bo_usage_deref_unlocked(&bo);
1532         return ret;
1533 }
1534 EXPORT_SYMBOL(drm_buffer_object_create);
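
/*
 * Example: creating a cached, mappable 64 KiB buffer in system memory and
 * dropping the reference when done. Illustrative sketch only; the flag
 * combination mirrors the defaults set above, and real callers will usually
 * also request access flags (e.g. DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE) and
 * a device-specific placement.
 *
 *	struct drm_buffer_object *bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, 64 * 1024, drm_bo_type_device,
 *				       DRM_BO_FLAG_MEM_LOCAL |
 *				       DRM_BO_FLAG_CACHED |
 *				       DRM_BO_FLAG_MAPPABLE,
 *				       0, 0, 0, &bo);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_bo_usage_deref_unlocked(&bo);
 */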
1535
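/*
 * Wait for or expire the buffer's fence, optionally give back its pinned
 * memory-range node (unless that node is the one currently backing the
 * buffer), strip an erroneous NO_EVICT flag, and finally evict the buffer
 * if it still resides in mem_type. Called with no mutexes held; takes
 * bo->mutex and, briefly, dev->struct_mutex.
 */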
1536 static int drm_bo_leave_list(struct drm_buffer_object *bo,
1537                              uint32_t mem_type,
1538                              int free_pinned,
1539                              int allow_errors)
1540 {
1541         struct drm_device *dev = bo->dev;
1542         int ret = 0;
1543
1544         mutex_lock(&bo->mutex);
1545
1546         ret = drm_bo_expire_fence(bo, allow_errors);
1547         if (ret)
1548                 goto out;
1549
1550         if (free_pinned) {
1551                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
1552                 mutex_lock(&dev->struct_mutex);
1553                 list_del_init(&bo->pinned_lru);
1554                 if (bo->pinned_node == bo->mem.mm_node)
1555                         bo->pinned_node = NULL;
1556                 if (bo->pinned_node != NULL) {
1557                         drm_memrange_put_block(bo->pinned_node);
1558                         bo->pinned_node = NULL;
1559                 }
1560                 mutex_unlock(&dev->struct_mutex);
1561         }
1562
1563         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
1564                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present at "
1565                           "cleanup. Removing flag and evicting.\n");
1566                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
1567                 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
1568         }
1569
1570         if (bo->mem.mem_type == mem_type)
1571                 ret = drm_bo_evict(bo, mem_type, 0);
1572
1573         if (ret) {
1574                 if (allow_errors) {
1575                         goto out;
1576                 } else {
1577                         ret = 0;
1578                         DRM_ERROR("Cleanup eviction failed\n");
1579                 }
1580         }
1581
1582 out:
1583         mutex_unlock(&bo->mutex);
1584         return ret;
1585 }
1586
1587
1588 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
1589                                          int pinned_list)
1590 {
1591         if (pinned_list)
1592                 return list_entry(list, struct drm_buffer_object, pinned_lru);
1593         else
1594                 return list_entry(list, struct drm_buffer_object, lru);
1595 }
1596
1597 /*
1598  * dev->struct_mutex locked.
1599  */
1600
1601 static int drm_bo_force_list_clean(struct drm_device *dev,
1602                                    struct list_head *head,
1603                                    unsigned mem_type,
1604                                    int free_pinned,
1605                                    int allow_errors,
1606                                    int pinned_list)
1607 {
1608         struct list_head *list, *next, *prev;
1609         struct drm_buffer_object *entry, *nentry;
1610         int ret;
1611         int do_restart;
1612
1613         /*
1614          * The list traversal is a bit odd here, because an item may
1615          * disappear from the list when we release the struct_mutex or
1616          * when we decrease the usage count. Also we're not guaranteed
1617          * to drain pinned lists, so we can't always restart.
1618          */
1619
1620 restart:
1621         nentry = NULL;
1622         list_for_each_safe(list, next, head) {
1623                 prev = list->prev;
1624
1625                 entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
1626                 atomic_inc(&entry->usage);
1627                 if (nentry) {
1628                         atomic_dec(&nentry->usage);
1629                         nentry = NULL;
1630                 }
1631
1632                 /*
1633                  * Protect the next item from destruction, so we can check
1634                  * its list pointers later on.
1635                  */
1636
1637                 if (next != head) {
1638                         nentry = drm_bo_entry(next, pinned_list);
1639                         atomic_inc(&nentry->usage);
1640                 }
1641                 mutex_unlock(&dev->struct_mutex);
1642
1643                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
1644                                         allow_errors);
1645                 mutex_lock(&dev->struct_mutex);
1646
1647                 drm_bo_usage_deref_locked(&entry);
1648                 if (ret)
1649                         return ret;
1650
1651                 /*
1652                  * Has the next item disappeared from the list?
                  *
                  * The links are still trustworthy if nothing changed
                  * (next->prev == list) or if only the entry just processed
                  * was unlinked (next->prev == prev).  Anything else means
                  * "next" itself may have been removed or a new item may
                  * have been inserted, so restart from the list head.
1653                  */
1654
1655                 do_restart = ((next->prev != list) && (next->prev != prev));
1656
1657                 if (nentry != NULL && do_restart)
1658                         drm_bo_usage_deref_locked(&nentry);
1659
1660                 if (do_restart)
1661                         goto restart;
1662         }
1663         return 0;
1664 }
1665
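/*
 * Take down memory manager type @mem_type: evict everything from its lru
 * and pinned lists, release the pinned regions, and tear down the backing
 * memrange once it is clean. Returns -EBUSY if the range is still in use
 * and -EPERM if the type was initialized by the kernel and @kern_clean is
 * zero.
 */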
1666 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
1667 {
1668         struct drm_buffer_manager *bm = &dev->bm;
1669         struct drm_mem_type_manager *man = &bm->man[mem_type];
1670         int ret = -EINVAL;
1671
1672         if (mem_type >= DRM_BO_MEM_TYPES) {
1673                 DRM_ERROR("Illegal memory type %u\n", mem_type);
1674                 return ret;
1675         }
1676
1677         if (!man->has_type) {
1678                 DRM_ERROR("Trying to take down uninitialized "
1679                           "memory manager type %u\n", mem_type);
1680                 return ret;
1681         }
1682
1683         if ((man->kern_init_type) && (kern_clean == 0)) {
1684                 DRM_ERROR("Trying to take down kernel initialized "
1685                           "memory manager type %u\n", mem_type);
1686                 return -EPERM;
1687         }
1688
1689         man->use_type = 0;
1690         man->has_type = 0;
1691
1692         ret = 0;
1693         if (mem_type > 0) {
1694                 BUG_ON(!list_empty(&bm->unfenced));
1695                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
1696                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
1697
1698                 if (drm_memrange_clean(&man->manager)) {
1699                         drm_memrange_takedown(&man->manager);
1700                 } else {
1701                         ret = -EBUSY;
1702                 }
1703         }
1704
1705         return ret;
1706 }
1707 EXPORT_SYMBOL(drm_bo_clean_mm);
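
/*
 * Example: tearing down a driver-initialized DRM_BO_MEM_TT aperture early,
 * for instance before re-initializing it with a new size. Illustrative
 * only; drm_bo_driver_finish() below already cleans up every remaining
 * kernel-initialized type at unload. kern_clean is 1 because the type was
 * set up by the kernel with drm_bo_init_mm(..., 1).
 *
 *	ret = drm_bo_clean_mm(dev, DRM_BO_MEM_TT, 1);
 *	if (ret)
 *		DRM_ERROR("TT memory manager not clean.\n");
 */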
1708
1709 /**
1710  * Evict all buffers of a particular mem_type, but leave memory manager
1711  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
1712  * point since we have the hardware lock.
1713  */
1714
1715 int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
1716 {
1717         int ret;
1718         struct drm_buffer_manager *bm = &dev->bm;
1719         struct drm_mem_type_manager *man = &bm->man[mem_type];
1720
1721         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
1722                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
1723                 return -EINVAL;
1724         }
1725
1726         if (!man->has_type) {
1727                 DRM_ERROR("Memory type %u has not been initialized.\n",
1728                           mem_type);
1729                 return 0;
1730         }
1731
1732         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
1733         if (ret)
1734                 return ret;
1735         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
1736
1737         return ret;
1738 }
1739
1740 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
1741                    unsigned long p_offset, unsigned long p_size,
1742                    int kern_init)
1743 {
1744         struct drm_buffer_manager *bm = &dev->bm;
1745         int ret = -EINVAL;
1746         struct drm_mem_type_manager *man;
1747
1748         if (type >= DRM_BO_MEM_TYPES) {
1749                 DRM_ERROR("Illegal memory type %u\n", type);
1750                 return ret;
1751         }
1752
1753         man = &bm->man[type];
1754         if (man->has_type) {
1755                 DRM_ERROR("Memory manager already initialized for type %d\n",
1756                           type);
1757                 return ret;
1758         }
1759
1760         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
1761         if (ret)
1762                 return ret;
1763
1764         ret = 0;
1765         if (type != DRM_BO_MEM_LOCAL) {
1766                 if (!p_size) {
1767                         DRM_ERROR("Zero size memory manager type %u\n", type);
1768                         return -EINVAL;
1769                 }
1770                 ret = drm_memrange_init(&man->manager, p_offset, p_size);
1771                 if (ret)
1772                         return ret;
1773         }
1774         man->has_type = 1;
1775         man->use_type = 1;
1776         man->kern_init_type = kern_init;
1777         man->size = p_size;
1778
1779         INIT_LIST_HEAD(&man->lru);
1780         INIT_LIST_HEAD(&man->pinned);
1781
1782         return 0;
1783 }
1784 EXPORT_SYMBOL(drm_bo_init_mm);
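
/*
 * Example: a driver bringing up a 256 MB translation-table aperture at load
 * time. Offset and size are expressed in pages (memrange blocks are turned
 * into bus addresses with << PAGE_SHIFT, see drm_bo_pci_offset() below);
 * the kern_init argument of 1 marks the type as kernel-initialized.
 * Illustrative sketch; the memory type and size are driver-specific.
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
 *			     (256 * 1024 * 1024) >> PAGE_SHIFT, 1);
 *	if (ret)
 *		return ret;
 */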
1785
1786 /*
1787  * This function is intended to be called on drm driver unload.
1788  * If you decide to call it from lastclose, you must protect the call
1789  * from a potentially racing drm_bo_driver_init in firstopen.
1790  * (This may happen on X server restart).
1791  */
1792
1793 int drm_bo_driver_finish(struct drm_device *dev)
1794 {
1795         struct drm_buffer_manager *bm = &dev->bm;
1796         int ret = 0;
1797         unsigned i = DRM_BO_MEM_TYPES;
1798         struct drm_mem_type_manager *man;
1799
1800         mutex_lock(&dev->struct_mutex);
1801
1802         if (!bm->initialized)
1803                 goto out;
1804         bm->initialized = 0;
1805
1806         while (i--) {
1807                 man = &bm->man[i];
1808                 if (man->has_type) {
1809                         man->use_type = 0;
1810                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
1811                                 ret = -EBUSY;
1812                                 DRM_ERROR("DRM memory manager type %u "
1813                                           "is not clean.\n", i);
1814                         }
1815                         man->has_type = 0;
1816                 }
1817         }
1818         mutex_unlock(&dev->struct_mutex);
1819
1820         if (!cancel_delayed_work(&bm->wq))
1821                 flush_scheduled_work();
1822
1823         mutex_lock(&dev->struct_mutex);
1824         drm_bo_delayed_delete(dev, 1);
1825         if (list_empty(&bm->ddestroy))
1826                 DRM_DEBUG("Delayed destroy list was clean\n");
1827
1828         if (list_empty(&bm->man[0].lru))
1829                 DRM_DEBUG("Swap list was clean\n");
1830
1831         if (list_empty(&bm->man[0].pinned))
1832                 DRM_DEBUG("NO_MOVE list was clean\n");
1833
1834         if (list_empty(&bm->unfenced))
1835                 DRM_DEBUG("Unfenced list was clean\n");
1836
1837         if (bm->dummy_read_page) {
1838 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1839                 ClearPageReserved(bm->dummy_read_page);
1840 #endif
1841                 __free_page(bm->dummy_read_page);
1842         }
1843
1844 out:
1845         mutex_unlock(&dev->struct_mutex);
1846         return ret;
1847 }
1848 EXPORT_SYMBOL(drm_bo_driver_finish);
1849
1850 /*
1851  * This function is intended to be called on drm driver load.
1852  * If you decide to call it from firstopen, you must protect the call
1853  * from a potentially racing drm_bo_driver_finish in lastclose.
1854  * (This may happen on X server restart).
1855  */
1856
1857 int drm_bo_driver_init(struct drm_device *dev)
1858 {
1859         struct drm_bo_driver *driver = dev->driver->bo_driver;
1860         struct drm_buffer_manager *bm = &dev->bm;
1861         int ret = -EINVAL;
1862
1863         bm->dummy_read_page = NULL;
1864         mutex_lock(&dev->struct_mutex);
1865         if (!driver)
1866                 goto out_unlock;
1867
1868         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1869         if (!bm->dummy_read_page) {
1870                 ret = -ENOMEM;
1871                 goto out_unlock;
1872         }
1873
1874 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1875         SetPageReserved(bm->dummy_read_page);
1876 #endif
1877
1878         /*
1879          * Initialize the system memory buffer type.
1880          * Other types need to be driver / IOCTL initialized.
1881          */
1882         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
1883         if (ret) {
1884 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
1885                 ClearPageReserved(bm->dummy_read_page);
1886 #endif
1887                 __free_page(bm->dummy_read_page);
1888                 bm->dummy_read_page = NULL;
1889                 goto out_unlock;
1890         }
1891
1892 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1893         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
1894 #else
1895         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
1896 #endif
1897         bm->initialized = 1;
1898         bm->nice_mode = 1;
1899         atomic_set(&bm->count, 0);
1900         bm->cur_pages = 0;
1901         INIT_LIST_HEAD(&bm->unfenced);
1902         INIT_LIST_HEAD(&bm->ddestroy);
1903 out_unlock:
1904         mutex_unlock(&dev->struct_mutex);
1905         return ret;
1906 }
1907 EXPORT_SYMBOL(drm_bo_driver_init);
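
/*
 * Example: pairing the two entry points above from hypothetical driver
 * load/unload hooks (illustrative only; the foo_* names and aperture_size
 * are placeholders, and the race warnings in the comments above apply if
 * the calls are made from firstopen/lastclose instead):
 *
 *	static int foo_load(struct drm_device *dev, unsigned long flags)
 *	{
 *		int ret = drm_bo_driver_init(dev);
 *		if (ret)
 *			return ret;
 *		return drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
 *				      aperture_size >> PAGE_SHIFT, 1);
 *	}
 *
 *	static int foo_unload(struct drm_device *dev)
 *	{
 *		return drm_bo_driver_finish(dev);
 *	}
 */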
1908
1909 /*
1910  * buffer object vm functions.
1911  */
1912
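/*
 * Summary of the checks below: memory in a fixed (non-system) aperture is
 * always treated as PCI memory. Otherwise it only counts as PCI if it is
 * neither local system memory, nor CMA-backed, nor mapped cached.
 */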
1913 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
1914 {
1915         struct drm_buffer_manager *bm = &dev->bm;
1916         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
1917
1918         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
1919                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
1920                         return 0;
1921
1922                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
1923                         return 0;
1924
1925                 if (mem->flags & DRM_BO_FLAG_CACHED)
1926                         return 0;
1927         }
1928         return 1;
1929 }
1930 EXPORT_SYMBOL(drm_mem_reg_is_pci);
1931
1932 /**
1933  * Get the PCI offset for the buffer object memory.
1934  *
1935  * \param mem The buffer object memory region.
1936  * \param bus_base On return the base of the PCI region
1937  * \param bus_offset On return the byte offset into the PCI region
1938  * \param bus_size On return the byte size of the buffer object or zero if
1939  *     the buffer object memory is not accessible through a PCI region.
1940  * \return Failure indication.
1941  *
1942  * Returns -EINVAL if the buffer object is currently not mappable.
1943  * Otherwise returns zero.
1944  */
1945
1946 int drm_bo_pci_offset(struct drm_device *dev,
1947                       struct drm_bo_mem_reg *mem,
1948                       unsigned long *bus_base,
1949                       unsigned long *bus_offset, unsigned long *bus_size)
1950 {
1951         struct drm_buffer_manager *bm = &dev->bm;
1952         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
1953
1954         *bus_size = 0;
1955         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
1956                 return -EINVAL;
1957
1958         if (drm_mem_reg_is_pci(dev, mem)) {
1959                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
1960                 *bus_size = mem->num_pages << PAGE_SHIFT;
1961                 *bus_base = man->io_offset;
1962         }
1963
1964         return 0;
1965 }
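
/*
 * Example: a mapping path could use the three outputs to reach PCI-backed
 * memory through ioremap(). Illustrative sketch; virtual, ret and the bus_*
 * variables are the caller's locals and error handling is abbreviated.
 *
 *	ret = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
 *				&bus_size);
 *	if (ret)
 *		return ret;
 *	if (bus_size != 0)
 *		virtual = ioremap(bus_base + bus_offset, bus_size);
 *
 * A zero bus_size means the memory is not PCI-accessible and has to be
 * reached through the buffer's ttm page list instead.
 */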
1966
1967 /**
1968  * Kill all user-space virtual mappings of this buffer object.
1969  *
1970  * \param bo The buffer object.
1971  *
1972  * Call bo->mutex locked.
1973  */
1974
1975 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
1976 {
1977         struct drm_device *dev = bo->dev;
1978         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
1979         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1980
1981         if (!dev->dev_mapping)
1982                 return;
1983
1984         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
1985 }
1986
1987 /**
1988  * drm_bo_takedown_vm_locked:
1989  *
1990  * @bo: the buffer object to remove any drm device mapping
1991  *
1992  * Remove any associated vm mapping on the drm device node that
1993  * would have been created for a drm_bo_type_device buffer
1994  */
1995 void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
1996 {
1997         struct drm_map_list *list;
1998         drm_local_map_t *map;
1999         struct drm_device *dev = bo->dev;
2000
2001         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2002         if (bo->type != drm_bo_type_device)
2003                 return;
2004
2005         list = &bo->map_list;
2006         if (list->user_token) {
2007                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2008                 list->user_token = 0;
2009         }
2010         if (list->file_offset_node) {
2011                 drm_memrange_put_block(list->file_offset_node);
2012                 list->file_offset_node = NULL;
2013         }
2014
2015         map = list->map;
2016         if (!map)
2017                 return;
2018
2019         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2020         list->map = NULL;
2021         list->user_token = 0ULL;
2022         drm_bo_usage_deref_locked(&bo);
2023 }
2024 EXPORT_SYMBOL(drm_bo_takedown_vm_locked);
2025
2026 /**
2027  * drm_bo_setup_vm_locked:
2028  *
2029  * @bo: the buffer to allocate address space for
2030  *
2031  * Allocate address space in the drm device so that applications
2032  * can mmap the buffer and access the contents. This only
2033  * applies to drm_bo_type_device objects as others are not
2034  * placed in the drm device address space.
2035  */
2036 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2037 {
2038         struct drm_map_list *list = &bo->map_list;
2039         drm_local_map_t *map;
2040         struct drm_device *dev = bo->dev;
2041
2042         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2043         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2044         if (!list->map)
2045                 return -ENOMEM;
2046
2047         map = list->map;
2048         map->offset = 0;
2049         map->type = _DRM_TTM;
2050         map->flags = _DRM_REMOVABLE;
2051         map->size = bo->mem.num_pages * PAGE_SIZE;
2052         atomic_inc(&bo->usage);
2053         map->handle = (void *)bo;
2054
2055         list->file_offset_node = drm_memrange_search_free(&dev->offset_manager,
2056                                                     bo->mem.num_pages, 0, 0);
2057
2058         if (unlikely(!list->file_offset_node)) {
2059                 drm_bo_takedown_vm_locked(bo);
2060                 return -ENOMEM;
2061         }
2062
2063         list->file_offset_node = drm_memrange_get_block(list->file_offset_node,
2064                                                   bo->mem.num_pages, 0);
2065
2066         if (unlikely(!list->file_offset_node)) {
2067                 drm_bo_takedown_vm_locked(bo);
2068                 return -ENOMEM;
2069         }
2070                 
2071         list->hash.key = list->file_offset_node->start;
2072         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2073                 drm_bo_takedown_vm_locked(bo);
2074                 return -ENOMEM;
2075         }
2076
2077         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2078
2079         return 0;
2080 }
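
/*
 * After drm_bo_setup_vm_locked() succeeds, list->user_token is the byte
 * offset that a user-space client passes as the mmap() offset on the drm
 * device node; the drm mmap path finds this object again through
 * dev->map_hash and the _DRM_TTM map installed above. Rough user-space
 * sketch (drm_fd and size are the caller's):
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, user_token);
 */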